diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 059a1f486..e6870605a 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -15,7 +15,7 @@ jobs: fail-fast: false matrix: os: [ "ubuntu" ] - go: [ "1.21.9" ] + go: [ "1.22.4" ] test-type: [ "detector", "coverage", "memory", "custom-build-tags" ] runs-on: ${{ matrix.os }}-latest @@ -80,6 +80,7 @@ jobs: go test -v -race ./psiphon/common/accesscontrol go test -v -race ./psiphon/common/crypto/ssh go test -v -race ./psiphon/common/fragmentor + go test -v -timeout 20m -race -tags "PSIPHON_ENABLE_INPROXY" ./psiphon/common/inproxy go test -v -race ./psiphon/common/regen go test -v -race ./psiphon/common/monotime go test -v -race ./psiphon/common/obfuscator @@ -94,7 +95,7 @@ jobs: go test -v -race ./psiphon/common/values go test -v -race ./psiphon/common/wildcard go test -v -race ./psiphon/transferstats - sudo -E env "PATH=$PATH" go test -v -timeout 20m -race -tags "PSIPHON_RUN_PACKET_MANIPULATOR_TEST" ./psiphon/server + sudo -E env "PATH=$PATH" go test -v -timeout 20m -race -tags "PSIPHON_ENABLE_INPROXY PSIPHON_RUN_PACKET_MANIPULATOR_TEST" ./psiphon/server go test -v -race ./psiphon/server/psinet go test -v -timeout 20m -race ./psiphon go test -v -race ./ClientLibrary/clientlib @@ -112,6 +113,7 @@ jobs: go test -v -covermode=count -coverprofile=accesscontrol.coverprofile ./psiphon/common/accesscontrol go test -v -covermode=count -coverprofile=ssh.coverprofile ./psiphon/common/crypto/ssh go test -v -covermode=count -coverprofile=fragmentor.coverprofile ./psiphon/common/fragmentor + go test -v -timeout 20m -covermode=count -tags "PSIPHON_ENABLE_INPROXY" -coverprofile=inproxy.coverprofile ./psiphon/common/inproxy go test -v -covermode=count -coverprofile=regen.coverprofile ./psiphon/common/regen go test -v -covermode=count -coverprofile=monotime.coverprofile ./psiphon/common/monotime go test -v -covermode=count -coverprofile=obfuscator.coverprofile ./psiphon/common/obfuscator @@ -126,9 +128,9 @@ jobs: go test -v -covermode=count -coverprofile=values.coverprofile ./psiphon/common/values go test -v -covermode=count -coverprofile=wildcard.coverprofile ./psiphon/common/wildcard go test -v -covermode=count -coverprofile=transferstats.coverprofile ./psiphon/transferstats - sudo -E env "PATH=$PATH" go test -v -timeout 20m -covermode=count -coverprofile=server.coverprofile -tags "PSIPHON_RUN_PACKET_MANIPULATOR_TEST" ./psiphon/server + sudo -E env "PATH=$PATH" go test -v -timeout 20m -covermode=count -coverprofile=server.coverprofile -tags "PSIPHON_ENABLE_INPROXY PSIPHON_RUN_PACKET_MANIPULATOR_TEST" ./psiphon/server go test -v -covermode=count -coverprofile=psinet.coverprofile ./psiphon/server/psinet - go test -v -timeout 20m -covermode=count -coverprofile=psiphon.coverprofile ./psiphon + go test -v -timeout 20m -covermode=count -coverprofile=psiphon.coverprofile ./psiphon go test -v -covermode=count -coverprofile=clientlib.coverprofile ./ClientLibrary/clientlib go test -v -covermode=count -coverprofile=analysis.coverprofile ./Server/logging/analysis $GOPATH/bin/gover @@ -145,6 +147,8 @@ jobs: if: ${{ matrix.test-type == 'custom-build-tags' }} run: | cd ${{ github.workspace }}/go/src/github.com/Psiphon-Labs/psiphon-tunnel-core/ConsoleClient + go build -a -v -tags "" + go build -a -v -tags "PSIPHON_ENABLE_INPROXY" go build -a -v -tags "PSIPHON_DISABLE_QUIC" go build -a -v -tags "PSIPHON_DISABLE_GQUIC" go build -a -v -tags "PSIPHON_ENABLE_REFRACTION_NETWORKING" diff --git a/ClientLibrary/Dockerfile 
b/ClientLibrary/Dockerfile index 3221164fb..12fb8eb65 100644 --- a/ClientLibrary/Dockerfile +++ b/ClientLibrary/Dockerfile @@ -21,7 +21,7 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends \ # Install Go. # NOTE: Go 1.10+ is required to build c-shared for windows (https://github.com/golang/go/commit/bb0bfd002ada7e3eb9198d4287b32c2fed6e8da6) -ENV GOVERSION=go1.21.9 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 +ENV GOVERSION=go1.22.4 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 RUN curl -L https://storage.googleapis.com/golang/$GOVERSION.linux-amd64.tar.gz -o /tmp/go.tar.gz \ && tar -C /usr/local -xzf /tmp/go.tar.gz \ diff --git a/ClientLibrary/build-darwin.sh b/ClientLibrary/build-darwin.sh index 60227d66e..3a6988f3c 100755 --- a/ClientLibrary/build-darwin.sh +++ b/ClientLibrary/build-darwin.sh @@ -9,8 +9,8 @@ if [ -z ${2+x} ]; then BUILD_TAGS=""; else BUILD_TAGS="$2"; fi # Note: # clangwrap.sh needs to be updated when the Go version changes. # The last version was: -# https://github.com/golang/go/blob/go1.21.9/misc/ios/clangwrap.sh -GO_VERSION_REQUIRED="1.21.9" +# https://github.com/golang/go/blob/go1.22.4/misc/ios/clangwrap.sh +GO_VERSION_REQUIRED="1.22.4" BASE_DIR=$(cd "$(dirname "$0")" ; pwd -P) cd ${BASE_DIR} diff --git a/ConsoleClient/Dockerfile b/ConsoleClient/Dockerfile index 0ff9473ff..24ce80258 100644 --- a/ConsoleClient/Dockerfile +++ b/ConsoleClient/Dockerfile @@ -22,7 +22,7 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends \ && rm -rf /var/lib/apt/lists/* # Install Go. -ENV GOVERSION=go1.21.9 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 +ENV GOVERSION=go1.22.4 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 RUN curl -L https://storage.googleapis.com/golang/$GOVERSION.linux-amd64.tar.gz -o /tmp/go.tar.gz \ && tar -C /usr/local -xzf /tmp/go.tar.gz \ diff --git a/ConsoleClient/main.go b/ConsoleClient/main.go index af63ef12e..95023887a 100644 --- a/ConsoleClient/main.go +++ b/ConsoleClient/main.go @@ -262,7 +262,7 @@ func main() { psiphon.NoticeInfo("write profiles") profileSampleDurationSeconds := 5 common.WriteRuntimeProfiles( - psiphon.NoticeCommonLogger(), + psiphon.NoticeCommonLogger(false), config.DataRootDirectory, "", profileSampleDurationSeconds, diff --git a/MobileLibrary/Android/Dockerfile b/MobileLibrary/Android/Dockerfile index 20a910733..f2bb02c8a 100644 --- a/MobileLibrary/Android/Dockerfile +++ b/MobileLibrary/Android/Dockerfile @@ -23,7 +23,7 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends \ && rm -rf /var/lib/apt/lists/* # Install Go. 
-ENV GOVERSION=go1.21.9 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 +ENV GOVERSION=go1.22.4 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1 RUN curl -L https://storage.googleapis.com/golang/$GOVERSION.linux-amd64.tar.gz -o /tmp/go.tar.gz \ && tar -C /usr/local -xzf /tmp/go.tar.gz \ diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index 24913f2ac..297641598 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -131,6 +131,21 @@ default public void onActiveAuthorizationIDs(List authorizations) {} default public void onTrafficRateLimits(long upstreamBytesPerSecond, long downstreamBytesPerSecond) {} default public void onApplicationParameters(Object parameters) {} default public void onServerAlert(String reason, String subject, List actionURLs) {} + /** + * Called when tunnel-core emits a message to be displayed to the in-proxy operator. + * @param message The operator message received. + */ + default void onInproxyOperatorMessage(String message) {} + /** + * Called when tunnel-core reports proxy usage statistics. + * By default onInproxyProxyActivity is disabled. Enable it by setting + * EmitInproxyProxyActivity to true in the Psiphon config. + * @param connectingClients Number of clients connecting to the proxy. + * @param connectedClients Number of clients currently connected to the proxy. + * @param bytesUp Bytes uploaded through the proxy since the last report. + * @param bytesDown Bytes downloaded through the proxy since the last report. + */ + default void onInproxyProxyActivity(int connectingClients, int connectedClients,long bytesUp, long bytesDown) {} /** * Called when tunnel-core reports connected server region information. * @param region The server region received. @@ -1100,6 +1115,15 @@ private void handlePsiphonNotice(String noticeJSON) { notice.getJSONObject("data").getString("reason"), notice.getJSONObject("data").getString("subject"), actionURLsList); + } else if (noticeType.equals("InproxyOperatorMessage")) { + mHostService.onInproxyOperatorMessage( notice.getJSONObject("data").getString("message")); + } else if (noticeType.equals("InproxyProxyActivity")) { + JSONObject data = notice.getJSONObject("data"); + mHostService.onInproxyProxyActivity( + data.getInt("connectingClients"), + data.getInt("connectedClients"), + data.getLong("bytesUp"), + data.getLong("bytesDown")); } if (diagnostic) { @@ -1520,10 +1544,12 @@ private static boolean hasIPv6Route(Context context) throws Exception { // 40569). hasIPv6Route provides the same functionality via a // callback into Java code. + // Note: don't exclude interfaces with the isPointToPoint + // property, which is true for certain mobile networks. 
+ for (NetworkInterface netInterface : Collections.list(NetworkInterface.getNetworkInterfaces())) { if (netInterface.isUp() && - !netInterface.isLoopback() && - !netInterface.isPointToPoint()) { + !netInterface.isLoopback()) { for (InetAddress address : Collections.list(netInterface.getInetAddresses())) { // Per https://developer.android.com/reference/java/net/Inet6Address#textual-representation-of-ip-addresses, diff --git a/MobileLibrary/go-mobile/app/android.go b/MobileLibrary/go-mobile/app/android.go index 08a6e0538..5ee5605e4 100644 --- a/MobileLibrary/go-mobile/app/android.go +++ b/MobileLibrary/go-mobile/app/android.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build android -// +build android /* Android Apps are built with -buildmode=c-shared. They are loaded by a @@ -366,7 +365,7 @@ func runInputQueue(vm, jniEnv, ctx uintptr) error { var q *C.AInputQueue for { - if C.ALooper_pollAll(-1, nil, nil, nil) == C.ALOOPER_POLL_WAKE { + if C.ALooper_pollOnce(-1, nil, nil, nil) == C.ALOOPER_POLL_WAKE { select { default: case p := <-pending: diff --git a/MobileLibrary/go-mobile/app/app.go b/MobileLibrary/go-mobile/app/app.go index 19efb07f1..9817e13d7 100644 --- a/MobileLibrary/go-mobile/app/app.go +++ b/MobileLibrary/go-mobile/app/app.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux || darwin || windows -// +build linux darwin windows package app diff --git a/MobileLibrary/go-mobile/app/darwin_desktop.go b/MobileLibrary/go-mobile/app/darwin_desktop.go index 337717531..180c60278 100644 --- a/MobileLibrary/go-mobile/app/darwin_desktop.go +++ b/MobileLibrary/go-mobile/app/darwin_desktop.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package app diff --git a/MobileLibrary/go-mobile/app/darwin_ios.go b/MobileLibrary/go-mobile/app/darwin_ios.go index 8fb30fe82..bef9f4215 100644 --- a/MobileLibrary/go-mobile/app/darwin_ios.go +++ b/MobileLibrary/go-mobile/app/darwin_ios.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && ios -// +build darwin,ios package app diff --git a/MobileLibrary/go-mobile/app/internal/callfn/callfn.go b/MobileLibrary/go-mobile/app/internal/callfn/callfn.go index 7a3d0506e..ecc3d456f 100644 --- a/MobileLibrary/go-mobile/app/internal/callfn/callfn.go +++ b/MobileLibrary/go-mobile/app/internal/callfn/callfn.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build android && (arm || 386 || amd64 || arm64) -// +build android -// +build arm 386 amd64 arm64 // Package callfn provides an android entry point. // diff --git a/MobileLibrary/go-mobile/app/internal/testapp/testapp.go b/MobileLibrary/go-mobile/app/internal/testapp/testapp.go index eb850955b..18a97bf6a 100644 --- a/MobileLibrary/go-mobile/app/internal/testapp/testapp.go +++ b/MobileLibrary/go-mobile/app/internal/testapp/testapp.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || linux -// +build darwin linux // Small test app used by app/app_test.go. package main diff --git a/MobileLibrary/go-mobile/app/shiny.go b/MobileLibrary/go-mobile/app/shiny.go index dd1722a27..0b4e3e550 100644 --- a/MobileLibrary/go-mobile/app/shiny.go +++ b/MobileLibrary/go-mobile/app/shiny.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build windows -// +build windows package app diff --git a/MobileLibrary/go-mobile/app/x11.go b/MobileLibrary/go-mobile/app/x11.go index ec8c90a54..7e85e137c 100644 --- a/MobileLibrary/go-mobile/app/x11.go +++ b/MobileLibrary/go-mobile/app/x11.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && !android -// +build linux,!android package app diff --git a/MobileLibrary/go-mobile/asset/asset.go b/MobileLibrary/go-mobile/asset/asset.go index 5ed3a5c5a..d23656911 100644 --- a/MobileLibrary/go-mobile/asset/asset.go +++ b/MobileLibrary/go-mobile/asset/asset.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || linux || windows -// +build darwin linux windows package asset diff --git a/MobileLibrary/go-mobile/asset/asset_darwin_armx.go b/MobileLibrary/go-mobile/asset/asset_darwin_armx.go index 3eac25b04..e3b3b5a13 100644 --- a/MobileLibrary/go-mobile/asset/asset_darwin_armx.go +++ b/MobileLibrary/go-mobile/asset/asset_darwin_armx.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && (arm || arm64) -// +build darwin -// +build arm arm64 package asset diff --git a/MobileLibrary/go-mobile/asset/asset_desktop.go b/MobileLibrary/go-mobile/asset/asset_desktop.go index a36099504..af7c45f82 100644 --- a/MobileLibrary/go-mobile/asset/asset_desktop.go +++ b/MobileLibrary/go-mobile/asset/asset_desktop.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (linux && !android) || (darwin && !arm && !arm64) || windows -// +build linux,!android darwin,!arm,!arm64 windows package asset diff --git a/MobileLibrary/go-mobile/bind/genobjc.go b/MobileLibrary/go-mobile/bind/genobjc.go index d0982207d..d5914138a 100644 --- a/MobileLibrary/go-mobile/bind/genobjc.go +++ b/MobileLibrary/go-mobile/bind/genobjc.go @@ -977,7 +977,7 @@ func (g *ObjcGen) genInterfaceMethodProxy(obj *types.TypeName, m *types.Func) { if isErrorType(obj.Type()) && m.Name() == "Error" { // As a special case, ObjC NSErrors are passed to Go pretending to implement the Go error interface. - // They don't actually have an Error method, so calls to to it needs to be rerouted. + // They don't actually have an Error method, so calls to it needs to be rerouted. g.Printf("%s = [o localizedDescription];\n", s.retParams[0].name) } else { if s.ret == "void" { diff --git a/MobileLibrary/go-mobile/bind/implicit.go b/MobileLibrary/go-mobile/bind/implicit.go index 5144209c3..bf1d418c0 100644 --- a/MobileLibrary/go-mobile/bind/implicit.go +++ b/MobileLibrary/go-mobile/bind/implicit.go @@ -1,7 +1,6 @@ // This file imports implicit dependencies required by generated code. //go:build mobile_implicit -// +build mobile_implicit package bind diff --git a/MobileLibrary/go-mobile/bind/testdata/testpkg/tagged.go b/MobileLibrary/go-mobile/bind/testdata/testpkg/tagged.go index b5fe898e5..701a58044 100644 --- a/MobileLibrary/go-mobile/bind/testdata/testpkg/tagged.go +++ b/MobileLibrary/go-mobile/bind/testdata/testpkg/tagged.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aaa && bbb -// +build aaa,bbb // This file tests that tags work with gomobile. 
package testpkg diff --git a/MobileLibrary/go-mobile/cmd/gobind/doc.go b/MobileLibrary/go-mobile/cmd/gobind/doc.go index 31155c639..0823d5d81 100644 --- a/MobileLibrary/go-mobile/cmd/gobind/doc.go +++ b/MobileLibrary/go-mobile/cmd/gobind/doc.go @@ -218,7 +218,7 @@ to create an Android Activity subclass MainActivity: Gobind also recognizes Java interfaces as well as Objective C classes and protocols the same way. -For more details on binding the the native API, see the design proposals, +For more details on binding the native API, see the design proposals, https://golang.org/issues/16876 (Java) and https://golang.org/issues/17102 (Objective C). diff --git a/MobileLibrary/go-mobile/cmd/gobind/implicit.go b/MobileLibrary/go-mobile/cmd/gobind/implicit.go index 4289d4b36..e8f6dd57b 100644 --- a/MobileLibrary/go-mobile/cmd/gobind/implicit.go +++ b/MobileLibrary/go-mobile/cmd/gobind/implicit.go @@ -1,7 +1,6 @@ // This file imports implicit dependencies required by generated code. //go:build mobile_implicit -// +build mobile_implicit package main diff --git a/MobileLibrary/go-mobile/cmd/gomobile/bind.go b/MobileLibrary/go-mobile/cmd/gomobile/bind.go index 54eaa4bcf..fc9f75631 100644 --- a/MobileLibrary/go-mobile/cmd/gomobile/bind.go +++ b/MobileLibrary/go-mobile/cmd/gomobile/bind.go @@ -44,7 +44,7 @@ example, in Android Studio (1.2+), an AAR file can be imported using the module import wizard (File > New > New Module > Import .JAR or .AAR package), and setting it as a new dependency (File > Project Structure > Dependencies). This requires 'javac' -(version 1.7+) and Android SDK (API level 16 or newer) to build the +(version 1.8+) and Android SDK (API level 16 or newer) to build the library for Android. The ANDROID_HOME and ANDROID_NDK_HOME environment variables can be used to specify the Android SDK and NDK if they are not in the default locations. Use the -javapkg flag to specify the Java @@ -251,7 +251,9 @@ func getModuleVersions(targetPlatform string, targetArch string, src string) (*m } f := &modfile.File{} - f.AddModuleStmt("gobind") + if err := f.AddModuleStmt("gobind"); err != nil { + return nil, err + } e := json.NewDecoder(bytes.NewReader(output)) for { var mod *Module @@ -266,13 +268,19 @@ func getModuleVersions(targetPlatform string, targetArch string, src string) (*m // replaced by a local directory p = mod.Replace.Dir } - f.AddReplace(mod.Path, mod.Version, p, v) + if err := f.AddReplace(mod.Path, mod.Version, p, v); err != nil { + return nil, err + } } else { // When the version part is empty, the module is local and mod.Dir represents the location. if v := mod.Version; v == "" { - f.AddReplace(mod.Path, mod.Version, mod.Dir, "") + if err := f.AddReplace(mod.Path, mod.Version, mod.Dir, ""); err != nil { + return nil, err + } } else { - f.AddRequire(mod.Path, v) + if err := f.AddRequire(mod.Path, v); err != nil { + return nil, err + } } } } @@ -280,6 +288,19 @@ func getModuleVersions(targetPlatform string, targetArch string, src string) (*m break } } + + v, err := ensureGoVersion() + if err != nil { + return nil, err + } + // ensureGoVersion can return an empty string for a devel version. In this case, use the minimum version. 
+ if v == "" { + v = fmt.Sprintf("go1.%d", minimumGoMinorVersion) + } + if err := f.AddGoStmt(strings.TrimPrefix(v, "go")); err != nil { + return nil, err + } + return f, nil } diff --git a/MobileLibrary/go-mobile/cmd/gomobile/bind_androidapp.go b/MobileLibrary/go-mobile/cmd/gomobile/bind_androidapp.go index 3fa9cfa7a..f8fcabe1b 100644 --- a/MobileLibrary/go-mobile/cmd/gomobile/bind_androidapp.go +++ b/MobileLibrary/go-mobile/cmd/gomobile/bind_androidapp.go @@ -245,7 +245,7 @@ func buildAAR(srcDir, androidDir string, pkgs []*packages.Package, targets []tar } const ( - javacTargetVer = "1.7" + javacTargetVer = "1.8" minAndroidAPI = 16 ) diff --git a/MobileLibrary/go-mobile/cmd/gomobile/bind_iosapp.go b/MobileLibrary/go-mobile/cmd/gomobile/bind_iosapp.go index 38437439c..9a3e52169 100644 --- a/MobileLibrary/go-mobile/cmd/gomobile/bind_iosapp.go +++ b/MobileLibrary/go-mobile/cmd/gomobile/bind_iosapp.go @@ -5,6 +5,8 @@ package main import ( + "bytes" + "encoding/xml" "errors" "fmt" "io" @@ -13,6 +15,7 @@ import ( "strconv" "strings" "text/template" + "time" "golang.org/x/sync/errgroup" "golang.org/x/tools/go/packages" @@ -149,9 +152,12 @@ func goAppleBind(gobind string, pkgs []*packages.Package, targets []targetInfo) frameworkDirs = append(frameworkDirs, frameworkDir) frameworkArchCount[frameworkDir] = frameworkArchCount[frameworkDir] + 1 - versionsDir := filepath.Join(frameworkDir, "Versions") - versionsADir := filepath.Join(versionsDir, "A") - titlePath := filepath.Join(versionsADir, title) + frameworkLayout, err := frameworkLayoutForTarget(t, title) + if err != nil { + return err + } + + titlePath := filepath.Join(frameworkDir, frameworkLayout.binaryPath, title) if frameworkArchCount[frameworkDir] > 1 { // Not the first static lib, attach to a fat library and skip create headers fatCmd := exec.Command( @@ -164,17 +170,8 @@ func goAppleBind(gobind string, pkgs []*packages.Package, targets []targetInfo) continue } - versionsAHeadersDir := filepath.Join(versionsADir, "Headers") - if err := mkdir(versionsAHeadersDir); err != nil { - return err - } - if err := symlink("A", filepath.Join(versionsDir, "Current")); err != nil { - return err - } - if err := symlink("Versions/Current/Headers", filepath.Join(frameworkDir, "Headers")); err != nil { - return err - } - if err := symlink(filepath.Join("Versions/Current", title), filepath.Join(frameworkDir, title)); err != nil { + headersDir := filepath.Join(frameworkDir, frameworkLayout.headerPath) + if err := mkdir(headersDir); err != nil { return err } @@ -197,7 +194,7 @@ func goAppleBind(gobind string, pkgs []*packages.Package, targets []targetInfo) if len(fileBases) == 1 { headerFiles = append(headerFiles, title+".h") err := copyFile( - filepath.Join(versionsAHeadersDir, title+".h"), + filepath.Join(headersDir, title+".h"), filepath.Join(gobindDir, bindPrefix+title+".objc.h"), ) if err != nil { @@ -207,7 +204,7 @@ func goAppleBind(gobind string, pkgs []*packages.Package, targets []targetInfo) for _, fileBase := range fileBases { headerFiles = append(headerFiles, fileBase+".objc.h") err := copyFile( - filepath.Join(versionsAHeadersDir, fileBase+".objc.h"), + filepath.Join(headersDir, fileBase+".objc.h"), filepath.Join(gobindDir, fileBase+".objc.h"), ) if err != nil { @@ -215,14 +212,14 @@ func goAppleBind(gobind string, pkgs []*packages.Package, targets []targetInfo) } } err := copyFile( - filepath.Join(versionsAHeadersDir, "ref.h"), + filepath.Join(headersDir, "ref.h"), filepath.Join(gobindDir, "ref.h"), ) if err != nil { return err } headerFiles = 
append(headerFiles, title+".h") - err = writeFile(filepath.Join(versionsAHeadersDir, title+".h"), func(w io.Writer) error { + err = writeFile(filepath.Join(headersDir, title+".h"), func(w io.Writer) error { return appleBindHeaderTmpl.Execute(w, map[string]interface{}{ "pkgs": pkgs, "title": title, "bases": fileBases, }) @@ -232,14 +229,22 @@ func goAppleBind(gobind string, pkgs []*packages.Package, targets []targetInfo) } } - if err := mkdir(filepath.Join(versionsADir, "Resources")); err != nil { - return err - } - if err := symlink("Versions/Current/Resources", filepath.Join(frameworkDir, "Resources")); err != nil { + frameworkInfoPlistDir := filepath.Join(frameworkDir, frameworkLayout.infoPlistPath) + if err := mkdir(frameworkInfoPlistDir); err != nil { return err } - err = writeFile(filepath.Join(frameworkDir, "Resources", "Info.plist"), func(w io.Writer) error { - _, err := w.Write([]byte(appleBindInfoPlist)) + err = writeFile(filepath.Join(frameworkInfoPlistDir, "Info.plist"), func(w io.Writer) error { + fmVersion := fmt.Sprintf("0.0.%d", time.Now().Unix()) + infoFrameworkPlistlData := infoFrameworkPlistlData{ + BundleID: escapePlistValue(rfc1034Label(title)), + ExecutableName: escapePlistValue(title), + Version: escapePlistValue(fmVersion), + } + infoplist := new(bytes.Buffer) + if err := infoFrameworkPlistTmpl.Execute(infoplist, infoFrameworkPlistlData); err != nil { + return err + } + _, err := w.Write(infoplist.Bytes()) return err }) if err != nil { @@ -253,15 +258,18 @@ func goAppleBind(gobind string, pkgs []*packages.Package, targets []targetInfo) Module: title, Headers: headerFiles, } - err = writeFile(filepath.Join(versionsADir, "Modules", "module.modulemap"), func(w io.Writer) error { + modulesDir := filepath.Join(frameworkDir, frameworkLayout.modulePath) + err = writeFile(filepath.Join(modulesDir, "module.modulemap"), func(w io.Writer) error { return appleModuleMapTmpl.Execute(w, mmVals) }) if err != nil { return err } - err = symlink(filepath.Join("Versions/Current/Modules"), filepath.Join(frameworkDir, "Modules")) - if err != nil { - return err + + for src, dst := range frameworkLayout.symlinks { + if err := symlink(src, filepath.Join(frameworkDir, dst)); err != nil { + return err + } } } @@ -286,13 +294,79 @@ func goAppleBind(gobind string, pkgs []*packages.Package, targets []targetInfo) return err } -const appleBindInfoPlist = ` - - - - - -` +type frameworkLayout struct { + headerPath string + binaryPath string + modulePath string + infoPlistPath string + // symlinks to create in the framework. 
Maps src (relative to dst) -> dst (relative to framework bundle root) + symlinks map[string]string +} + +// frameworkLayoutForTarget generates the filestructure for a framework for the given target platform (macos, ios, etc), +// according to Apple's spec https://developer.apple.com/documentation/bundleresources/placing_content_in_a_bundle +func frameworkLayoutForTarget(t targetInfo, title string) (*frameworkLayout, error) { + switch t.platform { + case "macos", "maccatalyst": + return &frameworkLayout{ + headerPath: "Versions/A/Headers", + binaryPath: "Versions/A", + modulePath: "Versions/A/Modules", + infoPlistPath: "Versions/A/Resources", + symlinks: map[string]string{ + "A": "Versions/Current", + "Versions/Current/Resources": "Resources", + "Versions/Current/Headers": "Headers", + "Versions/Current/Modules": "Modules", + filepath.Join("Versions/Current", title): title, + }, + }, nil + case "ios", "iossimulator": + return &frameworkLayout{ + headerPath: "Headers", + binaryPath: ".", + modulePath: "Modules", + infoPlistPath: ".", + }, nil + } + + return nil, fmt.Errorf("unsupported platform %q", t.platform) +} + +type infoFrameworkPlistlData struct { + BundleID string + ExecutableName string + Version string +} + +// infoFrameworkPlistTmpl is a template for the Info.plist file in a framework. +// Minimum OS version == 100.0 is a workaround for SPM issue +// https://github.com/firebase/firebase-ios-sdk/pull/12439/files#diff-f4eb4ff5ec89af999cbe8fa3ffe5647d7853ffbc9c1515b337ca043c684b6bb4R679 +var infoFrameworkPlistTmpl = template.Must(template.New("infoFrameworkPlist").Parse(` + + + + CFBundleExecutable + {{.ExecutableName}} + CFBundleIdentifier + {{.BundleID}} + MinimumOSVersion + 100.0 + CFBundleShortVersionString + {{.Version}} + CFBundleVersion + {{.Version}} + CFBundlePackageType + FMWK + + +`)) + +func escapePlistValue(value string) string { + var b bytes.Buffer + xml.EscapeText(&b, []byte(value)) + return b.String() +} var appleModuleMapTmpl = template.Must(template.New("iosmmap").Parse(`framework module "{{.Module}}" { header "ref.h" diff --git a/MobileLibrary/go-mobile/cmd/gomobile/bind_test.go b/MobileLibrary/go-mobile/cmd/gomobile/bind_test.go index fa6b0efb8..42d3f1110 100644 --- a/MobileLibrary/go-mobile/cmd/gomobile/bind_test.go +++ b/MobileLibrary/go-mobile/cmd/gomobile/bind_test.go @@ -6,7 +6,6 @@ package main import ( "bytes" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -190,7 +189,7 @@ GOOS=android CGO_ENABLED=1 gobind -lang=go,java -outdir=$WORK{{if .JavaPkg}} -ja mkdir -p $WORK/src-android-arm PWD=$WORK/src-android-arm GOMODCACHE=$GOPATH/pkg/mod GOOS=android GOARCH=arm CC=$NDK_PATH/toolchains/llvm/prebuilt/{{.NDKARCH}}/bin/armv7a-linux-androideabi16-clang CXX=$NDK_PATH/toolchains/llvm/prebuilt/{{.NDKARCH}}/bin/armv7a-linux-androideabi16-clang++ CGO_ENABLED=1 GOARM=7 GOPATH=$WORK:$GOPATH go mod tidy PWD=$WORK/src-android-arm GOMODCACHE=$GOPATH/pkg/mod GOOS=android GOARCH=arm CC=$NDK_PATH/toolchains/llvm/prebuilt/{{.NDKARCH}}/bin/armv7a-linux-androideabi16-clang CXX=$NDK_PATH/toolchains/llvm/prebuilt/{{.NDKARCH}}/bin/armv7a-linux-androideabi16-clang++ CGO_ENABLED=1 GOARM=7 GOPATH=$WORK:$GOPATH go build -x -buildmode=c-shared -o=$WORK/android/src/main/jniLibs/armeabi-v7a/libgojni.so ./gobind -PWD=$WORK/java javac -d $WORK/javac-output -source 1.7 -target 1.7 -bootclasspath {{.AndroidPlatform}}/android.jar *.java +PWD=$WORK/java javac -d $WORK/javac-output -source 1.8 -target 1.8 -bootclasspath {{.AndroidPlatform}}/android.jar *.java jar c -C $WORK/javac-output . 
`)) @@ -251,16 +250,75 @@ func TestBindAppleAll(t *testing.T) { } } +const ambiguousPathsGoMod = `module ambiguouspaths + +go 1.18 + +require golang.org/x/mobile v0.0.0-20230905140555-fbe1c053b6a9 + +require ( + golang.org/x/exp/shiny v0.0.0-20230817173708-d852ddb80c63 // indirect + golang.org/x/image v0.11.0 // indirect + golang.org/x/sys v0.11.0 // indirect +) +` + +const ambiguousPathsGoSum = `github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp/shiny v0.0.0-20230817173708-d852ddb80c63 h1:3AGKexOYqL+ztdWdkB1bDwXgPBuTS/S8A4WzuTvJ8Cg= +golang.org/x/exp/shiny v0.0.0-20230817173708-d852ddb80c63/go.mod h1:UH99kUObWAZkDnWqppdQe5ZhPYESUw8I0zVV1uWBR+0= +golang.org/x/image v0.11.0 h1:ds2RoQvBvYTiJkwpSFDwCcDFNX7DqjL2WsUgTNk0Ooo= +golang.org/x/image v0.11.0/go.mod h1:bglhjqbqVuEb9e9+eNR45Jfu7D+T4Qan+NhQk8Ck2P8= +golang.org/x/mobile v0.0.0-20230905140555-fbe1c053b6a9 h1:LaLfQUz4L1tfuOlrtEouZLZ0qHDwKn87E1NKoiudP/o= +golang.org/x/mobile v0.0.0-20230905140555-fbe1c053b6a9/go.mod h1:2jxcxt/JNJik+N+QcB8q308+SyrE3bu43+sGZDmJ02M= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +` + +const ambiguousPathsGo = `package ambiguouspaths + +import ( + _ "golang.org/x/mobile/app" +) + +func Dummy() {} +` + func TestBindWithGoModules(t *testing.T) { if runtime.GOOS == "android" || runtime.GOOS == "ios" { t.Skipf("gomobile and gobind are not available on %s", runtime.GOOS) } - dir, err := ioutil.TempDir("", "gomobile-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() if out, err := exec.Command("go", "build", "-o="+dir, "golang.org/x/mobile/cmd/gobind").CombinedOutput(); err != nil { t.Fatalf("%v: %s", err, string(out)) @@ -273,7 +331,22 @@ func TestBindWithGoModules(t *testing.T) { path += string(filepath.ListSeparator) + p } + // Create a source package dynamically to avoid go.mod files in this repository. See golang/go#34352 for more details. + if err := os.Mkdir(filepath.Join(dir, "ambiguouspaths"), 0755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(dir, "ambiguouspaths", "go.mod"), []byte(ambiguousPathsGoMod), 0644); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(dir, "ambiguouspaths", "go.sum"), []byte(ambiguousPathsGoSum), 0644); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(dir, "ambiguouspaths", "ambiguouspaths.go"), []byte(ambiguousPathsGo), 0644); err != nil { + t.Fatal(err) + } + for _, target := range []string{"android", "ios"} { + target := target t.Run(target, func(t *testing.T) { switch target { case "android": @@ -308,6 +381,11 @@ func TestBindWithGoModules(t *testing.T) { Path: "./bind/testdata/cgopkg", Dir: filepath.Join("..", ".."), }, + { + Name: "Ambiguous Paths", + Path: ".", + Dir: filepath.Join(dir, "ambiguouspaths"), + }, } for _, tc := range tests { diff --git a/MobileLibrary/go-mobile/cmd/gomobile/build.go b/MobileLibrary/go-mobile/cmd/gomobile/build.go index c9483434d..f8895eb31 100644 --- a/MobileLibrary/go-mobile/cmd/gomobile/build.go +++ b/MobileLibrary/go-mobile/cmd/gomobile/build.go @@ -160,7 +160,7 @@ func runBuildImpl(cmd *command) (*packages.Package, error) { return pkg, nil } if buildBundleID == "" { - return nil, fmt.Errorf("-target=ios requires -bundleid set") + return nil, fmt.Errorf("-target=%s requires -bundleid set", buildTarget) } nmpkgs, err = goAppleBuild(pkg, buildBundleID, targets) if err != nil { diff --git a/MobileLibrary/go-mobile/cmd/gomobile/doc.go b/MobileLibrary/go-mobile/cmd/gomobile/doc.go index 10e31278b..74090da67 100644 --- a/MobileLibrary/go-mobile/cmd/gomobile/doc.go +++ b/MobileLibrary/go-mobile/cmd/gomobile/doc.go @@ -52,7 +52,7 @@ example, in Android Studio (1.2+), an AAR file can be imported using the module import wizard (File > New > New Module > Import .JAR or .AAR package), and setting it as a new dependency (File > Project Structure > Dependencies). 
This requires 'javac' -(version 1.7+) and Android SDK (API level 15 or newer) to build the +(version 1.7+) and Android SDK (API level 16 or newer) to build the library for Android. The environment variable ANDROID_HOME must be set to the path to Android SDK. Use the -javapkg flag to specify the Java package prefix for the generated classes. @@ -113,7 +113,7 @@ Flag -iosversion sets the minimal version of the iOS SDK to compile against. The default version is 13.0. Flag -androidapi sets the Android API version to compile against. -The default and minimum is 15. +The default and minimum is 16. The -bundleid flag is required for -target ios and sets the bundle ID to use with the app. @@ -132,9 +132,9 @@ Usage: gomobile clean -# Clean removes object files and cached NDK files downloaded by gomobile init +Clean removes object files and cached NDK files downloaded by gomobile init. -Build OpenAL for Android +# Build OpenAL for Android Usage: diff --git a/MobileLibrary/go-mobile/cmd/gomobile/gendex.go b/MobileLibrary/go-mobile/cmd/gomobile/gendex.go index 88cf5549f..be470a2bd 100644 --- a/MobileLibrary/go-mobile/cmd/gomobile/gendex.go +++ b/MobileLibrary/go-mobile/cmd/gomobile/gendex.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ignore -// +build ignore // Gendex generates a dex file used by Go apps created with gomobile. // @@ -74,8 +73,8 @@ func gendex() error { } cmd := exec.Command( "javac", - "-source", "1.7", - "-target", "1.7", + "-source", "1.8", + "-target", "1.8", "-bootclasspath", platform+"/android.jar", "-d", tmpdir+"/work", ) diff --git a/MobileLibrary/go-mobile/cmd/gomobile/main.go b/MobileLibrary/go-mobile/cmd/gomobile/main.go index 9814a0d49..fe6fd7e16 100644 --- a/MobileLibrary/go-mobile/cmd/gomobile/main.go +++ b/MobileLibrary/go-mobile/cmd/gomobile/main.go @@ -9,7 +9,6 @@ package main import ( "bufio" "bytes" - "errors" "flag" "fmt" "html/template" @@ -24,9 +23,11 @@ import ( var ( gomobileName = "gomobile" - goVersionOut = []byte(nil) + goVersion string ) +const minimumGoMinorVersion = 18 + func printUsage(w io.Writer) { bufw := bufio.NewWriter(w) if err := usageTmpl.Execute(bufw, commands); err != nil { @@ -58,7 +59,7 @@ func main() { return } - if err := determineGoVersion(); err != nil { + if _, err := ensureGoVersion(); err != nil { fmt.Fprintf(os.Stderr, "%s: %v\n", gomobileName, err) os.Exit(1) } @@ -84,20 +85,25 @@ func main() { os.Exit(2) } -func determineGoVersion() error { +func ensureGoVersion() (string, error) { + if goVersion != "" { + return goVersion, nil + } + goVersionOut, err := exec.Command("go", "version").CombinedOutput() if err != nil { - return fmt.Errorf("'go version' failed: %v, %s", err, goVersionOut) + return "", fmt.Errorf("'go version' failed: %v, %s", err, goVersionOut) } var minor int if _, err := fmt.Sscanf(string(goVersionOut), "go version go1.%d", &minor); err != nil { // Ignore unknown versions; it's probably a devel version. 
- return nil + return "", nil } - if minor < 16 { - return errors.New("Go 1.16 or newer is required") + goVersion = fmt.Sprintf("go1.%d", minor) + if minor < minimumGoMinorVersion { + return "", fmt.Errorf("Go 1.%d or newer is required", minimumGoMinorVersion) } - return nil + return goVersion, nil } func help(args []string) { diff --git a/MobileLibrary/go-mobile/cmd/gomobile/tools.go b/MobileLibrary/go-mobile/cmd/gomobile/tools.go index e8d4ade00..bee46004c 100644 --- a/MobileLibrary/go-mobile/cmd/gomobile/tools.go +++ b/MobileLibrary/go-mobile/cmd/gomobile/tools.go @@ -1,7 +1,6 @@ // This file includes the tools the gomobile depends on. //go:build tools -// +build tools package main diff --git a/MobileLibrary/go-mobile/example/basic/main.go b/MobileLibrary/go-mobile/example/basic/main.go index 94932db4f..fb6167be6 100644 --- a/MobileLibrary/go-mobile/example/basic/main.go +++ b/MobileLibrary/go-mobile/example/basic/main.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || linux || windows -// +build darwin linux windows // An app that draws a green triangle on a red background. // diff --git a/MobileLibrary/go-mobile/example/basic/main_x.go b/MobileLibrary/go-mobile/example/basic/main_x.go index 52e8c5399..49d8612d1 100644 --- a/MobileLibrary/go-mobile/example/basic/main_x.go +++ b/MobileLibrary/go-mobile/example/basic/main_x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !darwin && !linux && !windows -// +build !darwin,!linux,!windows package main diff --git a/MobileLibrary/go-mobile/example/flappy/game.go b/MobileLibrary/go-mobile/example/flappy/game.go index 89b8ae2db..04903c719 100644 --- a/MobileLibrary/go-mobile/example/flappy/game.go +++ b/MobileLibrary/go-mobile/example/flappy/game.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || linux -// +build darwin linux package main diff --git a/MobileLibrary/go-mobile/example/flappy/main.go b/MobileLibrary/go-mobile/example/flappy/main.go index 6f298ea6d..b0970f5b8 100644 --- a/MobileLibrary/go-mobile/example/flappy/main.go +++ b/MobileLibrary/go-mobile/example/flappy/main.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || linux -// +build darwin linux // Flappy Gopher is a simple one-button game that uses the // mobile framework and the experimental sprite engine. diff --git a/MobileLibrary/go-mobile/example/flappy/main_x.go b/MobileLibrary/go-mobile/example/flappy/main_x.go index 6739924fe..a056c79f2 100644 --- a/MobileLibrary/go-mobile/example/flappy/main_x.go +++ b/MobileLibrary/go-mobile/example/flappy/main_x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !darwin && !linux -// +build !darwin,!linux package main diff --git a/MobileLibrary/go-mobile/example/ivy/tools.go b/MobileLibrary/go-mobile/example/ivy/tools.go index 85a60697e..ab067c246 100644 --- a/MobileLibrary/go-mobile/example/ivy/tools.go +++ b/MobileLibrary/go-mobile/example/ivy/tools.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ignore -// +build ignore package dummy diff --git a/MobileLibrary/go-mobile/example/network/main.go b/MobileLibrary/go-mobile/example/network/main.go index 4b23cbed4..5ac2f2cc9 100644 --- a/MobileLibrary/go-mobile/example/network/main.go +++ b/MobileLibrary/go-mobile/example/network/main.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build darwin || linux || windows -// +build darwin linux windows // An app that paints green if golang.org is reachable when the app first // starts, or red otherwise. diff --git a/MobileLibrary/go-mobile/example/network/main_x.go b/MobileLibrary/go-mobile/example/network/main_x.go index 9bda7ac77..e6cd77852 100644 --- a/MobileLibrary/go-mobile/example/network/main_x.go +++ b/MobileLibrary/go-mobile/example/network/main_x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !darwin && !linux && !windows -// +build !darwin,!linux,!windows package main diff --git a/MobileLibrary/go-mobile/exp/app/debug/fps.go b/MobileLibrary/go-mobile/exp/app/debug/fps.go index bef808dd5..c01570582 100644 --- a/MobileLibrary/go-mobile/exp/app/debug/fps.go +++ b/MobileLibrary/go-mobile/exp/app/debug/fps.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || linux || windows -// +build darwin linux windows // Package debug provides GL-based debugging tools for apps. package debug // import "golang.org/x/mobile/exp/app/debug" diff --git a/MobileLibrary/go-mobile/exp/audio/al/al.go b/MobileLibrary/go-mobile/exp/audio/al/al.go index b02cbb761..974488967 100644 --- a/MobileLibrary/go-mobile/exp/audio/al/al.go +++ b/MobileLibrary/go-mobile/exp/audio/al/al.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || linux || windows -// +build darwin linux windows // Package al provides OpenAL Soft bindings for Go. // diff --git a/MobileLibrary/go-mobile/exp/audio/al/al_notandroid.go b/MobileLibrary/go-mobile/exp/audio/al/al_notandroid.go index 577fee361..06fee46ac 100644 --- a/MobileLibrary/go-mobile/exp/audio/al/al_notandroid.go +++ b/MobileLibrary/go-mobile/exp/audio/al/al_notandroid.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || (linux && !android) || windows -// +build darwin linux,!android windows package al diff --git a/MobileLibrary/go-mobile/exp/audio/al/alc.go b/MobileLibrary/go-mobile/exp/audio/al/alc.go index f0b421553..1c9c44282 100644 --- a/MobileLibrary/go-mobile/exp/audio/al/alc.go +++ b/MobileLibrary/go-mobile/exp/audio/al/alc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || linux || windows -// +build darwin linux windows package al diff --git a/MobileLibrary/go-mobile/exp/audio/al/alc_notandroid.go b/MobileLibrary/go-mobile/exp/audio/al/alc_notandroid.go index 68e907c1c..a87df647f 100644 --- a/MobileLibrary/go-mobile/exp/audio/al/alc_notandroid.go +++ b/MobileLibrary/go-mobile/exp/audio/al/alc_notandroid.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || (linux && !android) || windows -// +build darwin linux,!android windows package al diff --git a/MobileLibrary/go-mobile/exp/audio/al/const.go b/MobileLibrary/go-mobile/exp/audio/al/const.go index aef054328..2408cbe97 100644 --- a/MobileLibrary/go-mobile/exp/audio/al/const.go +++ b/MobileLibrary/go-mobile/exp/audio/al/const.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || linux || windows -// +build darwin linux windows package al diff --git a/MobileLibrary/go-mobile/exp/f32/gen.go b/MobileLibrary/go-mobile/exp/f32/gen.go index fdd1c3f63..2af3327bf 100644 --- a/MobileLibrary/go-mobile/exp/f32/gen.go +++ b/MobileLibrary/go-mobile/exp/f32/gen.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build ignore -// +build ignore package main diff --git a/MobileLibrary/go-mobile/exp/font/font.go b/MobileLibrary/go-mobile/exp/font/font.go index c14513409..02e2b9765 100644 --- a/MobileLibrary/go-mobile/exp/font/font.go +++ b/MobileLibrary/go-mobile/exp/font/font.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux || darwin -// +build linux darwin package font diff --git a/MobileLibrary/go-mobile/exp/font/font_linux.go b/MobileLibrary/go-mobile/exp/font/font_linux.go index 65316233e..f0beb34b5 100644 --- a/MobileLibrary/go-mobile/exp/font/font_linux.go +++ b/MobileLibrary/go-mobile/exp/font/font_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !android -// +build !android package font diff --git a/MobileLibrary/go-mobile/exp/font/font_test.go b/MobileLibrary/go-mobile/exp/font/font_test.go index c93c28237..f98752565 100644 --- a/MobileLibrary/go-mobile/exp/font/font_test.go +++ b/MobileLibrary/go-mobile/exp/font/font_test.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux || darwin -// +build linux darwin package font diff --git a/MobileLibrary/go-mobile/exp/gl/glutil/context_darwin_desktop.go b/MobileLibrary/go-mobile/exp/gl/glutil/context_darwin_desktop.go index 344c73e41..725cdd2e6 100644 --- a/MobileLibrary/go-mobile/exp/gl/glutil/context_darwin_desktop.go +++ b/MobileLibrary/go-mobile/exp/gl/glutil/context_darwin_desktop.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package glutil diff --git a/MobileLibrary/go-mobile/exp/gl/glutil/context_x11.go b/MobileLibrary/go-mobile/exp/gl/glutil/context_x11.go index 394f1b197..826b026d5 100644 --- a/MobileLibrary/go-mobile/exp/gl/glutil/context_x11.go +++ b/MobileLibrary/go-mobile/exp/gl/glutil/context_x11.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && !android -// +build linux,!android package glutil diff --git a/MobileLibrary/go-mobile/exp/gl/glutil/glimage.go b/MobileLibrary/go-mobile/exp/gl/glutil/glimage.go index ffe953d54..72a205f8c 100644 --- a/MobileLibrary/go-mobile/exp/gl/glutil/glimage.go +++ b/MobileLibrary/go-mobile/exp/gl/glutil/glimage.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux || darwin || windows -// +build linux darwin windows package glutil diff --git a/MobileLibrary/go-mobile/exp/gl/glutil/glimage_test.go b/MobileLibrary/go-mobile/exp/gl/glutil/glimage_test.go index 975e30504..2468303b4 100644 --- a/MobileLibrary/go-mobile/exp/gl/glutil/glimage_test.go +++ b/MobileLibrary/go-mobile/exp/gl/glutil/glimage_test.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || (linux && !android) -// +build darwin linux,!android // TODO(crawshaw): Run tests on other OSs when more contexts are supported. diff --git a/MobileLibrary/go-mobile/exp/gl/glutil/glutil.go b/MobileLibrary/go-mobile/exp/gl/glutil/glutil.go index 18b09113b..880b97e28 100644 --- a/MobileLibrary/go-mobile/exp/gl/glutil/glutil.go +++ b/MobileLibrary/go-mobile/exp/gl/glutil/glutil.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build darwin || linux || windows -// +build darwin linux windows package glutil // import "golang.org/x/mobile/exp/gl/glutil" diff --git a/MobileLibrary/go-mobile/exp/sensor/android.c b/MobileLibrary/go-mobile/exp/sensor/android.c index 1ac722351..1cf934014 100644 --- a/MobileLibrary/go-mobile/exp/sensor/android.c +++ b/MobileLibrary/go-mobile/exp/sensor/android.c @@ -61,7 +61,7 @@ int GoAndroid_readQueue(int n, int32_t* types, int64_t* timestamps, float* vecto // Try n times read from the event queue. // If anytime timeout occurs, don't retry to read and immediately return. // Consume the event queue entirely between polls. - while (i < n && (id = ALooper_pollAll(GO_ANDROID_READ_TIMEOUT_MS, NULL, &events, NULL)) >= 0) { + while (i < n && (id = ALooper_pollOnce(GO_ANDROID_READ_TIMEOUT_MS, NULL, &events, NULL)) >= 0) { if (id != GO_ANDROID_SENSOR_LOOPER_ID) { continue; } diff --git a/MobileLibrary/go-mobile/exp/sensor/android.go b/MobileLibrary/go-mobile/exp/sensor/android.go index ad8a98b6e..a44861450 100644 --- a/MobileLibrary/go-mobile/exp/sensor/android.go +++ b/MobileLibrary/go-mobile/exp/sensor/android.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build android -// +build android package sensor diff --git a/MobileLibrary/go-mobile/exp/sensor/darwin_armx.go b/MobileLibrary/go-mobile/exp/sensor/darwin_armx.go index 2c8df049a..699a7aa72 100644 --- a/MobileLibrary/go-mobile/exp/sensor/darwin_armx.go +++ b/MobileLibrary/go-mobile/exp/sensor/darwin_armx.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && (arm || arm64) -// +build darwin -// +build arm arm64 package sensor diff --git a/MobileLibrary/go-mobile/exp/sensor/notmobile.go b/MobileLibrary/go-mobile/exp/sensor/notmobile.go index 3362e548c..dc6f328c0 100644 --- a/MobileLibrary/go-mobile/exp/sensor/notmobile.go +++ b/MobileLibrary/go-mobile/exp/sensor/notmobile.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (linux && !android) || (darwin && !arm && !arm64) || windows -// +build linux,!android darwin,!arm,!arm64 windows package sensor diff --git a/MobileLibrary/go-mobile/exp/sprite/glsprite/glsprite.go b/MobileLibrary/go-mobile/exp/sprite/glsprite/glsprite.go index 4f2522b54..1e9fa0cda 100644 --- a/MobileLibrary/go-mobile/exp/sprite/glsprite/glsprite.go +++ b/MobileLibrary/go-mobile/exp/sprite/glsprite/glsprite.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || linux || windows -// +build darwin linux windows // Package glsprite implements a sprite Engine using OpenGL ES 2. // diff --git a/MobileLibrary/go-mobile/geom/geom.go b/MobileLibrary/go-mobile/geom/geom.go index 23cf67bea..1bfd14f3e 100644 --- a/MobileLibrary/go-mobile/geom/geom.go +++ b/MobileLibrary/go-mobile/geom/geom.go @@ -74,7 +74,7 @@ import "fmt" // // The unit Pt is a typographical point, 1/72 of an inch (0.3527 mm). // -// It can be be converted to a length in current device pixels by +// It can be converted to a length in current device pixels by // multiplying with PixelsPerPt after app initialization is complete. type Pt float32 diff --git a/MobileLibrary/go-mobile/gl/gendebug.go b/MobileLibrary/go-mobile/gl/gendebug.go index 2e6563cd8..146826f07 100644 --- a/MobileLibrary/go-mobile/gl/gendebug.go +++ b/MobileLibrary/go-mobile/gl/gendebug.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build ignore -// +build ignore // The gendebug program takes gl.go and generates a version of it // where each function includes tracing code that writes its arguments @@ -885,8 +884,7 @@ const preamble = `// Copyright 2014 The Go Authors. All rights reserved. // Code generated from gl.go using go generate. DO NOT EDIT. // See doc.go for details. -// +build darwin linux openbsd windows -// +build gldebug +//go:build (darwin || linux || openbsd || windows) && gldebug package gl diff --git a/MobileLibrary/go-mobile/gl/gl.go b/MobileLibrary/go-mobile/gl/gl.go index 195a30919..28e747b09 100644 --- a/MobileLibrary/go-mobile/gl/gl.go +++ b/MobileLibrary/go-mobile/gl/gl.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || linux || openbsd || windows) && !gldebug -// +build darwin linux openbsd windows -// +build !gldebug package gl diff --git a/MobileLibrary/go-mobile/gl/gldebug.go b/MobileLibrary/go-mobile/gl/gldebug.go index 5a4bf71da..fb79bb7ac 100644 --- a/MobileLibrary/go-mobile/gl/gldebug.go +++ b/MobileLibrary/go-mobile/gl/gldebug.go @@ -6,8 +6,6 @@ // See doc.go for details. //go:build (darwin || linux || openbsd || windows) && gldebug -// +build darwin linux openbsd windows -// +build gldebug package gl diff --git a/MobileLibrary/go-mobile/gl/types_debug.go b/MobileLibrary/go-mobile/gl/types_debug.go index b6dbaaad0..ea011704e 100644 --- a/MobileLibrary/go-mobile/gl/types_debug.go +++ b/MobileLibrary/go-mobile/gl/types_debug.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || linux || openbsd || windows) && gldebug -// +build darwin linux openbsd windows -// +build gldebug package gl diff --git a/MobileLibrary/go-mobile/gl/types_prod.go b/MobileLibrary/go-mobile/gl/types_prod.go index e1c8b423c..f87f91f75 100644 --- a/MobileLibrary/go-mobile/gl/types_prod.go +++ b/MobileLibrary/go-mobile/gl/types_prod.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || linux || openbsd || windows) && !gldebug -// +build darwin linux openbsd windows -// +build !gldebug package gl diff --git a/MobileLibrary/go-mobile/gl/work.go b/MobileLibrary/go-mobile/gl/work.go index 65eaf9a23..c603adfe6 100644 --- a/MobileLibrary/go-mobile/gl/work.go +++ b/MobileLibrary/go-mobile/gl/work.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || linux || openbsd -// +build darwin linux openbsd package gl diff --git a/MobileLibrary/go-mobile/gl/work_other.go b/MobileLibrary/go-mobile/gl/work_other.go index b4b48f445..3bc597d43 100644 --- a/MobileLibrary/go-mobile/gl/work_other.go +++ b/MobileLibrary/go-mobile/gl/work_other.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (!cgo || (!darwin && !linux && !openbsd)) && !windows -// +build !cgo !darwin,!linux,!openbsd -// +build !windows package gl diff --git a/MobileLibrary/go-mobile/go.mod b/MobileLibrary/go-mobile/go.mod index 4cbf6276e..b63212b84 100644 --- a/MobileLibrary/go-mobile/go.mod +++ b/MobileLibrary/go-mobile/go.mod @@ -1,13 +1,13 @@ module golang.org/x/mobile -go 1.17 +go 1.18 require ( - golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56 - golang.org/x/image v0.0.0-20190802002840-cff245a6509b - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 - golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde - golang.org/x/tools v0.1.12 + golang.org/x/exp/shiny v0.0.0-20230817173708-d852ddb80c63 + golang.org/x/image v0.17.0 + golang.org/x/mod v0.18.0 + golang.org/x/sync v0.7.0 + golang.org/x/tools v0.22.0 ) -require golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect +require golang.org/x/sys v0.21.0 // indirect diff --git a/MobileLibrary/go-mobile/go.sum b/MobileLibrary/go-mobile/go.sum index 7a596ad12..ed74aec4e 100644 --- a/MobileLibrary/go-mobile/go.sum +++ b/MobileLibrary/go-mobile/go.sum @@ -1,41 +1,12 @@ -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56 h1:estk1glOnSVeJ9tdEZZc5mAMDZk5lNJNyJ6DvrBkTEU= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde h1:ejfdSekXMDxDLbRrJMwUk6KnSLZ2McaUCVcIKM+N6jc= -golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/exp/shiny v0.0.0-20230817173708-d852ddb80c63 h1:3AGKexOYqL+ztdWdkB1bDwXgPBuTS/S8A4WzuTvJ8Cg= +golang.org/x/exp/shiny v0.0.0-20230817173708-d852ddb80c63/go.mod h1:UH99kUObWAZkDnWqppdQe5ZhPYESUw8I0zVV1uWBR+0= +golang.org/x/image v0.17.0 h1:nTRVVdajgB8zCMZVsViyzhnMKPwYeroEERRC64JuLco= +golang.org/x/image v0.17.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= diff --git a/MobileLibrary/go-mobile/internal/binres/genarsc.go b/MobileLibrary/go-mobile/internal/binres/genarsc.go index 4ec35fbc5..2e71f29f0 100644 --- a/MobileLibrary/go-mobile/internal/binres/genarsc.go +++ b/MobileLibrary/go-mobile/internal/binres/genarsc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ignore -// +build ignore // Genarsc generates stripped down version of android.jar resources used // for validation of manifest entries. 
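Several hunks in this vendored go-mobile update (the gl package above, genarsc.go, and the gldriver files below) replace legacy `// +build` lines with the single-expression `//go:build` form introduced in Go 1.17. A minimal sketch of the mapping, using the gl constraint from the hunk above (illustrative only, not additional patch content): within one legacy line a comma means AND and a space means OR, and separate legacy lines are ANDed together.

// Before (pre-Go 1.17 syntax): separate lines are ANDed; within a line, space = OR, comma = AND.
// +build !cgo !darwin,!linux,!openbsd
// +build !windows

// After (Go 1.17+): one boolean expression, kept in sync with the legacy lines by gofmt.
//go:build (!cgo || (!darwin && !linux && !openbsd)) && !windows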
diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/AUTHORS b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/CONTRIBUTORS b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/PATENTS b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/LICENSE b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/LICENSE similarity index 100% rename from MobileLibrary/go-mobile/vendor/golang.org/x/exp/LICENSE rename to MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/LICENSE diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/cocoa.go b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/cocoa.go index b258bdc91..99dad1317 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/cocoa.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/cocoa.go @@ -2,9 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin -// +build 386 amd64 -// +build !ios +//go:build darwin && !ios package gldriver @@ -290,7 +288,7 @@ func mouseEvent(id uintptr, x, y, dx, dy float32, ty, button int32, flags uint32 // can produce wheel events in opposite directions, but the // direction matches what other programs on the OS do. // - // If we wanted to expose the phsyical device motion in the + // If we wanted to expose the physical device motion in the // event we could use [NSEvent isDirectionInvertedFromDevice] // to know if "natural scrolling" is enabled. // @@ -412,6 +410,7 @@ func cocoaRune(r rune) rune { // into the standard keycodes used by the key package. // // To get a sense of the key map, see the diagram on +// // http://boredzo.org/blog/archives/2007-05-22/virtual-key-codes func cocoaKeyCode(vkcode uint16) key.Code { switch vkcode { diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/cocoa.m b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/cocoa.m index 4b48e7bda..2c8fd8376 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/cocoa.m +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/cocoa.m @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // +build darwin -// +build 386 amd64 // +build !ios #include "_cgo_export.h" @@ -34,7 +33,6 @@ void makeCurrentContext(uintptr_t context) { NSOpenGLContext* ctx = (NSOpenGLContext*)context; [ctx makeCurrentContext]; - [ctx update]; } void flushContext(uintptr_t context) { diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/context.go b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/context.go index 197be350a..488a38867 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/context.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/context.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !android +//go:build !android package gldriver diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/other.go b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/other.go index bbfc03418..f762750ac 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/other.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/other.go @@ -2,10 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !darwin !386,!amd64 ios -// +build !linux android -// +build !windows -// +build !openbsd +//go:build (!darwin || ios || !cgo) && (!linux || android || !cgo) && (!openbsd || !cgo) && !windows package gldriver @@ -19,13 +16,14 @@ import ( const useLifecycler = true const handleSizeEventsAtChannelReceive = true -func newWindow(opts *screen.NewWindowOptions) (uintptr, error) { return 0, nil } +var errUnsupported = fmt.Errorf("gldriver: unsupported GOOS/GOARCH %s/%s or cgo not enabled", runtime.GOOS, runtime.GOARCH) + +func newWindow(opts *screen.NewWindowOptions) (uintptr, error) { return 0, errUnsupported } func initWindow(id *windowImpl) {} func showWindow(id *windowImpl) {} func closeWindow(id uintptr) {} func drawLoop(w *windowImpl) {} -func main(f func(screen.Screen)) error { - return fmt.Errorf("gldriver: unsupported GOOS/GOARCH %s/%s", runtime.GOOS, runtime.GOARCH) -} +func surfaceCreate() error { return errUnsupported } +func main(f func(screen.Screen)) error { return errUnsupported } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/win32.go b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/win32.go index 343fb4505..c94549ed5 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/win32.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/win32.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build windows +//go:build windows package gldriver diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/x11.c b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/x11.c index dff6d2d64..e7fb2896c 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/x11.c +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/x11.c @@ -6,6 +6,8 @@ #include "_cgo_export.h" #include +#include // for Atom, Colormap, Display, Window +#include // for XVisualInfo #include #include #include diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/x11.go b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/x11.go index f73098d69..0ccdd59fc 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/x11.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/gldriver/x11.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build linux,!android openbsd +//go:build (linux && !android) || openbsd package gldriver @@ -194,8 +194,8 @@ func onExpose(id uintptr) { //export onKeysym func onKeysym(k, unshifted, shifted uint32) { - theKeysyms[k][0] = unshifted - theKeysyms[k][1] = shifted + theKeysyms.Table[k][0] = unshifted + theKeysyms.Table[k][1] = shifted } //export onKey @@ -208,7 +208,7 @@ func onKey(id uintptr, state uint16, detail, dir uint8) { return } - r, c := theKeysyms.Lookup(detail, state, 0) + r, c := theKeysyms.Lookup(detail, state) w.Send(key.Event{ Rune: r, Code: c, diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/internal/win32/key.go b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/internal/win32/key.go index c7b9e383e..da75d9a96 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/internal/win32/key.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/internal/win32/key.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build windows +//go:build windows package win32 @@ -332,14 +332,14 @@ func sendKeyEvent(hwnd syscall.Handle, uMsg uint32, wParam, lParam uintptr) (lRe Modifiers: keyModifiers(), } switch uMsg { - case _WM_KEYDOWN: + case _WM_KEYDOWN, _WM_SYSKEYDOWN: const prevMask = 1 << 30 if repeat := lParam&prevMask == prevMask; repeat { e.Direction = key.DirNone } else { e.Direction = key.DirPress } - case _WM_KEYUP: + case _WM_KEYUP, _WM_SYSKEYUP: e.Direction = key.DirRelease default: panic(fmt.Sprintf("win32: unexpected key message: %d", uMsg)) diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/internal/win32/win32.go b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/internal/win32/win32.go index 692508508..c020ce693 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/internal/win32/win32.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/internal/win32/win32.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build windows +//go:build windows // Package win32 implements a partial shiny screen driver using the Win32 API. // It provides window, lifecycle, key, and mouse management, but no drawing. 
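The internal/win32 hunks above fold WM_SYSKEYDOWN and WM_SYSKEYUP (Alt-modified key messages) into the same path as plain key messages, and sendKeyEvent distinguishes a fresh press from auto-repeat by testing bit 30 of lParam, the Win32 "previous key state" flag. Below is a self-contained Go sketch of that classification; the message constants mirror the standard Win32 values, and classifyKeyDirection is a hypothetical helper written for illustration, not code taken from the patch.

package main

import "fmt"

// Direction mirrors the semantics used by the shiny key package:
// DirPress for a fresh press, DirNone for auto-repeat, DirRelease for key-up.
type Direction int

const (
	DirNone Direction = iota
	DirPress
	DirRelease
)

// Win32 keyboard message identifiers (standard values).
const (
	wmKeyDown    = 0x0100
	wmKeyUp      = 0x0101
	wmSysKeyDown = 0x0104
	wmSysKeyUp   = 0x0105
)

// classifyKeyDirection applies the same rule as the patched sendKeyEvent:
// bit 30 of lParam records whether the key was already down, so a set bit on
// a key-down message marks the event as an auto-repeat rather than a press.
func classifyKeyDirection(uMsg uint32, lParam uintptr) Direction {
	switch uMsg {
	case wmKeyDown, wmSysKeyDown:
		const prevMask = 1 << 30
		if lParam&prevMask == prevMask {
			return DirNone // key was already held: auto-repeat
		}
		return DirPress
	case wmKeyUp, wmSysKeyUp:
		return DirRelease
	}
	return DirNone
}

func main() {
	fmt.Println(classifyKeyDirection(wmSysKeyDown, 0))     // 1 (DirPress)
	fmt.Println(classifyKeyDirection(wmSysKeyDown, 1<<30)) // 0 (DirNone, repeat)
	fmt.Println(classifyKeyDirection(wmSysKeyUp, 1<<30))   // 2 (DirRelease)
}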
@@ -331,9 +331,10 @@ var windowMsgs = map[uint32]func(hwnd syscall.Handle, uMsg uint32, wParam, lPara _WM_MOUSEMOVE: sendMouseEvent, _WM_MOUSEWHEEL: sendMouseEvent, - _WM_KEYDOWN: sendKeyEvent, - _WM_KEYUP: sendKeyEvent, - // TODO case _WM_SYSKEYDOWN, _WM_SYSKEYUP: + _WM_KEYDOWN: sendKeyEvent, + _WM_KEYUP: sendKeyEvent, + _WM_SYSKEYDOWN: sendKeyEvent, + _WM_SYSKEYUP: sendKeyEvent, } func AddWindowMsg(fn func(hwnd syscall.Handle, uMsg uint32, wParam, lParam uintptr)) uint32 { diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/internal/x11key/x11key.go b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/internal/x11key/x11key.go index b916a44c7..546d46306 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/internal/x11key/x11key.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/exp/shiny/driver/internal/x11key/x11key.go @@ -30,18 +30,30 @@ const ( Button5Mask = 1 << 12 ) -type KeysymTable [256][2]uint32 +type KeysymTable struct { + Table [256][6]uint32 + + NumLockMod, ModeSwitchMod, ISOLevel3ShiftMod uint16 +} + +func (t *KeysymTable) Lookup(detail uint8, state uint16) (rune, key.Code) { + te := t.Table[detail][0:2] + if state&t.ModeSwitchMod != 0 { + te = t.Table[detail][2:4] + } + if state&t.ISOLevel3ShiftMod != 0 { + te = t.Table[detail][4:6] + } -func (t *KeysymTable) Lookup(detail uint8, state uint16, numLockMod uint16) (rune, key.Code) { // The key event's rune depends on whether the shift key is down. - unshifted := rune(t[detail][0]) + unshifted := rune(te[0]) r := unshifted - if state&numLockMod != 0 && isKeypad(t[detail][1]) { + if state&t.NumLockMod != 0 && isKeypad(te[1]) { if state&ShiftMask == 0 { - r = rune(t[detail][1]) + r = rune(te[1]) } } else if state&ShiftMask != 0 { - r = rune(t[detail][1]) + r = rune(te[1]) // In X11, a zero keysym when shift is down means to use what the // keysym is when shift is up. if r == 0 { diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/image/AUTHORS b/MobileLibrary/go-mobile/vendor/golang.org/x/image/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/image/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/image/CONTRIBUTORS b/MobileLibrary/go-mobile/vendor/golang.org/x/image/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/image/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/image/draw/draw.go b/MobileLibrary/go-mobile/vendor/golang.org/x/image/draw/draw.go index cd5aaba64..42d5d7e09 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/image/draw/draw.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/image/draw/draw.go @@ -47,6 +47,12 @@ func (floydSteinberg) Draw(dst Image, r image.Rectangle, src image.Image, sp ima // Image is an image.Image with a Set method to change a single pixel. type Image = draw.Image +// RGBA64Image extends both the Image and image.RGBA64Image interfaces with a +// SetRGBA64 method to change a single pixel. 
SetRGBA64 is equivalent to +// calling Set, but it can avoid allocations from converting concrete color +// types to the color.Color interface type. +type RGBA64Image = draw.RGBA64Image + // Op is a Porter-Duff compositing operator. type Op = draw.Op diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/image/draw/impl.go b/MobileLibrary/go-mobile/vendor/golang.org/x/image/draw/impl.go index 75498adbd..94ee8265b 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/image/draw/impl.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/image/draw/impl.go @@ -59,9 +59,16 @@ func (z nnInterpolator) Scale(dst Image, dr image.Rectangle, src image.Image, sr z.scale_RGBA_NRGBA_Over(dst, dr, adr, src, sr, &o) case *image.RGBA: z.scale_RGBA_RGBA_Over(dst, dr, adr, src, sr, &o) + case image.RGBA64Image: + z.scale_RGBA_RGBA64Image_Over(dst, dr, adr, src, sr, &o) default: z.scale_RGBA_Image_Over(dst, dr, adr, src, sr, &o) } + case RGBA64Image: + switch src := src.(type) { + case image.RGBA64Image: + z.scale_RGBA64Image_RGBA64Image_Over(dst, dr, adr, src, sr, &o) + } default: switch src := src.(type) { default: @@ -91,9 +98,16 @@ func (z nnInterpolator) Scale(dst Image, dr image.Rectangle, src image.Image, sr case image.YCbCrSubsampleRatio440: z.scale_RGBA_YCbCr440_Src(dst, dr, adr, src, sr, &o) } + case image.RGBA64Image: + z.scale_RGBA_RGBA64Image_Src(dst, dr, adr, src, sr, &o) default: z.scale_RGBA_Image_Src(dst, dr, adr, src, sr, &o) } + case RGBA64Image: + switch src := src.(type) { + case image.RGBA64Image: + z.scale_RGBA64Image_RGBA64Image_Src(dst, dr, adr, src, sr, &o) + } default: switch src := src.(type) { default: @@ -170,9 +184,16 @@ func (z nnInterpolator) Transform(dst Image, s2d f64.Aff3, src image.Image, sr i z.transform_RGBA_NRGBA_Over(dst, dr, adr, &d2s, src, sr, bias, &o) case *image.RGBA: z.transform_RGBA_RGBA_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.RGBA64Image: + z.transform_RGBA_RGBA64Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) default: z.transform_RGBA_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) } + case RGBA64Image: + switch src := src.(type) { + case image.RGBA64Image: + z.transform_RGBA64Image_RGBA64Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + } default: switch src := src.(type) { default: @@ -202,9 +223,16 @@ func (z nnInterpolator) Transform(dst Image, s2d f64.Aff3, src image.Image, sr i case image.YCbCrSubsampleRatio440: z.transform_RGBA_YCbCr440_Src(dst, dr, adr, &d2s, src, sr, bias, &o) } + case image.RGBA64Image: + z.transform_RGBA_RGBA64Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) default: z.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) } + case RGBA64Image: + switch src := src.(type) { + case image.RGBA64Image: + z.transform_RGBA64Image_RGBA64Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } default: switch src := src.(type) { default: @@ -502,6 +530,45 @@ func (nnInterpolator) scale_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rec } } +func (nnInterpolator) scale_RGBA_RGBA64Image_Over(dst *image.RGBA, dr, adr image.Rectangle, src image.RGBA64Image, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + p := 
src.RGBA64At(sr.Min.X+int(sx), sr.Min.Y+int(sy)) + pa1 := (0xffff - uint32(p.A)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + uint32(p.R)) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + uint32(p.G)) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + uint32(p.B)) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + uint32(p.A)) >> 8) + } + } +} + +func (nnInterpolator) scale_RGBA_RGBA64Image_Src(dst *image.RGBA, dr, adr image.Rectangle, src image.RGBA64Image, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (2*uint64(dx) + 1) * sw / dw2 + p := src.RGBA64At(sr.Min.X+int(sx), sr.Min.Y+int(sy)) + dst.Pix[d+0] = uint8(p.R >> 8) + dst.Pix[d+1] = uint8(p.G >> 8) + dst.Pix[d+2] = uint8(p.B >> 8) + dst.Pix[d+3] = uint8(p.A >> 8) + } + } +} + func (nnInterpolator) scale_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { dw2 := uint64(dr.Dx()) * 2 dh2 := uint64(dr.Dy()) * 2 @@ -541,6 +608,86 @@ func (nnInterpolator) scale_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectan } } +func (nnInterpolator) scale_RGBA64Image_RGBA64Image_Over(dst RGBA64Image, dr, adr image.Rectangle, src image.RGBA64Image, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := color.RGBA64{} + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (2*uint64(dy) + 1) * sh / dh2 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + sx := (2*uint64(dx) + 1) * sw / dw2 + p := src.RGBA64At(sr.Min.X+int(sx), sr.Min.Y+int(sy)) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx), smp.Y+sr.Min.Y+int(sy)).RGBA() + p.R = uint16(uint32(p.R) * ma / 0xffff) + p.G = uint16(uint32(p.G) * ma / 0xffff) + p.B = uint16(uint32(p.B) * ma / 0xffff) + p.A = uint16(uint32(p.A) * ma / 0xffff) + } + q := dst.RGBA64At(dr.Min.X+int(dx), dr.Min.Y+int(dy)) + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + p.R = uint16(uint32(p.R) * ma / 0xffff) + p.G = uint16(uint32(p.G) * ma / 0xffff) + p.B = uint16(uint32(p.B) * ma / 0xffff) + p.A = uint16(uint32(p.A) * ma / 0xffff) + } + pa1 := 0xffff - uint32(p.A) + dstColorRGBA64.R = uint16(uint32(q.R)*pa1/0xffff + uint32(p.R)) + dstColorRGBA64.G = uint16(uint32(q.G)*pa1/0xffff + uint32(p.G)) + dstColorRGBA64.B = uint16(uint32(q.B)*pa1/0xffff + uint32(p.B)) + dstColorRGBA64.A = uint16(uint32(q.A)*pa1/0xffff + uint32(p.A)) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColorRGBA64) + } + } +} + +func (nnInterpolator) scale_RGBA64Image_RGBA64Image_Src(dst RGBA64Image, dr, adr image.Rectangle, src image.RGBA64Image, sr image.Rectangle, opts *Options) { + dw2 := uint64(dr.Dx()) * 2 + dh2 := uint64(dr.Dy()) * 2 + sw := uint64(sr.Dx()) + sh := uint64(sr.Dy()) + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := color.RGBA64{} + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy 
:= (2*uint64(dy) + 1) * sh / dh2 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + sx := (2*uint64(dx) + 1) * sw / dw2 + p := src.RGBA64At(sr.Min.X+int(sx), sr.Min.Y+int(sy)) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx), smp.Y+sr.Min.Y+int(sy)).RGBA() + p.R = uint16(uint32(p.R) * ma / 0xffff) + p.G = uint16(uint32(p.G) * ma / 0xffff) + p.B = uint16(uint32(p.B) * ma / 0xffff) + p.A = uint16(uint32(p.A) * ma / 0xffff) + } + if dstMask != nil { + q := dst.RGBA64At(dr.Min.X+int(dx), dr.Min.Y+int(dy)) + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + p.R = uint16(uint32(p.R) * ma / 0xffff) + p.G = uint16(uint32(p.G) * ma / 0xffff) + p.B = uint16(uint32(p.B) * ma / 0xffff) + p.A = uint16(uint32(p.A) * ma / 0xffff) + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(uint32(q.R)*pa1/0xffff + uint32(p.R)) + dstColorRGBA64.G = uint16(uint32(q.G)*pa1/0xffff + uint32(p.G)) + dstColorRGBA64.B = uint16(uint32(q.B)*pa1/0xffff + uint32(p.B)) + dstColorRGBA64.A = uint16(uint32(q.A)*pa1/0xffff + uint32(p.A)) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColorRGBA64) + } else { + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), p) + } + } + } +} + func (nnInterpolator) scale_Image_Image_Over(dst Image, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { dw2 := uint64(dr.Dx()) * 2 dh2 := uint64(dr.Dy()) * 2 @@ -921,6 +1068,47 @@ func (nnInterpolator) transform_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image } } +func (nnInterpolator) transform_RGBA_RGBA64Image_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.RGBA64Image, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + p := src.RGBA64At(sx0, sy0) + pa1 := (0xffff - uint32(p.A)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + uint32(p.R)) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + uint32(p.G)) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + uint32(p.B)) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + uint32(p.A)) >> 8) + } + } +} + +func (nnInterpolator) transform_RGBA_RGBA64Image_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.RGBA64Image, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + p := src.RGBA64At(sx0, sy0) + dst.Pix[d+0] = uint8(p.R >> 8) + dst.Pix[d+1] = uint8(p.G >> 8) + dst.Pix[d+2] = uint8(p.B >> 8) + dst.Pix[d+3] = uint8(p.A >> 8) + } + } +} + func (nnInterpolator) transform_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts 
*Options) { for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { dyf := float64(dr.Min.Y+int(dy)) + 0.5 @@ -962,6 +1150,88 @@ func (nnInterpolator) transform_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Re } } +func (nnInterpolator) transform_RGBA64Image_RGBA64Image_Over(dst RGBA64Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.RGBA64Image, sr image.Rectangle, bias image.Point, opts *Options) { + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := color.RGBA64{} + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + p := src.RGBA64At(sx0, sy0) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy0).RGBA() + p.R = uint16(uint32(p.R) * ma / 0xffff) + p.G = uint16(uint32(p.G) * ma / 0xffff) + p.B = uint16(uint32(p.B) * ma / 0xffff) + p.A = uint16(uint32(p.A) * ma / 0xffff) + } + q := dst.RGBA64At(dr.Min.X+int(dx), dr.Min.Y+int(dy)) + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + p.R = uint16(uint32(p.R) * ma / 0xffff) + p.G = uint16(uint32(p.G) * ma / 0xffff) + p.B = uint16(uint32(p.B) * ma / 0xffff) + p.A = uint16(uint32(p.A) * ma / 0xffff) + } + pa1 := 0xffff - uint32(p.A) + dstColorRGBA64.R = uint16(uint32(q.R)*pa1/0xffff + uint32(p.R)) + dstColorRGBA64.G = uint16(uint32(q.G)*pa1/0xffff + uint32(p.G)) + dstColorRGBA64.B = uint16(uint32(q.B)*pa1/0xffff + uint32(p.B)) + dstColorRGBA64.A = uint16(uint32(q.A)*pa1/0xffff + uint32(p.A)) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColorRGBA64) + } + } +} + +func (nnInterpolator) transform_RGBA64Image_RGBA64Image_Src(dst RGBA64Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.RGBA64Image, sr image.Rectangle, bias image.Point, opts *Options) { + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := color.RGBA64{} + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx0 := int(d2s[0]*dxf+d2s[1]*dyf+d2s[2]) + bias.X + sy0 := int(d2s[3]*dxf+d2s[4]*dyf+d2s[5]) + bias.Y + if !(image.Point{sx0, sy0}).In(sr) { + continue + } + p := src.RGBA64At(sx0, sy0) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy0).RGBA() + p.R = uint16(uint32(p.R) * ma / 0xffff) + p.G = uint16(uint32(p.G) * ma / 0xffff) + p.B = uint16(uint32(p.B) * ma / 0xffff) + p.A = uint16(uint32(p.A) * ma / 0xffff) + } + if dstMask != nil { + q := dst.RGBA64At(dr.Min.X+int(dx), dr.Min.Y+int(dy)) + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + p.R = uint16(uint32(p.R) * ma / 0xffff) + p.G = uint16(uint32(p.G) * ma / 0xffff) + p.B = uint16(uint32(p.B) * ma / 0xffff) + p.A = uint16(uint32(p.A) * ma / 0xffff) + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(uint32(q.R)*pa1/0xffff + uint32(p.R)) + dstColorRGBA64.G = uint16(uint32(q.G)*pa1/0xffff + uint32(p.G)) + dstColorRGBA64.B = uint16(uint32(q.B)*pa1/0xffff + uint32(p.B)) + dstColorRGBA64.A = uint16(uint32(q.A)*pa1/0xffff + uint32(p.A)) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColorRGBA64) + } else { + 
dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), p) + } + } + } +} + func (nnInterpolator) transform_Image_Image_Over(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { srcMask, smp := opts.SrcMask, opts.SrcMaskP dstMask, dmp := opts.DstMask, opts.DstMaskP @@ -1097,9 +1367,16 @@ func (z ablInterpolator) Scale(dst Image, dr image.Rectangle, src image.Image, s z.scale_RGBA_NRGBA_Over(dst, dr, adr, src, sr, &o) case *image.RGBA: z.scale_RGBA_RGBA_Over(dst, dr, adr, src, sr, &o) + case image.RGBA64Image: + z.scale_RGBA_RGBA64Image_Over(dst, dr, adr, src, sr, &o) default: z.scale_RGBA_Image_Over(dst, dr, adr, src, sr, &o) } + case RGBA64Image: + switch src := src.(type) { + case image.RGBA64Image: + z.scale_RGBA64Image_RGBA64Image_Over(dst, dr, adr, src, sr, &o) + } default: switch src := src.(type) { default: @@ -1129,9 +1406,16 @@ func (z ablInterpolator) Scale(dst Image, dr image.Rectangle, src image.Image, s case image.YCbCrSubsampleRatio440: z.scale_RGBA_YCbCr440_Src(dst, dr, adr, src, sr, &o) } + case image.RGBA64Image: + z.scale_RGBA_RGBA64Image_Src(dst, dr, adr, src, sr, &o) default: z.scale_RGBA_Image_Src(dst, dr, adr, src, sr, &o) } + case RGBA64Image: + switch src := src.(type) { + case image.RGBA64Image: + z.scale_RGBA64Image_RGBA64Image_Src(dst, dr, adr, src, sr, &o) + } default: switch src := src.(type) { default: @@ -1208,9 +1492,16 @@ func (z ablInterpolator) Transform(dst Image, s2d f64.Aff3, src image.Image, sr z.transform_RGBA_NRGBA_Over(dst, dr, adr, &d2s, src, sr, bias, &o) case *image.RGBA: z.transform_RGBA_RGBA_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + case image.RGBA64Image: + z.transform_RGBA_RGBA64Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) default: z.transform_RGBA_Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) } + case RGBA64Image: + switch src := src.(type) { + case image.RGBA64Image: + z.transform_RGBA64Image_RGBA64Image_Over(dst, dr, adr, &d2s, src, sr, bias, &o) + } default: switch src := src.(type) { default: @@ -1240,9 +1531,16 @@ func (z ablInterpolator) Transform(dst Image, s2d f64.Aff3, src image.Image, sr case image.YCbCrSubsampleRatio440: z.transform_RGBA_YCbCr440_Src(dst, dr, adr, &d2s, src, sr, bias, &o) } + case image.RGBA64Image: + z.transform_RGBA_RGBA64Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) default: z.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) } + case RGBA64Image: + switch src := src.(type) { + case image.RGBA64Image: + z.transform_RGBA64Image_RGBA64Image_Src(dst, dr, adr, &d2s, src, sr, bias, &o) + } default: switch src := src.(type) { default: @@ -2415,7 +2713,7 @@ func (ablInterpolator) scale_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Re } } -func (ablInterpolator) scale_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { +func (ablInterpolator) scale_RGBA_RGBA64Image_Over(dst *image.RGBA, dr, adr image.Rectangle, src image.RGBA64Image, sr image.Rectangle, opts *Options) { sw := int32(sr.Dx()) sh := int32(sr.Dy()) yscale := float64(sh) / float64(dr.Dy()) @@ -2454,30 +2752,30 @@ func (ablInterpolator) scale_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rect xFrac0, xFrac1 = 1, 0 } - s00ru, s00gu, s00bu, s00au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)).RGBA() - s00r := float64(s00ru) - s00g := float64(s00gu) - s00b := float64(s00bu) - s00a := float64(s00au) - s10ru, s10gu, s10bu, s10au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)).RGBA() - s10r := float64(s10ru) - 
s10g := float64(s10gu) - s10b := float64(s10bu) - s10a := float64(s10au) + s00u := src.RGBA64At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)) + s00r := float64(s00u.R) + s00g := float64(s00u.G) + s00b := float64(s00u.B) + s00a := float64(s00u.A) + s10u := src.RGBA64At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)) + s10r := float64(s10u.R) + s10g := float64(s10u.G) + s10b := float64(s10u.B) + s10a := float64(s10u.A) s10r = xFrac1*s00r + xFrac0*s10r s10g = xFrac1*s00g + xFrac0*s10g s10b = xFrac1*s00b + xFrac0*s10b s10a = xFrac1*s00a + xFrac0*s10a - s01ru, s01gu, s01bu, s01au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)).RGBA() - s01r := float64(s01ru) - s01g := float64(s01gu) - s01b := float64(s01bu) - s01a := float64(s01au) - s11ru, s11gu, s11bu, s11au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)).RGBA() - s11r := float64(s11ru) - s11g := float64(s11gu) - s11b := float64(s11bu) - s11a := float64(s11au) + s01u := src.RGBA64At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)) + s01r := float64(s01u.R) + s01g := float64(s01u.G) + s01b := float64(s01u.B) + s01a := float64(s01u.A) + s11u := src.RGBA64At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)) + s11r := float64(s11u.R) + s11g := float64(s11u.G) + s11b := float64(s11u.B) + s11a := float64(s11u.A) s11r = xFrac1*s01r + xFrac0*s11r s11g = xFrac1*s01g + xFrac0*s11g s11b = xFrac1*s01b + xFrac0*s11b @@ -2486,20 +2784,17 @@ func (ablInterpolator) scale_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rect s11g = yFrac1*s10g + yFrac0*s11g s11b = yFrac1*s10b + yFrac0*s11b s11a = yFrac1*s10a + yFrac0*s11a - pr := uint32(s11r) - pg := uint32(s11g) - pb := uint32(s11b) - pa := uint32(s11a) - pa1 := (0xffff - pa) * 0x101 - dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) - dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) - dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) - dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + p := color.RGBA64{uint16(s11r), uint16(s11g), uint16(s11b), uint16(s11a)} + pa1 := (0xffff - uint32(p.A)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + uint32(p.R)) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + uint32(p.G)) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + uint32(p.B)) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + uint32(p.A)) >> 8) } } } -func (ablInterpolator) scale_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { +func (ablInterpolator) scale_RGBA_RGBA64Image_Src(dst *image.RGBA, dr, adr image.Rectangle, src image.RGBA64Image, sr image.Rectangle, opts *Options) { sw := int32(sr.Dx()) sh := int32(sr.Dy()) yscale := float64(sh) / float64(dr.Dy()) @@ -2538,30 +2833,30 @@ func (ablInterpolator) scale_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Recta xFrac0, xFrac1 = 1, 0 } - s00ru, s00gu, s00bu, s00au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)).RGBA() - s00r := float64(s00ru) - s00g := float64(s00gu) - s00b := float64(s00bu) - s00a := float64(s00au) - s10ru, s10gu, s10bu, s10au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)).RGBA() - s10r := float64(s10ru) - s10g := float64(s10gu) - s10b := float64(s10bu) - s10a := float64(s10au) + s00u := src.RGBA64At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)) + s00r := float64(s00u.R) + s00g := float64(s00u.G) + s00b := float64(s00u.B) + s00a := float64(s00u.A) + s10u := src.RGBA64At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)) + s10r := float64(s10u.R) + s10g := float64(s10u.G) + s10b := float64(s10u.B) + s10a := 
float64(s10u.A) s10r = xFrac1*s00r + xFrac0*s10r s10g = xFrac1*s00g + xFrac0*s10g s10b = xFrac1*s00b + xFrac0*s10b s10a = xFrac1*s00a + xFrac0*s10a - s01ru, s01gu, s01bu, s01au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)).RGBA() - s01r := float64(s01ru) - s01g := float64(s01gu) - s01b := float64(s01bu) - s01a := float64(s01au) - s11ru, s11gu, s11bu, s11au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)).RGBA() - s11r := float64(s11ru) - s11g := float64(s11gu) - s11b := float64(s11bu) - s11a := float64(s11au) + s01u := src.RGBA64At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)) + s01r := float64(s01u.R) + s01g := float64(s01u.G) + s01b := float64(s01u.B) + s01a := float64(s01u.A) + s11u := src.RGBA64At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)) + s11r := float64(s11u.R) + s11g := float64(s11u.G) + s11b := float64(s11u.B) + s11a := float64(s11u.A) s11r = xFrac1*s01r + xFrac0*s11r s11g = xFrac1*s01g + xFrac0*s11g s11b = xFrac1*s01b + xFrac0*s11b @@ -2570,28 +2865,21 @@ func (ablInterpolator) scale_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Recta s11g = yFrac1*s10g + yFrac0*s11g s11b = yFrac1*s10b + yFrac0*s11b s11a = yFrac1*s10a + yFrac0*s11a - pr := uint32(s11r) - pg := uint32(s11g) - pb := uint32(s11b) - pa := uint32(s11a) - dst.Pix[d+0] = uint8(pr >> 8) - dst.Pix[d+1] = uint8(pg >> 8) - dst.Pix[d+2] = uint8(pb >> 8) - dst.Pix[d+3] = uint8(pa >> 8) + p := color.RGBA64{uint16(s11r), uint16(s11g), uint16(s11b), uint16(s11a)} + dst.Pix[d+0] = uint8(p.R >> 8) + dst.Pix[d+1] = uint8(p.G >> 8) + dst.Pix[d+2] = uint8(p.B >> 8) + dst.Pix[d+3] = uint8(p.A >> 8) } } } -func (ablInterpolator) scale_Image_Image_Over(dst Image, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { +func (ablInterpolator) scale_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { sw := int32(sr.Dx()) sh := int32(sr.Dy()) yscale := float64(sh) / float64(dr.Dy()) xscale := float64(sw) / float64(dr.Dx()) swMinus1, shMinus1 := sw-1, sh-1 - srcMask, smp := opts.SrcMask, opts.SrcMaskP - dstMask, dmp := opts.DstMask, opts.DstMaskP - dstColorRGBA64 := &color.RGBA64{} - dstColor := color.Color(dstColorRGBA64) for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { sy := (float64(dy)+0.5)*yscale - 0.5 @@ -2609,8 +2897,9 @@ func (ablInterpolator) scale_Image_Image_Over(dst Image, dr, adr image.Rectangle sy0, sy1 = shMinus1, shMinus1 yFrac0, yFrac1 = 1, 0 } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 - for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { sx := (float64(dx)+0.5)*xscale - 0.5 sx0 := int32(sx) xFrac0 := sx - float64(sx0) @@ -2625,25 +2914,11 @@ func (ablInterpolator) scale_Image_Image_Over(dst Image, dr, adr image.Rectangle } s00ru, s00gu, s00bu, s00au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)).RGBA() - if srcMask != nil { - _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy0)).RGBA() - s00ru = s00ru * ma / 0xffff - s00gu = s00gu * ma / 0xffff - s00bu = s00bu * ma / 0xffff - s00au = s00au * ma / 0xffff - } s00r := float64(s00ru) s00g := float64(s00gu) s00b := float64(s00bu) s00a := float64(s00au) s10ru, s10gu, s10bu, s10au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)).RGBA() - if srcMask != nil { - _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy0)).RGBA() - s10ru = s10ru * ma / 0xffff - s10gu = s10gu * ma / 0xffff - s10bu = s10bu * ma / 0xffff - s10au = s10au * ma / 
0xffff - } s10r := float64(s10ru) s10g := float64(s10gu) s10b := float64(s10bu) @@ -2653,25 +2928,11 @@ func (ablInterpolator) scale_Image_Image_Over(dst Image, dr, adr image.Rectangle s10b = xFrac1*s00b + xFrac0*s10b s10a = xFrac1*s00a + xFrac0*s10a s01ru, s01gu, s01bu, s01au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)).RGBA() - if srcMask != nil { - _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy1)).RGBA() - s01ru = s01ru * ma / 0xffff - s01gu = s01gu * ma / 0xffff - s01bu = s01bu * ma / 0xffff - s01au = s01au * ma / 0xffff - } s01r := float64(s01ru) s01g := float64(s01gu) s01b := float64(s01bu) s01a := float64(s01au) s11ru, s11gu, s11bu, s11au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)).RGBA() - if srcMask != nil { - _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy1)).RGBA() - s11ru = s11ru * ma / 0xffff - s11gu = s11gu * ma / 0xffff - s11bu = s11bu * ma / 0xffff - s11au = s11au * ma / 0xffff - } s11r := float64(s11ru) s11g := float64(s11gu) s11b := float64(s11bu) @@ -2688,25 +2949,341 @@ func (ablInterpolator) scale_Image_Image_Over(dst Image, dr, adr image.Rectangle pg := uint32(s11g) pb := uint32(s11b) pa := uint32(s11a) - qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() - if dstMask != nil { + pa1 := (0xffff - pa) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa) >> 8) + } + } +} + +func (ablInterpolator) scale_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)).RGBA() + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)).RGBA() + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)).RGBA() + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)).RGBA() + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + dst.Pix[d+0] = uint8(pr >> 8) + dst.Pix[d+1] = uint8(pg >> 8) + dst.Pix[d+2] = uint8(pb >> 8) + dst.Pix[d+3] = uint8(pa >> 8) + } + } +} + +func (ablInterpolator) scale_RGBA64Image_RGBA64Image_Over(dst RGBA64Image, dr, adr image.Rectangle, src image.RGBA64Image, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := color.RGBA64{} + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. 
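// A worked instance of the comment above (illustrative aside, not a patch line):
// for sy = -0.3, int32(sy) truncates toward zero and gives 0, whereas
// int32(math.Floor(sy)) would give -1; either way the sy < 0 branch below
// forces sy0, sy1 = 0, 0, so the cheaper truncation changes nothing.
// For sy >= 0 truncation and floor agree, so the two forms never diverge.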
+ sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00u := src.RGBA64At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s00u.R = uint16(uint32(s00u.R) * ma / 0xffff) + s00u.G = uint16(uint32(s00u.G) * ma / 0xffff) + s00u.B = uint16(uint32(s00u.B) * ma / 0xffff) + s00u.A = uint16(uint32(s00u.A) * ma / 0xffff) + } + s00r := float64(s00u.R) + s00g := float64(s00u.G) + s00b := float64(s00u.B) + s00a := float64(s00u.A) + s10u := src.RGBA64At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s10u.R = uint16(uint32(s10u.R) * ma / 0xffff) + s10u.G = uint16(uint32(s10u.G) * ma / 0xffff) + s10u.B = uint16(uint32(s10u.B) * ma / 0xffff) + s10u.A = uint16(uint32(s10u.A) * ma / 0xffff) + } + s10r := float64(s10u.R) + s10g := float64(s10u.G) + s10b := float64(s10u.B) + s10a := float64(s10u.A) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01u := src.RGBA64At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s01u.R = uint16(uint32(s01u.R) * ma / 0xffff) + s01u.G = uint16(uint32(s01u.G) * ma / 0xffff) + s01u.B = uint16(uint32(s01u.B) * ma / 0xffff) + s01u.A = uint16(uint32(s01u.A) * ma / 0xffff) + } + s01r := float64(s01u.R) + s01g := float64(s01u.G) + s01b := float64(s01u.B) + s01a := float64(s01u.A) + s11u := src.RGBA64At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s11u.R = uint16(uint32(s11u.R) * ma / 0xffff) + s11u.G = uint16(uint32(s11u.G) * ma / 0xffff) + s11u.B = uint16(uint32(s11u.B) * ma / 0xffff) + s11u.A = uint16(uint32(s11u.A) * ma / 0xffff) + } + s11r := float64(s11u.R) + s11g := float64(s11u.G) + s11b := float64(s11u.B) + s11a := float64(s11u.A) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + p := color.RGBA64{uint16(s11r), uint16(s11g), uint16(s11b), uint16(s11a)} + q := dst.RGBA64At(dr.Min.X+int(dx), dr.Min.Y+int(dy)) + if dstMask != nil { _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() - pr = pr * ma / 0xffff - pg = pg * ma / 0xffff - pb = pb * ma / 0xffff - pa = pa * ma / 0xffff + p.R = uint16(uint32(p.R) * ma / 0xffff) + p.G = uint16(uint32(p.G) * ma / 0xffff) + p.B = uint16(uint32(p.B) * ma / 0xffff) + p.A = uint16(uint32(p.A) * ma / 0xffff) + } + pa1 := 0xffff - uint32(p.A) + dstColorRGBA64.R = uint16(uint32(q.R)*pa1/0xffff + uint32(p.R)) + dstColorRGBA64.G = uint16(uint32(q.G)*pa1/0xffff + uint32(p.G)) + dstColorRGBA64.B = 
uint16(uint32(q.B)*pa1/0xffff + uint32(p.B)) + dstColorRGBA64.A = uint16(uint32(q.A)*pa1/0xffff + uint32(p.A)) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColorRGBA64) + } + } +} + +func (ablInterpolator) scale_RGBA64Image_RGBA64Image_Src(dst RGBA64Image, dr, adr image.Rectangle, src image.RGBA64Image, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) + xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := color.RGBA64{} + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. + sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00u := src.RGBA64At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s00u.R = uint16(uint32(s00u.R) * ma / 0xffff) + s00u.G = uint16(uint32(s00u.G) * ma / 0xffff) + s00u.B = uint16(uint32(s00u.B) * ma / 0xffff) + s00u.A = uint16(uint32(s00u.A) * ma / 0xffff) + } + s00r := float64(s00u.R) + s00g := float64(s00u.G) + s00b := float64(s00u.B) + s00a := float64(s00u.A) + s10u := src.RGBA64At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s10u.R = uint16(uint32(s10u.R) * ma / 0xffff) + s10u.G = uint16(uint32(s10u.G) * ma / 0xffff) + s10u.B = uint16(uint32(s10u.B) * ma / 0xffff) + s10u.A = uint16(uint32(s10u.A) * ma / 0xffff) + } + s10r := float64(s10u.R) + s10g := float64(s10u.G) + s10b := float64(s10u.B) + s10a := float64(s10u.A) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01u := src.RGBA64At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s01u.R = uint16(uint32(s01u.R) * ma / 0xffff) + s01u.G = uint16(uint32(s01u.G) * ma / 0xffff) + s01u.B = uint16(uint32(s01u.B) * ma / 0xffff) + s01u.A = uint16(uint32(s01u.A) * ma / 0xffff) + } + s01r := float64(s01u.R) + s01g := float64(s01u.G) + s01b := float64(s01u.B) + s01a := float64(s01u.A) + s11u := src.RGBA64At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s11u.R = uint16(uint32(s11u.R) * ma / 0xffff) + s11u.G = uint16(uint32(s11u.G) * ma / 0xffff) + s11u.B = uint16(uint32(s11u.B) * ma / 0xffff) + s11u.A = uint16(uint32(s11u.A) * ma / 0xffff) + } + s11r := float64(s11u.R) + s11g := float64(s11u.G) + s11b := float64(s11u.B) + s11a := float64(s11u.A) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + 
s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + p := color.RGBA64{uint16(s11r), uint16(s11g), uint16(s11b), uint16(s11a)} + if dstMask != nil { + q := dst.RGBA64At(dr.Min.X+int(dx), dr.Min.Y+int(dy)) + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + p.R = uint16(uint32(p.R) * ma / 0xffff) + p.G = uint16(uint32(p.G) * ma / 0xffff) + p.B = uint16(uint32(p.B) * ma / 0xffff) + p.A = uint16(uint32(p.A) * ma / 0xffff) + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(uint32(q.R)*pa1/0xffff + uint32(p.R)) + dstColorRGBA64.G = uint16(uint32(q.G)*pa1/0xffff + uint32(p.G)) + dstColorRGBA64.B = uint16(uint32(q.B)*pa1/0xffff + uint32(p.B)) + dstColorRGBA64.A = uint16(uint32(q.A)*pa1/0xffff + uint32(p.A)) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColorRGBA64) + } else { + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), p) } - pa1 := 0xffff - pa - dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) - dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) - dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) - dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) - dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) } } } -func (ablInterpolator) scale_Image_Image_Src(dst Image, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { +func (ablInterpolator) scale_Image_Image_Over(dst Image, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { sw := int32(sr.Dx()) sh := int32(sr.Dy()) yscale := float64(sh) / float64(dr.Dy()) @@ -2812,35 +3389,159 @@ func (ablInterpolator) scale_Image_Image_Src(dst Image, dr, adr image.Rectangle, pg := uint32(s11g) pb := uint32(s11b) pa := uint32(s11a) + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() if dstMask != nil { - qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() pr = pr * ma / 0xffff pg = pg * ma / 0xffff pb = pb * ma / 0xffff pa = pa * ma / 0xffff - pa1 := 0xffff - ma - dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) - dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) - dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) - dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) - dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) - } else { - dstColorRGBA64.R = uint16(pr) - dstColorRGBA64.G = uint16(pg) - dstColorRGBA64.B = uint16(pb) - dstColorRGBA64.A = uint16(pa) - dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) } + pa1 := 0xffff - pa + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) } } } -func (ablInterpolator) transform_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.Gray, sr image.Rectangle, bias image.Point, opts *Options) { - for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { - dyf := float64(dr.Min.Y+int(dy)) + 0.5 - d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 - for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { +func (ablInterpolator) scale_Image_Image_Src(dst Image, dr, adr image.Rectangle, src image.Image, sr image.Rectangle, opts *Options) { + sw := int32(sr.Dx()) + sh := int32(sr.Dy()) + yscale := float64(sh) / float64(dr.Dy()) 
+ xscale := float64(sw) / float64(dr.Dx()) + swMinus1, shMinus1 := sw-1, sh-1 + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + sy := (float64(dy)+0.5)*yscale - 0.5 + // If sy < 0, we will clamp sy0 to 0 anyway, so it doesn't matter if + // we say int32(sy) instead of int32(math.Floor(sy)). Similarly for + // sx, below. + sy0 := int32(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy1 := sy0 + 1 + if sy < 0 { + sy0, sy1 = 0, 0 + yFrac0, yFrac1 = 0, 1 + } else if sy1 > shMinus1 { + sy0, sy1 = shMinus1, shMinus1 + yFrac0, yFrac1 = 1, 0 + } + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + sx := (float64(dx)+0.5)*xscale - 0.5 + sx0 := int32(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx1 := sx0 + 1 + if sx < 0 { + sx0, sx1 = 0, 0 + xFrac0, xFrac1 = 0, 1 + } else if sx1 > swMinus1 { + sx0, sx1 = swMinus1, swMinus1 + xFrac0, xFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy0)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s00ru = s00ru * ma / 0xffff + s00gu = s00gu * ma / 0xffff + s00bu = s00bu * ma / 0xffff + s00au = s00au * ma / 0xffff + } + s00r := float64(s00ru) + s00g := float64(s00gu) + s00b := float64(s00bu) + s00a := float64(s00au) + s10ru, s10gu, s10bu, s10au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy0)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy0)).RGBA() + s10ru = s10ru * ma / 0xffff + s10gu = s10gu * ma / 0xffff + s10bu = s10bu * ma / 0xffff + s10au = s10au * ma / 0xffff + } + s10r := float64(s10ru) + s10g := float64(s10gu) + s10b := float64(s10bu) + s10a := float64(s10au) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01ru, s01gu, s01bu, s01au := src.At(sr.Min.X+int(sx0), sr.Min.Y+int(sy1)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx0), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s01ru = s01ru * ma / 0xffff + s01gu = s01gu * ma / 0xffff + s01bu = s01bu * ma / 0xffff + s01au = s01au * ma / 0xffff + } + s01r := float64(s01ru) + s01g := float64(s01gu) + s01b := float64(s01bu) + s01a := float64(s01au) + s11ru, s11gu, s11bu, s11au := src.At(sr.Min.X+int(sx1), sr.Min.Y+int(sy1)).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(sx1), smp.Y+sr.Min.Y+int(sy1)).RGBA() + s11ru = s11ru * ma / 0xffff + s11gu = s11gu * ma / 0xffff + s11bu = s11bu * ma / 0xffff + s11au = s11au * ma / 0xffff + } + s11r := float64(s11ru) + s11g := float64(s11gu) + s11b := float64(s11bu) + s11a := float64(s11au) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + pr := uint32(s11r) + pg := uint32(s11g) + pb := uint32(s11b) + pa := uint32(s11a) + if dstMask != nil { + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(dy)).RGBA() + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr = pr * ma / 0xffff + pg = pg * ma / 0xffff + pb = pb * ma / 0xffff + pa = pa * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = 
uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } else { + dstColorRGBA64.R = uint16(pr) + dstColorRGBA64.G = uint16(pg) + dstColorRGBA64.B = uint16(pb) + dstColorRGBA64.A = uint16(pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColor) + } + } + } +} + +func (ablInterpolator) transform_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.Gray, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { dxf := float64(dr.Min.X+int(dx)) + 0.5 sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] @@ -4007,6 +4708,169 @@ func (ablInterpolator) transform_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr imag } } +func (ablInterpolator) transform_RGBA_RGBA64Image_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.RGBA64Image, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00u := src.RGBA64At(sx0, sy0) + s00r := float64(s00u.R) + s00g := float64(s00u.G) + s00b := float64(s00u.B) + s00a := float64(s00u.A) + s10u := src.RGBA64At(sx1, sy0) + s10r := float64(s10u.R) + s10g := float64(s10u.G) + s10b := float64(s10u.B) + s10a := float64(s10u.A) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01u := src.RGBA64At(sx0, sy1) + s01r := float64(s01u.R) + s01g := float64(s01u.G) + s01b := float64(s01u.B) + s01a := float64(s01u.A) + s11u := src.RGBA64At(sx1, sy1) + s11r := float64(s11u.R) + s11g := float64(s11u.G) + s11b := float64(s11u.B) + s11a := float64(s11u.A) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + p := color.RGBA64{uint16(s11r), uint16(s11g), uint16(s11b), uint16(s11a)} + pa1 := (0xffff - uint32(p.A)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + uint32(p.R)) >> 8) + dst.Pix[d+1] = 
uint8((uint32(dst.Pix[d+1])*pa1/0xffff + uint32(p.G)) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + uint32(p.B)) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + uint32(p.A)) >> 8) + } + } +} + +func (ablInterpolator) transform_RGBA_RGBA64Image_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.RGBA64Image, sr image.Rectangle, bias image.Point, opts *Options) { + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00u := src.RGBA64At(sx0, sy0) + s00r := float64(s00u.R) + s00g := float64(s00u.G) + s00b := float64(s00u.B) + s00a := float64(s00u.A) + s10u := src.RGBA64At(sx1, sy0) + s10r := float64(s10u.R) + s10g := float64(s10u.G) + s10b := float64(s10u.B) + s10a := float64(s10u.A) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01u := src.RGBA64At(sx0, sy1) + s01r := float64(s01u.R) + s01g := float64(s01u.G) + s01b := float64(s01u.B) + s01a := float64(s01u.A) + s11u := src.RGBA64At(sx1, sy1) + s11r := float64(s11u.R) + s11g := float64(s11u.G) + s11b := float64(s11u.B) + s11a := float64(s11u.A) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + p := color.RGBA64{uint16(s11r), uint16(s11g), uint16(s11b), uint16(s11a)} + dst.Pix[d+0] = uint8(p.R >> 8) + dst.Pix[d+1] = uint8(p.G >> 8) + dst.Pix[d+2] = uint8(p.B >> 8) + dst.Pix[d+3] = uint8(p.A >> 8) + } + } +} + func (ablInterpolator) transform_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { dyf := float64(dr.Min.Y+int(dy)) + 0.5 @@ -4176,11 +5040,11 @@ func (ablInterpolator) transform_RGBA_Image_Src(dst *image.RGBA, dr, adr image.R } } -func (ablInterpolator) transform_Image_Image_Over(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { +func (ablInterpolator) transform_RGBA64Image_RGBA64Image_Over(dst RGBA64Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.RGBA64Image, sr image.Rectangle, bias image.Point, opts *Options) { srcMask, smp := opts.SrcMask, opts.SrcMaskP dstMask, dmp := opts.DstMask, opts.DstMaskP - dstColorRGBA64 := &color.RGBA64{} - dstColor := 
color.Color(dstColorRGBA64) + dstColorRGBA64 := color.RGBA64{} + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { dyf := float64(dr.Min.Y+int(dy)) + 0.5 for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { @@ -4219,15 +5083,261 @@ func (ablInterpolator) transform_Image_Image_Over(dst Image, dr, adr image.Recta yFrac0, yFrac1 = 1, 0 } - s00ru, s00gu, s00bu, s00au := src.At(sx0, sy0).RGBA() + s00u := src.RGBA64At(sx0, sy0) if srcMask != nil { _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy0).RGBA() - s00ru = s00ru * ma / 0xffff - s00gu = s00gu * ma / 0xffff - s00bu = s00bu * ma / 0xffff - s00au = s00au * ma / 0xffff - } - s00r := float64(s00ru) + s00u.R = uint16(uint32(s00u.R) * ma / 0xffff) + s00u.G = uint16(uint32(s00u.G) * ma / 0xffff) + s00u.B = uint16(uint32(s00u.B) * ma / 0xffff) + s00u.A = uint16(uint32(s00u.A) * ma / 0xffff) + } + s00r := float64(s00u.R) + s00g := float64(s00u.G) + s00b := float64(s00u.B) + s00a := float64(s00u.A) + s10u := src.RGBA64At(sx1, sy0) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx1, smp.Y+sy0).RGBA() + s10u.R = uint16(uint32(s10u.R) * ma / 0xffff) + s10u.G = uint16(uint32(s10u.G) * ma / 0xffff) + s10u.B = uint16(uint32(s10u.B) * ma / 0xffff) + s10u.A = uint16(uint32(s10u.A) * ma / 0xffff) + } + s10r := float64(s10u.R) + s10g := float64(s10u.G) + s10b := float64(s10u.B) + s10a := float64(s10u.A) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01u := src.RGBA64At(sx0, sy1) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy1).RGBA() + s01u.R = uint16(uint32(s01u.R) * ma / 0xffff) + s01u.G = uint16(uint32(s01u.G) * ma / 0xffff) + s01u.B = uint16(uint32(s01u.B) * ma / 0xffff) + s01u.A = uint16(uint32(s01u.A) * ma / 0xffff) + } + s01r := float64(s01u.R) + s01g := float64(s01u.G) + s01b := float64(s01u.B) + s01a := float64(s01u.A) + s11u := src.RGBA64At(sx1, sy1) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx1, smp.Y+sy1).RGBA() + s11u.R = uint16(uint32(s11u.R) * ma / 0xffff) + s11u.G = uint16(uint32(s11u.G) * ma / 0xffff) + s11u.B = uint16(uint32(s11u.B) * ma / 0xffff) + s11u.A = uint16(uint32(s11u.A) * ma / 0xffff) + } + s11r := float64(s11u.R) + s11g := float64(s11u.G) + s11b := float64(s11u.B) + s11a := float64(s11u.A) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + p := color.RGBA64{uint16(s11r), uint16(s11g), uint16(s11b), uint16(s11a)} + q := dst.RGBA64At(dr.Min.X+int(dx), dr.Min.Y+int(dy)) + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + p.R = uint16(uint32(p.R) * ma / 0xffff) + p.G = uint16(uint32(p.G) * ma / 0xffff) + p.B = uint16(uint32(p.B) * ma / 0xffff) + p.A = uint16(uint32(p.A) * ma / 0xffff) + } + pa1 := 0xffff - uint32(p.A) + dstColorRGBA64.R = uint16(uint32(q.R)*pa1/0xffff + uint32(p.R)) + dstColorRGBA64.G = uint16(uint32(q.G)*pa1/0xffff + uint32(p.G)) + dstColorRGBA64.B = uint16(uint32(q.B)*pa1/0xffff + uint32(p.B)) + dstColorRGBA64.A = uint16(uint32(q.A)*pa1/0xffff + uint32(p.A)) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColorRGBA64) + } + } +} + +func (ablInterpolator) transform_RGBA64Image_RGBA64Image_Src(dst RGBA64Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.RGBA64Image, sr 
image.Rectangle, bias image.Point, opts *Options) { + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := color.RGBA64{} + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00u := src.RGBA64At(sx0, sy0) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy0).RGBA() + s00u.R = uint16(uint32(s00u.R) * ma / 0xffff) + s00u.G = uint16(uint32(s00u.G) * ma / 0xffff) + s00u.B = uint16(uint32(s00u.B) * ma / 0xffff) + s00u.A = uint16(uint32(s00u.A) * ma / 0xffff) + } + s00r := float64(s00u.R) + s00g := float64(s00u.G) + s00b := float64(s00u.B) + s00a := float64(s00u.A) + s10u := src.RGBA64At(sx1, sy0) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx1, smp.Y+sy0).RGBA() + s10u.R = uint16(uint32(s10u.R) * ma / 0xffff) + s10u.G = uint16(uint32(s10u.G) * ma / 0xffff) + s10u.B = uint16(uint32(s10u.B) * ma / 0xffff) + s10u.A = uint16(uint32(s10u.A) * ma / 0xffff) + } + s10r := float64(s10u.R) + s10g := float64(s10u.G) + s10b := float64(s10u.B) + s10a := float64(s10u.A) + s10r = xFrac1*s00r + xFrac0*s10r + s10g = xFrac1*s00g + xFrac0*s10g + s10b = xFrac1*s00b + xFrac0*s10b + s10a = xFrac1*s00a + xFrac0*s10a + s01u := src.RGBA64At(sx0, sy1) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy1).RGBA() + s01u.R = uint16(uint32(s01u.R) * ma / 0xffff) + s01u.G = uint16(uint32(s01u.G) * ma / 0xffff) + s01u.B = uint16(uint32(s01u.B) * ma / 0xffff) + s01u.A = uint16(uint32(s01u.A) * ma / 0xffff) + } + s01r := float64(s01u.R) + s01g := float64(s01u.G) + s01b := float64(s01u.B) + s01a := float64(s01u.A) + s11u := src.RGBA64At(sx1, sy1) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx1, smp.Y+sy1).RGBA() + s11u.R = uint16(uint32(s11u.R) * ma / 0xffff) + s11u.G = uint16(uint32(s11u.G) * ma / 0xffff) + s11u.B = uint16(uint32(s11u.B) * ma / 0xffff) + s11u.A = uint16(uint32(s11u.A) * ma / 0xffff) + } + s11r := float64(s11u.R) + s11g := float64(s11u.G) + s11b := float64(s11u.B) + s11a := float64(s11u.A) + s11r = xFrac1*s01r + xFrac0*s11r + s11g = xFrac1*s01g + xFrac0*s11g + s11b = xFrac1*s01b + xFrac0*s11b + s11a = xFrac1*s01a + xFrac0*s11a + s11r = yFrac1*s10r + yFrac0*s11r + s11g = yFrac1*s10g + yFrac0*s11g + s11b = yFrac1*s10b + yFrac0*s11b + s11a = yFrac1*s10a + yFrac0*s11a + p := color.RGBA64{uint16(s11r), uint16(s11g), uint16(s11b), uint16(s11a)} + if dstMask != nil { + q := dst.RGBA64At(dr.Min.X+int(dx), dr.Min.Y+int(dy)) + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + p.R = uint16(uint32(p.R) * ma / 0xffff) + p.G = uint16(uint32(p.G) * ma / 0xffff) + p.B = uint16(uint32(p.B) * ma / 
0xffff) + p.A = uint16(uint32(p.A) * ma / 0xffff) + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(uint32(q.R)*pa1/0xffff + uint32(p.R)) + dstColorRGBA64.G = uint16(uint32(q.G)*pa1/0xffff + uint32(p.G)) + dstColorRGBA64.B = uint16(uint32(q.B)*pa1/0xffff + uint32(p.B)) + dstColorRGBA64.A = uint16(uint32(q.A)*pa1/0xffff + uint32(p.A)) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColorRGBA64) + } else { + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(dy), p) + } + } + } +} + +func (ablInterpolator) transform_Image_Image_Over(dst Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, opts *Options) { + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + sx -= 0.5 + sx0 := int(sx) + xFrac0 := sx - float64(sx0) + xFrac1 := 1 - xFrac0 + sx0 += bias.X + sx1 := sx0 + 1 + if sx0 < sr.Min.X { + sx0, sx1 = sr.Min.X, sr.Min.X + xFrac0, xFrac1 = 0, 1 + } else if sx1 >= sr.Max.X { + sx0, sx1 = sr.Max.X-1, sr.Max.X-1 + xFrac0, xFrac1 = 1, 0 + } + + sy -= 0.5 + sy0 := int(sy) + yFrac0 := sy - float64(sy0) + yFrac1 := 1 - yFrac0 + sy0 += bias.Y + sy1 := sy0 + 1 + if sy0 < sr.Min.Y { + sy0, sy1 = sr.Min.Y, sr.Min.Y + yFrac0, yFrac1 = 0, 1 + } else if sy1 >= sr.Max.Y { + sy0, sy1 = sr.Max.Y-1, sr.Max.Y-1 + yFrac0, yFrac1 = 1, 0 + } + + s00ru, s00gu, s00bu, s00au := src.At(sx0, sy0).RGBA() + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sx0, smp.Y+sy0).RGBA() + s00ru = s00ru * ma / 0xffff + s00gu = s00gu * ma / 0xffff + s00bu = s00bu * ma / 0xffff + s00au = s00au * ma / 0xffff + } + s00r := float64(s00ru) s00g := float64(s00gu) s00b := float64(s00bu) s00a := float64(s00au) @@ -4500,6 +5610,8 @@ func (z *kernelScaler) Scale(dst Image, dr image.Rectangle, src image.Image, sr case image.YCbCrSubsampleRatio440: z.scaleX_YCbCr440(tmp, src, sr, &o) } + case image.RGBA64Image: + z.scaleX_RGBA64Image(tmp, src, sr, &o) default: z.scaleX_Image(tmp, src, sr, &o) } @@ -4518,6 +5630,8 @@ func (z *kernelScaler) Scale(dst Image, dr image.Rectangle, src image.Image, sr switch dst := dst.(type) { case *image.RGBA: z.scaleY_RGBA_Over(dst, dr, adr, tmp, &o) + case RGBA64Image: + z.scaleY_RGBA64Image_Over(dst, dr, adr, tmp, &o) default: z.scaleY_Image_Over(dst, dr, adr, tmp, &o) } @@ -4525,6 +5639,8 @@ func (z *kernelScaler) Scale(dst Image, dr image.Rectangle, src image.Image, sr switch dst := dst.(type) { case *image.RGBA: z.scaleY_RGBA_Src(dst, dr, adr, tmp, &o) + case RGBA64Image: + z.scaleY_RGBA64Image_Src(dst, dr, adr, tmp, &o) default: z.scaleY_Image_Src(dst, dr, adr, tmp, &o) } @@ -4600,9 +5716,16 @@ func (q *Kernel) Transform(dst Image, s2d f64.Aff3, src image.Image, sr image.Re q.transform_RGBA_NRGBA_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) case *image.RGBA: q.transform_RGBA_RGBA_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + case image.RGBA64Image: + q.transform_RGBA_RGBA64Image_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) default: q.transform_RGBA_Image_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) } + case RGBA64Image: + switch 
src := src.(type) { + case image.RGBA64Image: + q.transform_RGBA64Image_RGBA64Image_Over(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } default: switch src := src.(type) { default: @@ -4632,9 +5755,16 @@ func (q *Kernel) Transform(dst Image, s2d f64.Aff3, src image.Image, sr image.Re case image.YCbCrSubsampleRatio440: q.transform_RGBA_YCbCr440_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) } + case image.RGBA64Image: + q.transform_RGBA_RGBA64Image_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) default: q.transform_RGBA_Image_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) } + case RGBA64Image: + switch src := src.(type) { + case image.RGBA64Image: + q.transform_RGBA64Image_RGBA64Image_Src(dst, dr, adr, &d2s, src, sr, bias, xscale, yscale, &o) + } default: switch src := src.(type) { default: @@ -4909,6 +6039,37 @@ func (z *kernelScaler) scaleX_YCbCr440(tmp [][4]float64, src *image.YCbCr, sr im } } +func (z *kernelScaler) scaleX_RGBA64Image(tmp [][4]float64, src image.RGBA64Image, sr image.Rectangle, opts *Options) { + t := 0 + srcMask, smp := opts.SrcMask, opts.SrcMaskP + for y := int32(0); y < z.sh; y++ { + for _, s := range z.horizontal.sources { + var pr, pg, pb, pa float64 + for _, c := range z.horizontal.contribs[s.i:s.j] { + pu := src.RGBA64At(sr.Min.X+int(c.coord), sr.Min.Y+int(y)) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+sr.Min.X+int(c.coord), smp.Y+sr.Min.Y+int(y)).RGBA() + pu.R = uint16(uint32(pu.R) * ma / 0xffff) + pu.G = uint16(uint32(pu.G) * ma / 0xffff) + pu.B = uint16(uint32(pu.B) * ma / 0xffff) + pu.A = uint16(uint32(pu.A) * ma / 0xffff) + } + pr += float64(pu.R) * c.weight + pg += float64(pu.G) * c.weight + pb += float64(pu.B) * c.weight + pa += float64(pu.A) * c.weight + } + tmp[t] = [4]float64{ + pr * s.invTotalWeightFFFF, + pg * s.invTotalWeightFFFF, + pb * s.invTotalWeightFFFF, + pa * s.invTotalWeightFFFF, + } + t++ + } + } +} + func (z *kernelScaler) scaleX_Image(tmp [][4]float64, src image.Image, sr image.Rectangle, opts *Options) { t := 0 srcMask, smp := opts.SrcMask, opts.SrcMaskP @@ -4945,12 +6106,456 @@ func (z *kernelScaler) scaleY_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle d := (dr.Min.Y+adr.Min.Y-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+int(dx)-dst.Rect.Min.X)*4 for _, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { var pr, pg, pb, pa float64 - for _, c := range z.vertical.contribs[s.i:s.j] { - p := &tmp[c.coord*z.dw+dx] - pr += p[0] * c.weight - pg += p[1] * c.weight - pb += p[2] * c.weight - pa += p[3] * c.weight + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + pr0 := uint32(ftou(pr * s.invTotalWeight)) + pg0 := uint32(ftou(pg * s.invTotalWeight)) + pb0 := uint32(ftou(pb * s.invTotalWeight)) + pa0 := uint32(ftou(pa * s.invTotalWeight)) + pa1 := (0xffff - uint32(pa0)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) + d += dst.Stride + } + } +} + +func (z *kernelScaler) scaleY_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + d := 
(dr.Min.Y+adr.Min.Y-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+int(dx)-dst.Rect.Min.X)*4 + for _, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + dst.Pix[d+0] = uint8(ftou(pr*s.invTotalWeight) >> 8) + dst.Pix[d+1] = uint8(ftou(pg*s.invTotalWeight) >> 8) + dst.Pix[d+2] = uint8(ftou(pb*s.invTotalWeight) >> 8) + dst.Pix[d+3] = uint8(ftou(pa*s.invTotalWeight) >> 8) + d += dst.Stride + } + } +} + +func (z *kernelScaler) scaleY_RGBA64Image_Over(dst RGBA64Image, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := color.RGBA64{} + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + for dy, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + q := dst.RGBA64At(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy)) + pr0 := uint32(ftou(pr * s.invTotalWeight)) + pg0 := uint32(ftou(pg * s.invTotalWeight)) + pb0 := uint32(ftou(pb * s.invTotalWeight)) + pa0 := uint32(ftou(pa * s.invTotalWeight)) + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + pr0 = pr0 * ma / 0xffff + pg0 = pg0 * ma / 0xffff + pb0 = pb0 * ma / 0xffff + pa0 = pa0 * ma / 0xffff + } + pa1 := 0xffff - pa0 + dstColorRGBA64.R = uint16(uint32(q.R)*pa1/0xffff + pr0) + dstColorRGBA64.G = uint16(uint32(q.G)*pa1/0xffff + pg0) + dstColorRGBA64.B = uint16(uint32(q.B)*pa1/0xffff + pb0) + dstColorRGBA64.A = uint16(uint32(q.A)*pa1/0xffff + pa0) + dst.SetRGBA64(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColorRGBA64) + } + } +} + +func (z *kernelScaler) scaleY_RGBA64Image_Src(dst RGBA64Image, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := color.RGBA64{} + + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + for dy, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + if dstMask != nil { + q := dst.RGBA64At(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy)) + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + pr := uint32(ftou(pr*s.invTotalWeight)) * ma / 0xffff + pg := uint32(ftou(pg*s.invTotalWeight)) * ma / 0xffff + pb := uint32(ftou(pb*s.invTotalWeight)) * ma / 0xffff + pa := uint32(ftou(pa*s.invTotalWeight)) * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(uint32(q.R)*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(uint32(q.G)*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(uint32(q.B)*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(uint32(q.A)*pa1/0xffff + pa) + dst.SetRGBA64(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColorRGBA64) + } else { + dstColorRGBA64.R = ftou(pr * 
s.invTotalWeight) + dstColorRGBA64.G = ftou(pg * s.invTotalWeight) + dstColorRGBA64.B = ftou(pb * s.invTotalWeight) + dstColorRGBA64.A = ftou(pa * s.invTotalWeight) + dst.SetRGBA64(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColorRGBA64) + } + } + } +} + +func (z *kernelScaler) scaleY_Image_Over(dst Image, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + for dy, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + pr0 := uint32(ftou(pr * s.invTotalWeight)) + pg0 := uint32(ftou(pg * s.invTotalWeight)) + pb0 := uint32(ftou(pb * s.invTotalWeight)) + pa0 := uint32(ftou(pa * s.invTotalWeight)) + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + pr0 = pr0 * ma / 0xffff + pg0 = pg0 * ma / 0xffff + pb0 = pb0 * ma / 0xffff + pa0 = pa0 * ma / 0xffff + } + pa1 := 0xffff - pa0 + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr0) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg0) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb0) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa0) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColor) + } + } +} + +func (z *kernelScaler) scaleY_Image_Src(dst Image, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := &color.RGBA64{} + dstColor := color.Color(dstColorRGBA64) + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { + for dy, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { + var pr, pg, pb, pa float64 + for _, c := range z.vertical.contribs[s.i:s.j] { + p := &tmp[c.coord*z.dw+dx] + pr += p[0] * c.weight + pg += p[1] * c.weight + pb += p[2] * c.weight + pa += p[3] * c.weight + } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + if dstMask != nil { + qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(adr.Min.Y+dy)).RGBA() + pr := uint32(ftou(pr*s.invTotalWeight)) * ma / 0xffff + pg := uint32(ftou(pg*s.invTotalWeight)) * ma / 0xffff + pb := uint32(ftou(pb*s.invTotalWeight)) * ma / 0xffff + pa := uint32(ftou(pa*s.invTotalWeight)) * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) + dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColor) + } else { + dstColorRGBA64.R = ftou(pr * s.invTotalWeight) + dstColorRGBA64.G = ftou(pg * s.invTotalWeight) + dstColorRGBA64.B = ftou(pb * s.invTotalWeight) + dstColorRGBA64.A = ftou(pa * s.invTotalWeight) + dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColor) + } + } + } +} + +func (q *Kernel) transform_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.Gray, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When 
shrinking, broaden the effective kernel support so that we still + // visit every source pixel. + xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx - src.Rect.Min.X) + pru := uint32(src.Pix[pi]) * 0x101 + pr += float64(pru) * w + } + } + } + } + out := uint8(fffftou(pr) >> 8) + dst.Pix[d+0] = out + dst.Pix[d+1] = out + dst.Pix[d+2] = out + dst.Pix[d+3] = 0xff + } + } +} + +func (q *Kernel) transform_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. 
+ xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 + pau := uint32(src.Pix[pi+3]) * 0x101 + pru := uint32(src.Pix[pi+0]) * pau / 0xff + pgu := uint32(src.Pix[pi+1]) * pau / 0xff + pbu := uint32(src.Pix[pi+2]) * pau / 0xff + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } } if pr > pa { @@ -4963,112 +6568,114 @@ func (z *kernelScaler) scaleY_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle pb = pa } - pr0 := uint32(ftou(pr * s.invTotalWeight)) - pg0 := uint32(ftou(pg * s.invTotalWeight)) - pb0 := uint32(ftou(pb * s.invTotalWeight)) - pa0 := uint32(ftou(pa * s.invTotalWeight)) + pr0 := uint32(fffftou(pr)) + pg0 := uint32(fffftou(pg)) + pb0 := uint32(fffftou(pb)) + pa0 := uint32(fffftou(pa)) pa1 := (0xffff - uint32(pa0)) * 0x101 dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) - d += dst.Stride } } } -func (z *kernelScaler) scaleY_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { - for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { - d := (dr.Min.Y+adr.Min.Y-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+int(dx)-dst.Rect.Min.X)*4 - for _, s := range 
z.vertical.sources[adr.Min.Y:adr.Max.Y] { - var pr, pg, pb, pa float64 - for _, c := range z.vertical.contribs[s.i:s.j] { - p := &tmp[c.coord*z.dw+dx] - pr += p[0] * c.weight - pg += p[1] * c.weight - pb += p[2] * c.weight - pa += p[3] * c.weight - } +func (q *Kernel) transform_RGBA_NRGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. + xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } - if pr > pa { - pr = pa + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue } - if pg > pa { - pg = pa + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). + sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X } - if pb > pa { - pb = pa + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X } - dst.Pix[d+0] = uint8(ftou(pr*s.invTotalWeight) >> 8) - dst.Pix[d+1] = uint8(ftou(pg*s.invTotalWeight) >> 8) - dst.Pix[d+2] = uint8(ftou(pb*s.invTotalWeight) >> 8) - dst.Pix[d+3] = uint8(ftou(pa*s.invTotalWeight) >> 8) - d += dst.Stride - } - } -} - -func (z *kernelScaler) scaleY_Image_Over(dst Image, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { - dstMask, dmp := opts.DstMask, opts.DstMaskP - dstColorRGBA64 := &color.RGBA64{} - dstColor := color.Color(dstColorRGBA64) - for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { - for dy, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { - var pr, pg, pb, pa float64 - for _, c := range z.vertical.contribs[s.i:s.j] { - p := &tmp[c.coord*z.dw+dx] - pr += p[0] * c.weight - pg += p[1] * c.weight - pb += p[2] * c.weight - pa += p[3] * c.weight + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight } - - if pr > pa { - pr = pa + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight } - if pg > pa { - pg = pa + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y } - if pb > pa { - pb = pa + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y } - qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy)).RGBA() - pr0 := uint32(ftou(pr * s.invTotalWeight)) - pg0 := uint32(ftou(pg * s.invTotalWeight)) - pb0 := uint32(ftou(pb * s.invTotalWeight)) - pa0 := uint32(ftou(pa * s.invTotalWeight)) - if dstMask != nil { - _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(adr.Min.Y+dy)).RGBA() - pr0 = pr0 * ma / 0xffff - pg0 = 
pg0 * ma / 0xffff - pb0 = pb0 * ma / 0xffff - pa0 = pa0 * ma / 0xffff + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight } - pa1 := 0xffff - pa0 - dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr0) - dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg0) - dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb0) - dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa0) - dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColor) - } - } -} -func (z *kernelScaler) scaleY_Image_Src(dst Image, dr, adr image.Rectangle, tmp [][4]float64, opts *Options) { - dstMask, dmp := opts.DstMask, opts.DstMaskP - dstColorRGBA64 := &color.RGBA64{} - dstColor := color.Color(dstColorRGBA64) - for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { - for dy, s := range z.vertical.sources[adr.Min.Y:adr.Max.Y] { var pr, pg, pb, pa float64 - for _, c := range z.vertical.contribs[s.i:s.j] { - p := &tmp[c.coord*z.dw+dx] - pr += p[0] * c.weight - pg += p[1] * c.weight - pb += p[2] * c.weight - pa += p[3] * c.weight + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 + pau := uint32(src.Pix[pi+3]) * 0x101 + pru := uint32(src.Pix[pi+0]) * pau / 0xff + pgu := uint32(src.Pix[pi+1]) * pau / 0xff + pbu := uint32(src.Pix[pi+2]) * pau / 0xff + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w + } + } + } } if pr > pa { @@ -5081,31 +6688,15 @@ func (z *kernelScaler) scaleY_Image_Src(dst Image, dr, adr image.Rectangle, tmp pb = pa } - if dstMask != nil { - qr, qg, qb, qa := dst.At(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy)).RGBA() - _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(adr.Min.Y+dy)).RGBA() - pr := uint32(ftou(pr*s.invTotalWeight)) * ma / 0xffff - pg := uint32(ftou(pg*s.invTotalWeight)) * ma / 0xffff - pb := uint32(ftou(pb*s.invTotalWeight)) * ma / 0xffff - pa := uint32(ftou(pa*s.invTotalWeight)) * ma / 0xffff - pa1 := 0xffff - ma - dstColorRGBA64.R = uint16(qr*pa1/0xffff + pr) - dstColorRGBA64.G = uint16(qg*pa1/0xffff + pg) - dstColorRGBA64.B = uint16(qb*pa1/0xffff + pb) - dstColorRGBA64.A = uint16(qa*pa1/0xffff + pa) - dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColor) - } else { - dstColorRGBA64.R = ftou(pr * s.invTotalWeight) - dstColorRGBA64.G = ftou(pg * s.invTotalWeight) - dstColorRGBA64.B = ftou(pb * s.invTotalWeight) - dstColorRGBA64.A = ftou(pa * s.invTotalWeight) - dst.Set(dr.Min.X+int(dx), dr.Min.Y+int(adr.Min.Y+dy), dstColor) - } + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = uint8(fffftou(pa) >> 8) } } } -func (q *Kernel) transform_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.Gray, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { +func (q *Kernel) transform_RGBA_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { // When shrinking, broaden the effective kernel support so that we still // visit every source pixel. 
xHalfWidth, xKernelArgScale := q.Support, 1.0 @@ -5183,28 +6774,49 @@ func (q *Kernel) transform_RGBA_Gray_Src(dst *image.RGBA, dr, adr image.Rectangl yWeights[y] /= totalYWeight } - var pr float64 + var pr, pg, pb, pa float64 for ky := iy; ky < jy; ky++ { if yWeight := yWeights[ky-iy]; yWeight != 0 { for kx := ix; kx < jx; kx++ { if w := xWeights[kx-ix] * yWeight; w != 0 { - pi := (ky-src.Rect.Min.Y)*src.Stride + (kx - src.Rect.Min.X) - pru := uint32(src.Pix[pi]) * 0x101 + pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 + pru := uint32(src.Pix[pi+0]) * 0x101 + pgu := uint32(src.Pix[pi+1]) * 0x101 + pbu := uint32(src.Pix[pi+2]) * 0x101 + pau := uint32(src.Pix[pi+3]) * 0x101 pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + pa += float64(pau) * w } } } } - out := uint8(fffftou(pr) >> 8) - dst.Pix[d+0] = out - dst.Pix[d+1] = out - dst.Pix[d+2] = out - dst.Pix[d+3] = 0xff + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + pr0 := uint32(fffftou(pr)) + pg0 := uint32(fffftou(pg)) + pb0 := uint32(fffftou(pb)) + pa0 := uint32(fffftou(pa)) + pa1 := (0xffff - uint32(pa0)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) } } } -func (q *Kernel) transform_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { +func (q *Kernel) transform_RGBA_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { // When shrinking, broaden the effective kernel support so that we still // visit every source pixel. 
xHalfWidth, xKernelArgScale := q.Support, 1.0 @@ -5288,10 +6900,10 @@ func (q *Kernel) transform_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectan for kx := ix; kx < jx; kx++ { if w := xWeights[kx-ix] * yWeight; w != 0 { pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 + pru := uint32(src.Pix[pi+0]) * 0x101 + pgu := uint32(src.Pix[pi+1]) * 0x101 + pbu := uint32(src.Pix[pi+2]) * 0x101 pau := uint32(src.Pix[pi+3]) * 0x101 - pru := uint32(src.Pix[pi+0]) * pau / 0xff - pgu := uint32(src.Pix[pi+1]) * pau / 0xff - pbu := uint32(src.Pix[pi+2]) * pau / 0xff pr += float64(pru) * w pg += float64(pgu) * w pb += float64(pbu) * w @@ -5311,20 +6923,15 @@ func (q *Kernel) transform_RGBA_NRGBA_Over(dst *image.RGBA, dr, adr image.Rectan pb = pa } - pr0 := uint32(fffftou(pr)) - pg0 := uint32(fffftou(pg)) - pb0 := uint32(fffftou(pb)) - pa0 := uint32(fffftou(pa)) - pa1 := (0xffff - uint32(pa0)) * 0x101 - dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) - dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) - dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) - dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = uint8(fffftou(pa) >> 8) } } } -func (q *Kernel) transform_RGBA_NRGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.NRGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { +func (q *Kernel) transform_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { // When shrinking, broaden the effective kernel support so that we still // visit every source pixel. xHalfWidth, xKernelArgScale := q.Support, 1.0 @@ -5402,44 +7009,53 @@ func (q *Kernel) transform_RGBA_NRGBA_Src(dst *image.RGBA, dr, adr image.Rectang yWeights[y] /= totalYWeight } - var pr, pg, pb, pa float64 + var pr, pg, pb float64 for ky := iy; ky < jy; ky++ { if yWeight := yWeights[ky-iy]; yWeight != 0 { for kx := ix; kx < jx; kx++ { if w := xWeights[kx-ix] * yWeight; w != 0 { - pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 - pau := uint32(src.Pix[pi+3]) * 0x101 - pru := uint32(src.Pix[pi+0]) * pau / 0xff - pgu := uint32(src.Pix[pi+1]) * pau / 0xff - pbu := uint32(src.Pix[pi+2]) * pau / 0xff + pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) + pj := (ky-src.Rect.Min.Y)*src.CStride + (kx - src.Rect.Min.X) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + pr += float64(pru) * w pg += float64(pgu) * w pb += float64(pbu) * w - pa += float64(pau) * w } } } } - - if pr > pa { - pr = pa - } - if pg > pa { - pg = pa - } - if pb > pa { - pb = pa - } - dst.Pix[d+0] = uint8(fffftou(pr) >> 8) dst.Pix[d+1] = uint8(fffftou(pg) >> 8) dst.Pix[d+2] = uint8(fffftou(pb) >> 8) - dst.Pix[d+3] = uint8(fffftou(pa) >> 8) + dst.Pix[d+3] = 0xff } } } -func (q *Kernel) transform_RGBA_RGBA_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { +func (q *Kernel) transform_RGBA_YCbCr422_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { // When shrinking, broaden the effective kernel support so that we still // visit every source pixel. xHalfWidth, xKernelArgScale := q.Support, 1.0 @@ -5517,49 +7133,53 @@ func (q *Kernel) transform_RGBA_RGBA_Over(dst *image.RGBA, dr, adr image.Rectang yWeights[y] /= totalYWeight } - var pr, pg, pb, pa float64 + var pr, pg, pb float64 for ky := iy; ky < jy; ky++ { if yWeight := yWeights[ky-iy]; yWeight != 0 { for kx := ix; kx < jx; kx++ { if w := xWeights[kx-ix] * yWeight; w != 0 { - pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 - pru := uint32(src.Pix[pi+0]) * 0x101 - pgu := uint32(src.Pix[pi+1]) * 0x101 - pbu := uint32(src.Pix[pi+2]) * 0x101 - pau := uint32(src.Pix[pi+3]) * 0x101 + pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) + pj := (ky-src.Rect.Min.Y)*src.CStride + ((kx)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + pr += float64(pru) * w pg += float64(pgu) * w pb += float64(pbu) * w - pa += float64(pau) * w } } } } - - if pr > pa { - pr = pa - } - if pg > pa { - pg = pa - } - if pb > pa { - pb = pa - } - - pr0 := uint32(fffftou(pr)) - pg0 := uint32(fffftou(pg)) - pb0 := uint32(fffftou(pb)) - pa0 := uint32(fffftou(pa)) - pa1 := (0xffff - uint32(pa0)) * 0x101 - dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) - dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) - dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) - dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = 0xff } } } -func (q *Kernel) transform_RGBA_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.RGBA, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { +func (q *Kernel) transform_RGBA_YCbCr420_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { // When shrinking, broaden the effective kernel support so that we still // visit every source pixel. xHalfWidth, xKernelArgScale := q.Support, 1.0 @@ -5637,44 +7257,53 @@ func (q *Kernel) transform_RGBA_RGBA_Src(dst *image.RGBA, dr, adr image.Rectangl yWeights[y] /= totalYWeight } - var pr, pg, pb, pa float64 + var pr, pg, pb float64 for ky := iy; ky < jy; ky++ { if yWeight := yWeights[ky-iy]; yWeight != 0 { for kx := ix; kx < jx; kx++ { if w := xWeights[kx-ix] * yWeight; w != 0 { - pi := (ky-src.Rect.Min.Y)*src.Stride + (kx-src.Rect.Min.X)*4 - pru := uint32(src.Pix[pi+0]) * 0x101 - pgu := uint32(src.Pix[pi+1]) * 0x101 - pbu := uint32(src.Pix[pi+2]) * 0x101 - pau := uint32(src.Pix[pi+3]) * 0x101 + pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) + pj := ((ky)/2-src.Rect.Min.Y/2)*src.CStride + ((kx)/2 - src.Rect.Min.X/2) + + // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
+ pyy1 := int(src.Y[pi]) * 0x10101 + pcb1 := int(src.Cb[pj]) - 128 + pcr1 := int(src.Cr[pj]) - 128 + pru := (pyy1 + 91881*pcr1) >> 8 + pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 + pbu := (pyy1 + 116130*pcb1) >> 8 + if pru < 0 { + pru = 0 + } else if pru > 0xffff { + pru = 0xffff + } + if pgu < 0 { + pgu = 0 + } else if pgu > 0xffff { + pgu = 0xffff + } + if pbu < 0 { + pbu = 0 + } else if pbu > 0xffff { + pbu = 0xffff + } + pr += float64(pru) * w pg += float64(pgu) * w pb += float64(pbu) * w - pa += float64(pau) * w } } } } - - if pr > pa { - pr = pa - } - if pg > pa { - pg = pa - } - if pb > pa { - pb = pa - } - dst.Pix[d+0] = uint8(fffftou(pr) >> 8) dst.Pix[d+1] = uint8(fffftou(pg) >> 8) dst.Pix[d+2] = uint8(fffftou(pb) >> 8) - dst.Pix[d+3] = uint8(fffftou(pa) >> 8) + dst.Pix[d+3] = 0xff } } } -func (q *Kernel) transform_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { +func (q *Kernel) transform_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { // When shrinking, broaden the effective kernel support so that we still // visit every source pixel. xHalfWidth, xKernelArgScale := q.Support, 1.0 @@ -5758,7 +7387,7 @@ func (q *Kernel) transform_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rect for kx := ix; kx < jx; kx++ { if w := xWeights[kx-ix] * yWeight; w != 0 { pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) - pj := (ky-src.Rect.Min.Y)*src.CStride + (kx - src.Rect.Min.X) + pj := ((ky)/2-src.Rect.Min.Y/2)*src.CStride + (kx - src.Rect.Min.X) // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. pyy1 := int(src.Y[pi]) * 0x10101 @@ -5783,22 +7412,138 @@ func (q *Kernel) transform_RGBA_YCbCr444_Src(dst *image.RGBA, dr, adr image.Rect pbu = 0xffff } - pr += float64(pru) * w - pg += float64(pgu) * w - pb += float64(pbu) * w + pr += float64(pru) * w + pg += float64(pgu) * w + pb += float64(pbu) * w + } + } + } + } + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) + dst.Pix[d+1] = uint8(fffftou(pg) >> 8) + dst.Pix[d+2] = uint8(fffftou(pb) >> 8) + dst.Pix[d+3] = 0xff + } + } +} + +func (q *Kernel) transform_RGBA_RGBA64Image_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.RGBA64Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { + // When shrinking, broaden the effective kernel support so that we still + // visit every source pixel. + xHalfWidth, xKernelArgScale := q.Support, 1.0 + if xscale > 1 { + xHalfWidth *= xscale + xKernelArgScale = 1 / xscale + } + yHalfWidth, yKernelArgScale := q.Support, 1.0 + if yscale > 1 { + yHalfWidth *= yscale + yKernelArgScale = 1 / yscale + } + + xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) + yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { + dyf := float64(dr.Min.Y+int(dy)) + 0.5 + d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + dxf := float64(dr.Min.X+int(dx)) + 0.5 + sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] + sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] + if !(image.Point{int(sx) + bias.X, int(sy) + bias.Y}).In(sr) { + continue + } + + // TODO: adjust the bias so that we can use int(f) instead + // of math.Floor(f) and math.Ceil(f). 
+ sx += float64(bias.X) + sx -= 0.5 + ix := int(math.Floor(sx - xHalfWidth)) + if ix < sr.Min.X { + ix = sr.Min.X + } + jx := int(math.Ceil(sx + xHalfWidth)) + if jx > sr.Max.X { + jx = sr.Max.X + } + + totalXWeight := 0.0 + for kx := ix; kx < jx; kx++ { + xWeight := 0.0 + if t := abs((sx - float64(kx)) * xKernelArgScale); t < q.Support { + xWeight = q.At(t) + } + xWeights[kx-ix] = xWeight + totalXWeight += xWeight + } + for x := range xWeights[:jx-ix] { + xWeights[x] /= totalXWeight + } + + sy += float64(bias.Y) + sy -= 0.5 + iy := int(math.Floor(sy - yHalfWidth)) + if iy < sr.Min.Y { + iy = sr.Min.Y + } + jy := int(math.Ceil(sy + yHalfWidth)) + if jy > sr.Max.Y { + jy = sr.Max.Y + } + + totalYWeight := 0.0 + for ky := iy; ky < jy; ky++ { + yWeight := 0.0 + if t := abs((sy - float64(ky)) * yKernelArgScale); t < q.Support { + yWeight = q.At(t) + } + yWeights[ky-iy] = yWeight + totalYWeight += yWeight + } + for y := range yWeights[:jy-iy] { + yWeights[y] /= totalYWeight + } + + var pr, pg, pb, pa float64 + for ky := iy; ky < jy; ky++ { + if yWeight := yWeights[ky-iy]; yWeight != 0 { + for kx := ix; kx < jx; kx++ { + if w := xWeights[kx-ix] * yWeight; w != 0 { + pu := src.RGBA64At(kx, ky) + pr += float64(pu.R) * w + pg += float64(pu.G) * w + pb += float64(pu.B) * w + pa += float64(pu.A) * w } } } } - dst.Pix[d+0] = uint8(fffftou(pr) >> 8) - dst.Pix[d+1] = uint8(fffftou(pg) >> 8) - dst.Pix[d+2] = uint8(fffftou(pb) >> 8) - dst.Pix[d+3] = 0xff + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + pr0 := uint32(fffftou(pr)) + pg0 := uint32(fffftou(pg)) + pb0 := uint32(fffftou(pb)) + pa0 := uint32(fffftou(pa)) + pa1 := (0xffff - uint32(pa0)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) } } } -func (q *Kernel) transform_RGBA_YCbCr422_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { +func (q *Kernel) transform_RGBA_RGBA64Image_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.RGBA64Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { // When shrinking, broaden the effective kernel support so that we still // visit every source pixel. xHalfWidth, xKernelArgScale := q.Support, 1.0 @@ -5876,53 +7621,40 @@ func (q *Kernel) transform_RGBA_YCbCr422_Src(dst *image.RGBA, dr, adr image.Rect yWeights[y] /= totalYWeight } - var pr, pg, pb float64 + var pr, pg, pb, pa float64 for ky := iy; ky < jy; ky++ { if yWeight := yWeights[ky-iy]; yWeight != 0 { for kx := ix; kx < jx; kx++ { if w := xWeights[kx-ix] * yWeight; w != 0 { - pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) - pj := (ky-src.Rect.Min.Y)*src.CStride + ((kx)/2 - src.Rect.Min.X/2) - - // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
- pyy1 := int(src.Y[pi]) * 0x10101 - pcb1 := int(src.Cb[pj]) - 128 - pcr1 := int(src.Cr[pj]) - 128 - pru := (pyy1 + 91881*pcr1) >> 8 - pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 - pbu := (pyy1 + 116130*pcb1) >> 8 - if pru < 0 { - pru = 0 - } else if pru > 0xffff { - pru = 0xffff - } - if pgu < 0 { - pgu = 0 - } else if pgu > 0xffff { - pgu = 0xffff - } - if pbu < 0 { - pbu = 0 - } else if pbu > 0xffff { - pbu = 0xffff - } - - pr += float64(pru) * w - pg += float64(pgu) * w - pb += float64(pbu) * w + pu := src.RGBA64At(kx, ky) + pr += float64(pu.R) * w + pg += float64(pu.G) * w + pb += float64(pu.B) * w + pa += float64(pu.A) * w } } } } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) dst.Pix[d+1] = uint8(fffftou(pg) >> 8) dst.Pix[d+2] = uint8(fffftou(pb) >> 8) - dst.Pix[d+3] = 0xff + dst.Pix[d+3] = uint8(fffftou(pa) >> 8) } } } -func (q *Kernel) transform_RGBA_YCbCr420_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { +func (q *Kernel) transform_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { // When shrinking, broaden the effective kernel support so that we still // visit every source pixel. xHalfWidth, xKernelArgScale := q.Support, 1.0 @@ -6000,53 +7732,45 @@ func (q *Kernel) transform_RGBA_YCbCr420_Src(dst *image.RGBA, dr, adr image.Rect yWeights[y] /= totalYWeight } - var pr, pg, pb float64 + var pr, pg, pb, pa float64 for ky := iy; ky < jy; ky++ { if yWeight := yWeights[ky-iy]; yWeight != 0 { for kx := ix; kx < jx; kx++ { if w := xWeights[kx-ix] * yWeight; w != 0 { - pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) - pj := ((ky)/2-src.Rect.Min.Y/2)*src.CStride + ((kx)/2 - src.Rect.Min.X/2) - - // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
- pyy1 := int(src.Y[pi]) * 0x10101 - pcb1 := int(src.Cb[pj]) - 128 - pcr1 := int(src.Cr[pj]) - 128 - pru := (pyy1 + 91881*pcr1) >> 8 - pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 - pbu := (pyy1 + 116130*pcb1) >> 8 - if pru < 0 { - pru = 0 - } else if pru > 0xffff { - pru = 0xffff - } - if pgu < 0 { - pgu = 0 - } else if pgu > 0xffff { - pgu = 0xffff - } - if pbu < 0 { - pbu = 0 - } else if pbu > 0xffff { - pbu = 0xffff - } - + pru, pgu, pbu, pau := src.At(kx, ky).RGBA() pr += float64(pru) * w pg += float64(pgu) * w pb += float64(pbu) * w + pa += float64(pau) * w } } } } - dst.Pix[d+0] = uint8(fffftou(pr) >> 8) - dst.Pix[d+1] = uint8(fffftou(pg) >> 8) - dst.Pix[d+2] = uint8(fffftou(pb) >> 8) - dst.Pix[d+3] = 0xff + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + + pr0 := uint32(fffftou(pr)) + pg0 := uint32(fffftou(pg)) + pb0 := uint32(fffftou(pb)) + pa0 := uint32(fffftou(pa)) + pa1 := (0xffff - uint32(pa0)) * 0x101 + dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) + dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) + dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) + dst.Pix[d+3] = uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) } } } -func (q *Kernel) transform_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src *image.YCbCr, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { +func (q *Kernel) transform_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { // When shrinking, broaden the effective kernel support so that we still // visit every source pixel. xHalfWidth, xKernelArgScale := q.Support, 1.0 @@ -6124,53 +7848,40 @@ func (q *Kernel) transform_RGBA_YCbCr440_Src(dst *image.RGBA, dr, adr image.Rect yWeights[y] /= totalYWeight } - var pr, pg, pb float64 + var pr, pg, pb, pa float64 for ky := iy; ky < jy; ky++ { if yWeight := yWeights[ky-iy]; yWeight != 0 { for kx := ix; kx < jx; kx++ { if w := xWeights[kx-ix] * yWeight; w != 0 { - pi := (ky-src.Rect.Min.Y)*src.YStride + (kx - src.Rect.Min.X) - pj := ((ky)/2-src.Rect.Min.Y/2)*src.CStride + (kx - src.Rect.Min.X) - - // This is an inline version of image/color/ycbcr.go's YCbCr.RGBA method. 
- pyy1 := int(src.Y[pi]) * 0x10101 - pcb1 := int(src.Cb[pj]) - 128 - pcr1 := int(src.Cr[pj]) - 128 - pru := (pyy1 + 91881*pcr1) >> 8 - pgu := (pyy1 - 22554*pcb1 - 46802*pcr1) >> 8 - pbu := (pyy1 + 116130*pcb1) >> 8 - if pru < 0 { - pru = 0 - } else if pru > 0xffff { - pru = 0xffff - } - if pgu < 0 { - pgu = 0 - } else if pgu > 0xffff { - pgu = 0xffff - } - if pbu < 0 { - pbu = 0 - } else if pbu > 0xffff { - pbu = 0xffff - } - + pru, pgu, pbu, pau := src.At(kx, ky).RGBA() pr += float64(pru) * w pg += float64(pgu) * w pb += float64(pbu) * w + pa += float64(pau) * w } } } } + + if pr > pa { + pr = pa + } + if pg > pa { + pg = pa + } + if pb > pa { + pb = pa + } + dst.Pix[d+0] = uint8(fffftou(pr) >> 8) dst.Pix[d+1] = uint8(fffftou(pg) >> 8) dst.Pix[d+2] = uint8(fffftou(pb) >> 8) - dst.Pix[d+3] = 0xff + dst.Pix[d+3] = uint8(fffftou(pa) >> 8) } } } -func (q *Kernel) transform_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { +func (q *Kernel) transform_RGBA64Image_RGBA64Image_Over(dst RGBA64Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.RGBA64Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { // When shrinking, broaden the effective kernel support so that we still // visit every source pixel. xHalfWidth, xKernelArgScale := q.Support, 1.0 @@ -6187,10 +7898,13 @@ func (q *Kernel) transform_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectan xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := color.RGBA64{} + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { dyf := float64(dr.Min.Y+int(dy)) + 0.5 - d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 - for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { dxf := float64(dr.Min.X+int(dx)) + 0.5 sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] @@ -6253,11 +7967,18 @@ func (q *Kernel) transform_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectan if yWeight := yWeights[ky-iy]; yWeight != 0 { for kx := ix; kx < jx; kx++ { if w := xWeights[kx-ix] * yWeight; w != 0 { - pru, pgu, pbu, pau := src.At(kx, ky).RGBA() - pr += float64(pru) * w - pg += float64(pgu) * w - pb += float64(pbu) * w - pa += float64(pau) * w + pu := src.RGBA64At(kx, ky) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+kx, smp.Y+ky).RGBA() + pu.R = uint16(uint32(pu.R) * ma / 0xffff) + pu.G = uint16(uint32(pu.G) * ma / 0xffff) + pu.B = uint16(uint32(pu.B) * ma / 0xffff) + pu.A = uint16(uint32(pu.A) * ma / 0xffff) + } + pr += float64(pu.R) * w + pg += float64(pu.G) * w + pb += float64(pu.B) * w + pa += float64(pu.A) * w } } } @@ -6273,20 +7994,29 @@ func (q *Kernel) transform_RGBA_Image_Over(dst *image.RGBA, dr, adr image.Rectan pb = pa } + q := dst.RGBA64At(dr.Min.X+int(dx), dr.Min.Y+int(dy)) pr0 := uint32(fffftou(pr)) pg0 := uint32(fffftou(pg)) pb0 := uint32(fffftou(pb)) pa0 := uint32(fffftou(pa)) - pa1 := (0xffff - uint32(pa0)) * 0x101 - dst.Pix[d+0] = uint8((uint32(dst.Pix[d+0])*pa1/0xffff + pr0) >> 8) - dst.Pix[d+1] = uint8((uint32(dst.Pix[d+1])*pa1/0xffff + pg0) >> 8) - dst.Pix[d+2] = uint8((uint32(dst.Pix[d+2])*pa1/0xffff + pb0) >> 8) - dst.Pix[d+3] = 
uint8((uint32(dst.Pix[d+3])*pa1/0xffff + pa0) >> 8) + if dstMask != nil { + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr0 = pr0 * ma / 0xffff + pg0 = pg0 * ma / 0xffff + pb0 = pb0 * ma / 0xffff + pa0 = pa0 * ma / 0xffff + } + pa1 := 0xffff - pa0 + dstColorRGBA64.R = uint16(uint32(q.R)*pa1/0xffff + pr0) + dstColorRGBA64.G = uint16(uint32(q.G)*pa1/0xffff + pg0) + dstColorRGBA64.B = uint16(uint32(q.B)*pa1/0xffff + pb0) + dstColorRGBA64.A = uint16(uint32(q.A)*pa1/0xffff + pa0) + dst.SetRGBA64(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColorRGBA64) } } } -func (q *Kernel) transform_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectangle, d2s *f64.Aff3, src image.Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { +func (q *Kernel) transform_RGBA64Image_RGBA64Image_Src(dst RGBA64Image, dr, adr image.Rectangle, d2s *f64.Aff3, src image.RGBA64Image, sr image.Rectangle, bias image.Point, xscale, yscale float64, opts *Options) { // When shrinking, broaden the effective kernel support so that we still // visit every source pixel. xHalfWidth, xKernelArgScale := q.Support, 1.0 @@ -6303,10 +8033,13 @@ func (q *Kernel) transform_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectang xWeights := make([]float64, 1+2*int(math.Ceil(xHalfWidth))) yWeights := make([]float64, 1+2*int(math.Ceil(yHalfWidth))) + srcMask, smp := opts.SrcMask, opts.SrcMaskP + dstMask, dmp := opts.DstMask, opts.DstMaskP + dstColorRGBA64 := color.RGBA64{} + for dy := int32(adr.Min.Y); dy < int32(adr.Max.Y); dy++ { dyf := float64(dr.Min.Y+int(dy)) + 0.5 - d := (dr.Min.Y+int(dy)-dst.Rect.Min.Y)*dst.Stride + (dr.Min.X+adr.Min.X-dst.Rect.Min.X)*4 - for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx, d = dx+1, d+4 { + for dx := int32(adr.Min.X); dx < int32(adr.Max.X); dx++ { dxf := float64(dr.Min.X+int(dx)) + 0.5 sx := d2s[0]*dxf + d2s[1]*dyf + d2s[2] sy := d2s[3]*dxf + d2s[4]*dyf + d2s[5] @@ -6369,11 +8102,18 @@ func (q *Kernel) transform_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectang if yWeight := yWeights[ky-iy]; yWeight != 0 { for kx := ix; kx < jx; kx++ { if w := xWeights[kx-ix] * yWeight; w != 0 { - pru, pgu, pbu, pau := src.At(kx, ky).RGBA() - pr += float64(pru) * w - pg += float64(pgu) * w - pb += float64(pbu) * w - pa += float64(pau) * w + pu := src.RGBA64At(kx, ky) + if srcMask != nil { + _, _, _, ma := srcMask.At(smp.X+kx, smp.Y+ky).RGBA() + pu.R = uint16(uint32(pu.R) * ma / 0xffff) + pu.G = uint16(uint32(pu.G) * ma / 0xffff) + pu.B = uint16(uint32(pu.B) * ma / 0xffff) + pu.A = uint16(uint32(pu.A) * ma / 0xffff) + } + pr += float64(pu.R) * w + pg += float64(pu.G) * w + pb += float64(pu.B) * w + pa += float64(pu.A) * w } } } @@ -6389,10 +8129,26 @@ func (q *Kernel) transform_RGBA_Image_Src(dst *image.RGBA, dr, adr image.Rectang pb = pa } - dst.Pix[d+0] = uint8(fffftou(pr) >> 8) - dst.Pix[d+1] = uint8(fffftou(pg) >> 8) - dst.Pix[d+2] = uint8(fffftou(pb) >> 8) - dst.Pix[d+3] = uint8(fffftou(pa) >> 8) + if dstMask != nil { + q := dst.RGBA64At(dr.Min.X+int(dx), dr.Min.Y+int(dy)) + _, _, _, ma := dstMask.At(dmp.X+dr.Min.X+int(dx), dmp.Y+dr.Min.Y+int(dy)).RGBA() + pr := uint32(fffftou(pr)) * ma / 0xffff + pg := uint32(fffftou(pg)) * ma / 0xffff + pb := uint32(fffftou(pb)) * ma / 0xffff + pa := uint32(fffftou(pa)) * ma / 0xffff + pa1 := 0xffff - ma + dstColorRGBA64.R = uint16(uint32(q.R)*pa1/0xffff + pr) + dstColorRGBA64.G = uint16(uint32(q.G)*pa1/0xffff + pg) + dstColorRGBA64.B = uint16(uint32(q.B)*pa1/0xffff + pb) + dstColorRGBA64.A = 
uint16(uint32(q.A)*pa1/0xffff + pa) + dst.SetRGBA64(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColorRGBA64) + } else { + dstColorRGBA64.R = fffftou(pr) + dstColorRGBA64.G = fffftou(pg) + dstColorRGBA64.B = fffftou(pb) + dstColorRGBA64.A = fffftou(pa) + dst.SetRGBA64(dr.Min.X+int(dx), dr.Min.Y+int(dy), dstColorRGBA64) + } } } } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/image/draw/scale.go b/MobileLibrary/go-mobile/vendor/golang.org/x/image/draw/scale.go index 00121a129..ba1bdf3ce 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/image/draw/scale.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/image/draw/scale.go @@ -46,8 +46,8 @@ type Scaler interface { // // For example, if m is the matrix // -// m00 m01 m02 -// m10 m11 m12 +// m00 m01 m02 +// m10 m11 m12 // // then the src-space point (sx, sy) maps to the dst-space point // (m00*sx + m01*sy + m02, m10*sx + m11*sy + m12). @@ -98,9 +98,9 @@ type Options struct { // have a 1:1 correspondence. // // Of the interpolators provided by this package: -// - NearestNeighbor is fast but usually looks worst. -// - CatmullRom is slow but usually looks best. -// - ApproxBiLinear has reasonable speed and quality. +// - NearestNeighbor is fast but usually looks worst. +// - CatmullRom is slow but usually looks best. +// - ApproxBiLinear has reasonable speed and quality. // // The time taken depends on the size of dr. For kernel interpolators, the // speed also depends on the size of sr, and so are often slower than diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go index 2681af35a..150f887e7 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go @@ -13,7 +13,7 @@ import ( "sync" ) -// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be +// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be // compiled the first time it is needed. type Regexp struct { str string diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/print.go b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/print.go index 524f93022..2a0123d4b 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/print.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/print.go @@ -16,7 +16,13 @@ import ( func Format(f *FileSyntax) []byte { pr := &printer{} pr.file(f) - return pr.Bytes() + + // remove trailing blank lines + b := pr.Bytes() + for len(b) > 0 && b[len(b)-1] == '\n' && (len(b) == 1 || b[len(b)-2] == '\n') { + b = b[:len(b)-1] + } + return b } // A printer collects the state during printing of a file or expression. @@ -59,7 +65,11 @@ func (p *printer) newline() { } p.trim() - p.printf("\n") + if b := p.Bytes(); len(b) == 0 || (len(b) >= 2 && b[len(b)-1] == '\n' && b[len(b)-2] == '\n') { + // skip the blank line at top of file or after a blank line + } else { + p.printf("\n") + } for i := 0; i < p.margin; i++ { p.printf("\t") } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/read.go b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/read.go index 70947ee77..220568259 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/read.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/read.go @@ -65,7 +65,7 @@ type Comments struct { } // Comment returns the receiver. 
This isn't useful by itself, but -// a Comments struct is embedded into all the expression +// a [Comments] struct is embedded into all the expression // implementation types, and this gives each of those a Comment // method to satisfy the Expr interface. func (c *Comments) Comment() *Comments { @@ -225,7 +225,7 @@ func (x *FileSyntax) Cleanup() { if ww == 0 { continue } - if ww == 1 { + if ww == 1 && len(stmt.RParen.Comments.Before) == 0 { // Collapse block into single line. line := &Line{ Comments: Comments{ @@ -494,7 +494,7 @@ func (in *input) endToken(kind tokenKind) { in.token.endPos = in.pos } -// peek returns the kind of the the next token returned by lex. +// peek returns the kind of the next token returned by lex. func (in *input) peek() tokenKind { return in.token.kind } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/rule.go b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/rule.go index ed2f31aa7..66dcaf980 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/rule.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/rule.go @@ -5,17 +5,17 @@ // Package modfile implements a parser and formatter for go.mod files. // // The go.mod syntax is described in -// https://golang.org/cmd/go/#hdr-The_go_mod_file. +// https://pkg.go.dev/cmd/go/#hdr-The_go_mod_file. // -// The Parse and ParseLax functions both parse a go.mod file and return an +// The [Parse] and [ParseLax] functions both parse a go.mod file and return an // abstract syntax tree. ParseLax ignores unknown statements and may be used to // parse go.mod files that may have been developed with newer versions of Go. // -// The File struct returned by Parse and ParseLax represent an abstract -// go.mod file. File has several methods like AddNewRequire and DropReplace -// that can be used to programmatically edit a file. +// The [File] struct returned by Parse and ParseLax represent an abstract +// go.mod file. File has several methods like [File.AddNewRequire] and +// [File.DropReplace] that can be used to programmatically edit a file. // -// The Format function formats a File back to a byte slice which can be +// The [Format] function formats a File back to a byte slice which can be // written to a file. package modfile @@ -35,12 +35,14 @@ import ( // A File is the parsed, interpreted form of a go.mod file. type File struct { - Module *Module - Go *Go - Require []*Require - Exclude []*Exclude - Replace []*Replace - Retract []*Retract + Module *Module + Go *Go + Toolchain *Toolchain + Godebug []*Godebug + Require []*Require + Exclude []*Exclude + Replace []*Replace + Retract []*Retract Syntax *FileSyntax } @@ -58,6 +60,19 @@ type Go struct { Syntax *Line } +// A Toolchain is the toolchain statement. +type Toolchain struct { + Name string // "go1.21rc1" + Syntax *Line +} + +// A Godebug is a single godebug key=value statement. +type Godebug struct { + Key string + Value string + Syntax *Line +} + // An Exclude is a single exclude statement. type Exclude struct { Mod module.Version @@ -219,7 +234,7 @@ var dontFixRetract VersionFixer = func(_, vers string) (string, error) { // data is the content of the file. // // fix is an optional function that canonicalizes module versions. -// If fix is nil, all module versions must be canonical (module.CanonicalVersion +// If fix is nil, all module versions must be canonical ([module.CanonicalVersion] // must return the same string). 
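A usage sketch of the parsing API documented above, assuming the vendored golang.org/x/mod in this diff (the module path, versions, and godebug key are illustrative):

package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	// A go.mod using the directives this update teaches the parser:
	// a three-part go version, a toolchain line, and a godebug line.
	data := []byte("module example.com/m\n\ngo 1.21.0\n\ntoolchain go1.22.4\n\ngodebug panicnil=1\n")

	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Go.Version)                         // 1.21.0
	fmt.Println(f.Toolchain.Name)                     // go1.22.4
	fmt.Println(f.Godebug[0].Key, f.Godebug[0].Value) // panicnil 1
}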
func Parse(file string, data []byte, fix VersionFixer) (*File, error) { return parseToFile(file, data, fix, true) @@ -282,7 +297,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse }) } continue - case "module", "require", "exclude", "replace", "retract": + case "module", "godebug", "require", "exclude", "replace", "retract": for _, l := range x.Line { f.add(&errs, x, l, x.Token[0], l.Token, fix, strict) } @@ -296,9 +311,16 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse return f, nil } -var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)$`) +var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?([a-z]+[0-9]+)?$`) var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9].*)$`) +// Toolchains must be named beginning with `go1`, +// like "go1.20.3" or "go1.20.3-gccgo". As a special case, "default" is also permitted. +// Note that this regexp is a much looser condition than go/version.IsValid, +// for forward compatibility. +// (This code has to be work to identify new toolchains even if we tweak the syntax in the future.) +var ToolchainRE = lazyregexp.New(`^default$|^go1($|\.)`) + func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) { // If strict is false, this module is a dependency. // We ignore all unknown directives as well as main-module-only @@ -356,7 +378,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a } } if !fixed { - errorf("invalid go version '%s': must match format 1.23", args[0]) + errorf("invalid go version '%s': must match format 1.23.0", args[0]) return } } @@ -364,6 +386,21 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a f.Go = &Go{Syntax: line} f.Go.Version = args[0] + case "toolchain": + if f.Toolchain != nil { + errorf("repeated toolchain statement") + return + } + if len(args) != 1 { + errorf("toolchain directive expects exactly one argument") + return + } else if !ToolchainRE.MatchString(args[0]) { + errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0]) + return + } + f.Toolchain = &Toolchain{Syntax: line} + f.Toolchain.Name = args[0] + case "module": if f.Module != nil { errorf("repeated module statement") @@ -385,6 +422,22 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a } f.Module.Mod = module.Version{Path: s} + case "godebug": + if len(args) != 1 || strings.ContainsAny(args[0], "\"`',") { + errorf("usage: godebug key=value") + return + } + key, value, ok := strings.Cut(args[0], "=") + if !ok { + errorf("usage: godebug key=value") + return + } + f.Godebug = append(f.Godebug, &Godebug{ + Key: key, + Value: value, + Syntax: line, + }) + case "require", "exclude": if len(args) != 2 { errorf("usage: %s module/path v1.2.3", verb) @@ -513,7 +566,10 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V nv := "" if len(args) == arrow+2 { if !IsDirectoryPath(ns) { - return nil, errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)") + if strings.Contains(ns, "@") { + return nil, errorf("replacement module must match format 'path version', not 'path@version'") + } + return nil, errorf("replacement module without version must be directory path (rooted or starting with . 
or ..)") } if filepath.Separator == '/' && strings.Contains(ns, `\`) { return nil, errorf("replacement directory appears to be Windows path (on a non-windows system)") @@ -526,7 +582,6 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V } if IsDirectoryPath(ns) { return nil, errorf("replacement module directory path %q cannot have version", ns) - } } return &Replace{ @@ -602,13 +657,45 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, errorf("go directive expects exactly one argument") return } else if !GoVersionRE.MatchString(args[0]) { - errorf("invalid go version '%s': must match format 1.23", args[0]) + errorf("invalid go version '%s': must match format 1.23.0", args[0]) return } f.Go = &Go{Syntax: line} f.Go.Version = args[0] + case "toolchain": + if f.Toolchain != nil { + errorf("repeated toolchain statement") + return + } + if len(args) != 1 { + errorf("toolchain directive expects exactly one argument") + return + } else if !ToolchainRE.MatchString(args[0]) { + errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0]) + return + } + + f.Toolchain = &Toolchain{Syntax: line} + f.Toolchain.Name = args[0] + + case "godebug": + if len(args) != 1 || strings.ContainsAny(args[0], "\"`',") { + errorf("usage: godebug key=value") + return + } + key, value, ok := strings.Cut(args[0], "=") + if !ok { + errorf("usage: godebug key=value") + return + } + f.Godebug = append(f.Godebug, &Godebug{ + Key: key, + Value: value, + Syntax: line, + }) + case "use": if len(args) != 1 { errorf("usage: %s local/dir", verb) @@ -634,14 +721,15 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, } } -// IsDirectoryPath reports whether the given path should be interpreted -// as a directory path. Just like on the go command line, relative paths +// IsDirectoryPath reports whether the given path should be interpreted as a directory path. +// Just like on the go command line, relative paths starting with a '.' or '..' path component // and rooted paths are directory paths; the rest are module paths. func IsDirectoryPath(ns string) bool { // Because go.mod files can move from one system to another, // we check all known path syntaxes, both Unix and Windows. - return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") || - strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) || + return ns == "." || strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, `.\`) || + ns == ".." || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, `..\`) || + strings.HasPrefix(ns, "/") || strings.HasPrefix(ns, `\`) || len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':' } @@ -878,11 +966,20 @@ func (f *File) Format() ([]byte, error) { } // Cleanup cleans up the file f after any edit operations. -// To avoid quadratic behavior, modifications like DropRequire +// To avoid quadratic behavior, modifications like [File.DropRequire] // clear the entry but do not remove it from the slice. // Cleanup cleans out all the cleared entries. 
func (f *File) Cleanup() { w := 0 + for _, g := range f.Godebug { + if g.Key != "" { + f.Godebug[w] = g + w++ + } + } + f.Godebug = f.Godebug[:w] + + w = 0 for _, r := range f.Require { if r.Mod.Path != "" { f.Require[w] = r @@ -923,12 +1020,14 @@ func (f *File) Cleanup() { func (f *File) AddGoStmt(version string) error { if !GoVersionRE.MatchString(version) { - return fmt.Errorf("invalid language version string %q", version) + return fmt.Errorf("invalid language version %q", version) } if f.Go == nil { var hint Expr if f.Module != nil && f.Module.Syntax != nil { hint = f.Module.Syntax + } else if f.Syntax == nil { + f.Syntax = new(FileSyntax) } f.Go = &Go{ Version: version, @@ -941,6 +1040,83 @@ func (f *File) AddGoStmt(version string) error { return nil } +// DropGoStmt deletes the go statement from the file. +func (f *File) DropGoStmt() { + if f.Go != nil { + f.Go.Syntax.markRemoved() + f.Go = nil + } +} + +// DropToolchainStmt deletes the toolchain statement from the file. +func (f *File) DropToolchainStmt() { + if f.Toolchain != nil { + f.Toolchain.Syntax.markRemoved() + f.Toolchain = nil + } +} + +func (f *File) AddToolchainStmt(name string) error { + if !ToolchainRE.MatchString(name) { + return fmt.Errorf("invalid toolchain name %q", name) + } + if f.Toolchain == nil { + var hint Expr + if f.Go != nil && f.Go.Syntax != nil { + hint = f.Go.Syntax + } else if f.Module != nil && f.Module.Syntax != nil { + hint = f.Module.Syntax + } + f.Toolchain = &Toolchain{ + Name: name, + Syntax: f.Syntax.addLine(hint, "toolchain", name), + } + } else { + f.Toolchain.Name = name + f.Syntax.updateLine(f.Toolchain.Syntax, "toolchain", name) + } + return nil +} + +// AddGodebug sets the first godebug line for key to value, +// preserving any existing comments for that line and removing all +// other godebug lines for key. +// +// If no line currently exists for key, AddGodebug adds a new line +// at the end of the last godebug block. +func (f *File) AddGodebug(key, value string) error { + need := true + for _, g := range f.Godebug { + if g.Key == key { + if need { + g.Value = value + f.Syntax.updateLine(g.Syntax, "godebug", key+"="+value) + need = false + } else { + g.Syntax.markRemoved() + *g = Godebug{} + } + } + } + + if need { + f.addNewGodebug(key, value) + } + return nil +} + +// addNewGodebug adds a new godebug key=value line at the end +// of the last godebug block, regardless of any existing godebug lines for key. +func (f *File) addNewGodebug(key, value string) { + line := f.Syntax.addLine(nil, "godebug", key+"="+value) + g := &Godebug{ + Key: key, + Value: value, + Syntax: line, + } + f.Godebug = append(f.Godebug, g) +} + // AddRequire sets the first require line for path to version vers, // preserving any existing comments for that line and removing all // other lines for path. @@ -992,8 +1168,8 @@ func (f *File) AddNewRequire(path, vers string, indirect bool) { // The requirements in req must specify at most one distinct version for each // module path. // -// If any existing requirements may be removed, the caller should call Cleanup -// after all edits are complete. +// If any existing requirements may be removed, the caller should call +// [File.Cleanup] after all edits are complete. 
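In the same spirit as the Cleanup contract described above, a short sketch of an edit round trip with the vendored helpers (AddGodebug, DropRequire, Cleanup, Format); the file contents and godebug key are illustrative:

package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	src := []byte("module example.com/m\n\ngo 1.21.0\n\nrequire example.com/dep v1.2.3\n")
	f, err := modfile.Parse("go.mod", src, nil)
	if err != nil {
		panic(err)
	}

	_ = f.AddGodebug("http2client", "0") // sets or appends a godebug line
	_ = f.DropRequire("example.com/dep") // clears the entry in place
	f.Cleanup()                          // removes cleared entries before formatting

	out, err := f.Format()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)
}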
func (f *File) SetRequire(req []*Require) { type elem struct { version string @@ -1248,6 +1424,16 @@ func (f *File) SetRequireSeparateIndirect(req []*Require) { f.SortBlocks() } +func (f *File) DropGodebug(key string) error { + for _, g := range f.Godebug { + if g.Key == key { + g.Syntax.markRemoved() + *g = Godebug{} + } + } + return nil +} + func (f *File) DropRequire(path string) error { for _, r := range f.Require { if r.Mod.Path == path { @@ -1384,13 +1570,21 @@ func (f *File) DropRetract(vi VersionInterval) error { func (f *File) SortBlocks() { f.removeDups() // otherwise sorting is unsafe + // semanticSortForExcludeVersionV is the Go version (plus leading "v") at which + // lines in exclude blocks start to use semantic sort instead of lexicographic sort. + // See go.dev/issue/60028. + const semanticSortForExcludeVersionV = "v1.21" + useSemanticSortForExclude := f.Go != nil && semver.Compare("v"+f.Go.Version, semanticSortForExcludeVersionV) >= 0 + for _, stmt := range f.Syntax.Stmt { block, ok := stmt.(*LineBlock) if !ok { continue } less := lineLess - if block.Token[0] == "retract" { + if block.Token[0] == "exclude" && useSemanticSortForExclude { + less = lineExcludeLess + } else if block.Token[0] == "retract" { less = lineRetractLess } sort.SliceStable(block.Line, func(i, j int) bool { @@ -1493,6 +1687,22 @@ func lineLess(li, lj *Line) bool { return len(li.Token) < len(lj.Token) } +// lineExcludeLess reports whether li should be sorted before lj for lines in +// an "exclude" block. +func lineExcludeLess(li, lj *Line) bool { + if len(li.Token) != 2 || len(lj.Token) != 2 { + // Not a known exclude specification. + // Fall back to sorting lexicographically. + return lineLess(li, lj) + } + // An exclude specification has two tokens: ModulePath and Version. + // Compare module path by string order and version by semver rules. + if pi, pj := li.Token[0], lj.Token[0]; pi != pj { + return pi < pj + } + return semver.Compare(li.Token[1], lj.Token[1]) < 0 +} + // lineRetractLess returns whether li should be sorted before lj for lines in // a "retract" block. It treats each line as a version interval. Single versions // are compared as if they were intervals with the same low and high version. diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/work.go b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/work.go index 0c0e52152..8f54897cf 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/work.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/modfile/work.go @@ -12,9 +12,11 @@ import ( // A WorkFile is the parsed, interpreted form of a go.work file. type WorkFile struct { - Go *Go - Use []*Use - Replace []*Replace + Go *Go + Toolchain *Toolchain + Godebug []*Godebug + Use []*Use + Replace []*Replace Syntax *FileSyntax } @@ -33,7 +35,7 @@ type Use struct { // data is the content of the file. // // fix is an optional function that canonicalizes module versions. -// If fix is nil, all module versions must be canonical (module.CanonicalVersion +// If fix is nil, all module versions must be canonical ([module.CanonicalVersion] // must return the same string). 
func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) { fs, err := parse(file, data) @@ -67,7 +69,7 @@ func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) { Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), }) continue - case "use", "replace": + case "godebug", "use", "replace": for _, l := range x.Line { f.add(&errs, l, x.Token[0], l.Token, fix) } @@ -82,7 +84,7 @@ func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) { } // Cleanup cleans up the file f after any edit operations. -// To avoid quadratic behavior, modifications like DropRequire +// To avoid quadratic behavior, modifications like [WorkFile.DropRequire] // clear the entry but do not remove it from the slice. // Cleanup cleans out all the cleared entries. func (f *WorkFile) Cleanup() { @@ -109,7 +111,7 @@ func (f *WorkFile) Cleanup() { func (f *WorkFile) AddGoStmt(version string) error { if !GoVersionRE.MatchString(version) { - return fmt.Errorf("invalid language version string %q", version) + return fmt.Errorf("invalid language version %q", version) } if f.Go == nil { stmt := &Line{Token: []string{"go", version}} @@ -117,7 +119,7 @@ func (f *WorkFile) AddGoStmt(version string) error { Version: version, Syntax: stmt, } - // Find the first non-comment-only block that's and add + // Find the first non-comment-only block and add // the go statement before it. That will keep file comments at the top. i := 0 for i = 0; i < len(f.Syntax.Stmt); i++ { @@ -133,6 +135,105 @@ func (f *WorkFile) AddGoStmt(version string) error { return nil } +func (f *WorkFile) AddToolchainStmt(name string) error { + if !ToolchainRE.MatchString(name) { + return fmt.Errorf("invalid toolchain name %q", name) + } + if f.Toolchain == nil { + stmt := &Line{Token: []string{"toolchain", name}} + f.Toolchain = &Toolchain{ + Name: name, + Syntax: stmt, + } + // Find the go line and add the toolchain line after it. + // Or else find the first non-comment-only block and add + // the toolchain line before it. That will keep file comments at the top. + i := 0 + for i = 0; i < len(f.Syntax.Stmt); i++ { + if line, ok := f.Syntax.Stmt[i].(*Line); ok && len(line.Token) > 0 && line.Token[0] == "go" { + i++ + goto Found + } + } + for i = 0; i < len(f.Syntax.Stmt); i++ { + if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok { + break + } + } + Found: + f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...) + } else { + f.Toolchain.Name = name + f.Syntax.updateLine(f.Toolchain.Syntax, "toolchain", name) + } + return nil +} + +// DropGoStmt deletes the go statement from the file. +func (f *WorkFile) DropGoStmt() { + if f.Go != nil { + f.Go.Syntax.markRemoved() + f.Go = nil + } +} + +// DropToolchainStmt deletes the toolchain statement from the file. +func (f *WorkFile) DropToolchainStmt() { + if f.Toolchain != nil { + f.Toolchain.Syntax.markRemoved() + f.Toolchain = nil + } +} + +// AddGodebug sets the first godebug line for key to value, +// preserving any existing comments for that line and removing all +// other godebug lines for key. +// +// If no line currently exists for key, AddGodebug adds a new line +// at the end of the last godebug block. 
+func (f *WorkFile) AddGodebug(key, value string) error { + need := true + for _, g := range f.Godebug { + if g.Key == key { + if need { + g.Value = value + f.Syntax.updateLine(g.Syntax, "godebug", key+"="+value) + need = false + } else { + g.Syntax.markRemoved() + *g = Godebug{} + } + } + } + + if need { + f.addNewGodebug(key, value) + } + return nil +} + +// addNewGodebug adds a new godebug key=value line at the end +// of the last godebug block, regardless of any existing godebug lines for key. +func (f *WorkFile) addNewGodebug(key, value string) { + line := f.Syntax.addLine(nil, "godebug", key+"="+value) + g := &Godebug{ + Key: key, + Value: value, + Syntax: line, + } + f.Godebug = append(f.Godebug, g) +} + +func (f *WorkFile) DropGodebug(key string) error { + for _, g := range f.Godebug { + if g.Key == key { + g.Syntax.markRemoved() + *g = Godebug{} + } + } + return nil +} + func (f *WorkFile) AddUse(diskPath, modulePath string) error { need := true for _, d := range f.Use { diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/module/module.go b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/module/module.go index c26d1d29e..cac1a899e 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/module/module.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/module/module.go @@ -4,7 +4,7 @@ // Package module defines the module.Version type along with support code. // -// The module.Version type is a simple Path, Version pair: +// The [module.Version] type is a simple Path, Version pair: // // type Version struct { // Path string @@ -12,7 +12,7 @@ // } // // There are no restrictions imposed directly by use of this structure, -// but additional checking functions, most notably Check, verify that +// but additional checking functions, most notably [Check], verify that // a particular path, version pair is valid. // // # Escaped Paths @@ -96,13 +96,13 @@ package module // Changes to the semantics in this file require approval from rsc. import ( + "errors" "fmt" "path" "sort" "strings" "unicode" "unicode/utf8" - "errors" "golang.org/x/mod/semver" ) @@ -140,7 +140,7 @@ type ModuleError struct { Err error } -// VersionError returns a ModuleError derived from a Version and error, +// VersionError returns a [ModuleError] derived from a [Version] and error, // or err itself if it is already such an error. func VersionError(v Version, err error) error { var mErr *ModuleError @@ -169,7 +169,7 @@ func (e *ModuleError) Unwrap() error { return e.Err } // An InvalidVersionError indicates an error specific to a version, with the // module path unknown or specified externally. // -// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError +// A [ModuleError] may wrap an InvalidVersionError, but an InvalidVersionError // must not wrap a ModuleError. type InvalidVersionError struct { Version string @@ -193,8 +193,8 @@ func (e *InvalidVersionError) Error() string { func (e *InvalidVersionError) Unwrap() error { return e.Err } // An InvalidPathError indicates a module, import, or file path doesn't -// satisfy all naming constraints. See CheckPath, CheckImportPath, -// and CheckFilePath for specific restrictions. +// satisfy all naming constraints. See [CheckPath], [CheckImportPath], +// and [CheckFilePath] for specific restrictions. type InvalidPathError struct { Kind string // "module", "import", or "file" Path string @@ -258,7 +258,7 @@ func modPathOK(r rune) bool { return false } -// modPathOK reports whether r can appear in a package import path element. 
+// importPathOK reports whether r can appear in a package import path element. // // Import paths are intermediate between module paths and file paths: we allow // disallow characters that would be confusing or ambiguous as arguments to @@ -294,7 +294,7 @@ func fileNameOK(r rune) bool { } // CheckPath checks that a module path is valid. -// A valid module path is a valid import path, as checked by CheckImportPath, +// A valid module path is a valid import path, as checked by [CheckImportPath], // with three additional constraints. // First, the leading path element (up to the first slash, if any), // by convention a domain name, must contain only lower-case ASCII letters, @@ -380,7 +380,7 @@ const ( // checkPath returns an error describing why the path is not valid. // Because these checks apply to module, import, and file paths, // and because other checks may be applied, the caller is expected to wrap -// this error with InvalidPathError. +// this error with [InvalidPathError]. func checkPath(path string, kind pathKind) error { if !utf8.ValidString(path) { return fmt.Errorf("invalid UTF-8") @@ -506,6 +506,7 @@ var badWindowsNames = []string{ "PRN", "AUX", "NUL", + "COM0", "COM1", "COM2", "COM3", @@ -515,6 +516,7 @@ var badWindowsNames = []string{ "COM7", "COM8", "COM9", + "LPT0", "LPT1", "LPT2", "LPT3", @@ -532,7 +534,7 @@ var badWindowsNames = []string{ // they require ".vN" instead of "/vN", and for all N, not just N >= 2. // SplitPathVersion returns with ok = false when presented with // a path whose last path element does not satisfy the constraints -// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2". +// applied by [CheckPath], such as "example.com/pkg/v1" or "example.com/pkg/v1.2". func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { if strings.HasPrefix(path, "gopkg.in/") { return splitGopkgIn(path) @@ -582,7 +584,7 @@ func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { // MatchPathMajor reports whether the semantic version v // matches the path major version pathMajor. // -// MatchPathMajor returns true if and only if CheckPathMajor returns nil. +// MatchPathMajor returns true if and only if [CheckPathMajor] returns nil. func MatchPathMajor(v, pathMajor string) bool { return CheckPathMajor(v, pathMajor) == nil } @@ -622,7 +624,7 @@ func CheckPathMajor(v, pathMajor string) error { // PathMajorPrefix returns the major-version tag prefix implied by pathMajor. // An empty PathMajorPrefix allows either v0 or v1. // -// Note that MatchPathMajor may accept some versions that do not actually begin +// Note that [MatchPathMajor] may accept some versions that do not actually begin // with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1' // pathMajor, even though that pathMajor implies 'v1' tagging. func PathMajorPrefix(pathMajor string) string { @@ -643,7 +645,7 @@ func PathMajorPrefix(pathMajor string) string { } // CanonicalVersion returns the canonical form of the version string v. -// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible". +// It is the same as [semver.Canonical] except that it preserves the special build suffix "+incompatible". func CanonicalVersion(v string) string { cv := semver.Canonical(v) if semver.Build(v) == "+incompatible" { @@ -652,8 +654,8 @@ func CanonicalVersion(v string) string { return cv } -// Sort sorts the list by Path, breaking ties by comparing Version fields. 
-// The Version fields are interpreted as semantic versions (using semver.Compare) +// Sort sorts the list by Path, breaking ties by comparing [Version] fields. +// The Version fields are interpreted as semantic versions (using [semver.Compare]) // optionally followed by a tie-breaking suffix introduced by a slash character, // like in "v0.0.1/go.mod". func Sort(list []Version) { @@ -793,7 +795,7 @@ func unescapeString(escaped string) (string, bool) { } // MatchPrefixPatterns reports whether any path prefix of target matches one of -// the glob patterns (as defined by path.Match) in the comma-separated globs +// the glob patterns (as defined by [path.Match]) in the comma-separated globs // list. This implements the algorithm used when matching a module path to the // GOPRIVATE environment variable, as described by 'go help module-private'. // diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/module/pseudo.go b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/module/pseudo.go index f04ad3788..9cf19d325 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/module/pseudo.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/module/pseudo.go @@ -125,7 +125,7 @@ func IsPseudoVersion(v string) bool { } // IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base, -// timestamp, and revision, as returned by ZeroPseudoVersion. +// timestamp, and revision, as returned by [ZeroPseudoVersion]. func IsZeroPseudoVersion(v string) bool { return v == ZeroPseudoVersion(semver.Major(v)) } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/semver/semver.go b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/semver/semver.go index a30a22bf2..9a2dfd33a 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/mod/semver/semver.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/mod/semver/semver.go @@ -140,7 +140,7 @@ func Compare(v, w string) int { // Max canonicalizes its arguments and then returns the version string // that compares greater. // -// Deprecated: use Compare instead. In most cases, returning a canonicalized +// Deprecated: use [Compare] instead. In most cases, returning a canonicalized // version is not expected or desired. func Max(v, w string) string { v = Canonical(v) @@ -151,7 +151,7 @@ func Max(v, w string) string { return w } -// ByVersion implements sort.Interface for sorting semantic version strings. +// ByVersion implements [sort.Interface] for sorting semantic version strings. type ByVersion []string func (vs ByVersion) Len() int { return len(vs) } @@ -164,7 +164,7 @@ func (vs ByVersion) Less(i, j int) bool { return vs[i] < vs[j] } -// Sort sorts a list of semantic version strings using ByVersion. +// Sort sorts a list of semantic version strings using [ByVersion]. func Sort(list []string) { sort.Sort(ByVersion(list)) } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sync/errgroup/errgroup.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sync/errgroup/errgroup.go index cbee7a4e2..948a3ee63 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -4,6 +4,9 @@ // Package errgroup provides synchronization, error propagation, and Context // cancelation for groups of goroutines working on subtasks of a common task. +// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. 
package errgroup import ( @@ -20,7 +23,7 @@ type token struct{} // A zero Group is valid, has no limit on the number of active goroutines, // and does not cancel on error. type Group struct { - cancel func() + cancel func(error) wg sync.WaitGroup @@ -43,7 +46,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := withCancelCause(ctx) return &Group{cancel: cancel}, ctx } @@ -52,7 +55,7 @@ func WithContext(ctx context.Context) (*Group, context.Context) { func (g *Group) Wait() error { g.wg.Wait() if g.cancel != nil { - g.cancel() + g.cancel(g.err) } return g.err } @@ -76,7 +79,7 @@ func (g *Group) Go(f func() error) { g.errOnce.Do(func() { g.err = err if g.cancel != nil { - g.cancel() + g.cancel(g.err) } }) } @@ -105,7 +108,7 @@ func (g *Group) TryGo(f func() error) bool { g.errOnce.Do(func() { g.err = err if g.cancel != nil { - g.cancel() + g.cancel(g.err) } }) } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sync/errgroup/go120.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sync/errgroup/go120.go new file mode 100644 index 000000000..f93c740b6 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sync/errgroup/go120.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + return context.WithCancelCause(parent) +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sync/errgroup/pre_go120.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sync/errgroup/pre_go120.go new file mode 100644 index 000000000..88ce33434 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + ctx, cancel := context.WithCancel(parent) + return ctx, func(error) { cancel() } +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/execabs/execabs.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/execabs/execabs.go deleted file mode 100644 index b981cfbb4..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/execabs/execabs.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package execabs is a drop-in replacement for os/exec -// that requires PATH lookups to find absolute paths. -// That is, execabs.Command("cmd") runs the same PATH lookup -// as exec.Command("cmd"), but if the result is a path -// which is relative, the Run and Start methods will report -// an error instead of running the executable. -// -// See https://blog.golang.org/path-security for more information -// about when it may be necessary or appropriate to use this package. 
-package execabs - -import ( - "context" - "fmt" - "os/exec" - "path/filepath" - "reflect" - "unsafe" -) - -// ErrNotFound is the error resulting if a path search failed to find an executable file. -// It is an alias for exec.ErrNotFound. -var ErrNotFound = exec.ErrNotFound - -// Cmd represents an external command being prepared or run. -// It is an alias for exec.Cmd. -type Cmd = exec.Cmd - -// Error is returned by LookPath when it fails to classify a file as an executable. -// It is an alias for exec.Error. -type Error = exec.Error - -// An ExitError reports an unsuccessful exit by a command. -// It is an alias for exec.ExitError. -type ExitError = exec.ExitError - -func relError(file, path string) error { - return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) -} - -// LookPath searches for an executable named file in the directories -// named by the PATH environment variable. If file contains a slash, -// it is tried directly and the PATH is not consulted. The result will be -// an absolute path. -// -// LookPath differs from exec.LookPath in its handling of PATH lookups, -// which are used for file names without slashes. If exec.LookPath's -// PATH lookup would have returned an executable from the current directory, -// LookPath instead returns an error. -func LookPath(file string) (string, error) { - path, err := exec.LookPath(file) - if err != nil && !isGo119ErrDot(err) { - return "", err - } - if filepath.Base(file) == file && !filepath.IsAbs(path) { - return "", relError(file, path) - } - return path, nil -} - -func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) { - // exec.Command was called with a bare binary name and - // exec.LookPath returned a path which is not absolute. - // Set cmd.lookPathErr and clear cmd.Path so that it - // cannot be run. - lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) - if *lookPathErr == nil { - *lookPathErr = relError(name, cmd.Path) - } - cmd.Path = "" - } -} - -// CommandContext is like Command but includes a context. -// -// The provided context is used to kill the process (by calling os.Process.Kill) -// if the context becomes done before the command completes on its own. -func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, name, arg...) - fixCmd(name, cmd) - return cmd - -} - -// Command returns the Cmd struct to execute the named program with the given arguments. -// See exec.Command for most details. -// -// Command differs from exec.Command in its handling of PATH lookups, -// which are used when the program name contains no slashes. -// If exec.Command would have returned an exec.Cmd configured to run an -// executable from the current directory, Command instead -// returns an exec.Cmd that will return an error from Start or Run. -func Command(name string, arg ...string) *exec.Cmd { - cmd := exec.Command(name, arg...) - fixCmd(name, cmd) - return cmd -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/execabs/execabs_go118.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/execabs/execabs_go118.go deleted file mode 100644 index 6ab5f5089..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.19 -// +build !go1.19 - -package execabs - -func isGo119ErrDot(err error) bool { - return false -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/execabs/execabs_go119.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/execabs/execabs_go119.go deleted file mode 100644 index 1e7a9ada0..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 -// +build go1.19 - -package execabs - -import "strings" - -func isGo119ErrDot(err error) bool { - // TODO: return errors.Is(err, exec.ErrDot) - return strings.Contains(err.Error(), "current directory") -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go deleted file mode 100644 index e07899b90..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package unsafeheader contains header declarations for the Go runtime's -// slice and string implementations. -// -// This package allows x/sys to use types equivalent to -// reflect.SliceHeader and reflect.StringHeader without introducing -// a dependency on the (relatively heavy) "reflect" package. -package unsafeheader - -import ( - "unsafe" -) - -// Slice is the runtime representation of a slice. -// It cannot be used safely or portably and its representation may change in a later release. -type Slice struct { - Data unsafe.Pointer - Len int - Cap int -} - -// String is the runtime representation of a string. -// It cannot be used safely or portably and its representation may change in a later release. -type String struct { - Data unsafe.Pointer - Len int -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/aliases.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/aliases.go index a20ebea63..16f90560a 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/aliases.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/aliases.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build windows && go1.9 -// +build windows,go1.9 +//go:build windows package windows diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/empty.s b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/empty.s deleted file mode 100644 index fdbbbcd31..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/empty.s +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 -// +build !go1.12 - -// This file is here to allow bodyless functions with go:linkname for Go 1.11 -// and earlier (see https://golang.org/issue/23311). 
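The errgroup hunks above change the group's cancel hook to func(error) and, on Go 1.20 and later, build the derived context with context.WithCancelCause, so the first failing goroutine's error becomes the context's cancellation cause. A minimal sketch of observing that behaviour (the error value is illustrative):

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())

	errBoom := errors.New("boom")
	g.Go(func() error { return errBoom })
	g.Go(func() error {
		<-ctx.Done() // cancelled once the first goroutine fails
		return ctx.Err()
	})

	fmt.Println(g.Wait())                               // boom
	fmt.Println(errors.Is(context.Cause(ctx), errBoom)) // true on Go 1.20+; pre-1.20 falls back to a plain cancel
}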
diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/env_windows.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/env_windows.go index 92ac05ff4..d4577a423 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/env_windows.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := uintptr(unsafe.Pointer(block)) - for { - entry := UTF16PtrToString((*uint16)(unsafe.Pointer(blockp))) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp += 2 * (uintptr(len(entry)) + 1) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/eventlog.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/eventlog.go index 2cd60645e..6c366955d 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/eventlog.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/eventlog.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/exec_windows.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/exec_windows.go index 75980fd44..9cabbb694 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/exec_windows.go @@ -22,7 +22,7 @@ import ( // but only if there is space or tab inside s. func EscapeArg(s string) string { if len(s) == 0 { - return "\"\"" + return `""` } n := len(s) hasSpace := false @@ -35,7 +35,7 @@ func EscapeArg(s string) string { } } if hasSpace { - n += 2 + n += 2 // Reserve space for quotes. } if n == len(s) { return s @@ -82,36 +82,106 @@ func EscapeArg(s string) string { // in CreateProcess's CommandLine argument, CreateService/ChangeServiceConfig's BinaryPathName argument, // or any program that uses CommandLineToArgv. func ComposeCommandLine(args []string) string { - var commandLine string - for i := range args { - if i > 0 { - commandLine += " " + if len(args) == 0 { + return "" + } + + // Per https://learn.microsoft.com/en-us/windows/win32/api/shellapi/nf-shellapi-commandlinetoargvw: + // “This function accepts command lines that contain a program name; the + // program name can be enclosed in quotation marks or not.” + // + // Unfortunately, it provides no means of escaping interior quotation marks + // within that program name, and we have no way to report them here. + prog := args[0] + mustQuote := len(prog) == 0 + for i := 0; i < len(prog); i++ { + c := prog[i] + if c <= ' ' || (c == '"' && i == 0) { + // Force quotes for not only the ASCII space and tab as described in the + // MSDN article, but also ASCII control characters. + // The documentation for CommandLineToArgvW doesn't say what happens when + // the first argument is not a valid program name, but it empirically + // seems to drop unquoted control characters. 
+ mustQuote = true + break + } + } + var commandLine []byte + if mustQuote { + commandLine = make([]byte, 0, len(prog)+2) + commandLine = append(commandLine, '"') + for i := 0; i < len(prog); i++ { + c := prog[i] + if c == '"' { + // This quote would interfere with our surrounding quotes. + // We have no way to report an error, so just strip out + // the offending character instead. + continue + } + commandLine = append(commandLine, c) + } + commandLine = append(commandLine, '"') + } else { + if len(args) == 1 { + // args[0] is a valid command line representing itself. + // No need to allocate a new slice or string for it. + return prog } - commandLine += EscapeArg(args[i]) + commandLine = []byte(prog) } - return commandLine + + for _, arg := range args[1:] { + commandLine = append(commandLine, ' ') + // TODO(bcmills): since we're already appending to a slice, it would be nice + // to avoid the intermediate allocations of EscapeArg. + // Perhaps we can factor out an appendEscapedArg function. + commandLine = append(commandLine, EscapeArg(arg)...) + } + return string(commandLine) } // DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, // as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that // command lines are passed around. +// DecomposeCommandLine returns an error if commandLine contains NUL. func DecomposeCommandLine(commandLine string) ([]string, error) { if len(commandLine) == 0 { return []string{}, nil } + utf16CommandLine, err := UTF16FromString(commandLine) + if err != nil { + return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine") + } var argc int32 - argv, err := CommandLineToArgv(StringToUTF16Ptr(commandLine), &argc) + argv, err := commandLineToArgv(&utf16CommandLine[0], &argc) if err != nil { return nil, err } defer LocalFree(Handle(unsafe.Pointer(argv))) + var args []string - for _, v := range (*argv)[:argc] { - args = append(args, UTF16ToString((*v)[:])) + for _, p := range unsafe.Slice(argv, argc) { + args = append(args, UTF16PtrToString(p)) } return args, nil } +// CommandLineToArgv parses a Unicode command line string and sets +// argc to the number of parsed arguments. +// +// The returned memory should be freed using a single call to LocalFree. +// +// Note that although the return type of CommandLineToArgv indicates 8192 +// entries of up to 8192 characters each, the actual count of parsed arguments +// may exceed 8192, and the documentation for CommandLineToArgvW does not mention +// any bound on the lengths of the individual argument strings. +// (See https://go.dev/issue/63236.) +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + argp, err := commandLineToArgv(cmd, argc) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(argp)) + return argv, err +} + func CloseOnExec(fd Handle) { SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/mksyscall.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/mksyscall.go index 8563f79c5..dbcdb090c 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/mksyscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build generate -// +build generate package windows diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/race.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/race.go index 9196b089c..0f1bdc386 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/race.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && race -// +build windows,race package windows diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/race0.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/race0.go index 7bae4817a..0c78da78b 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/race0.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && !race -// +build windows,!race package windows diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/security_windows.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/security_windows.go index d414ef13b..6f7d2ac70 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/security_windows.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/security_windows.go @@ -7,8 +7,6 @@ package windows import ( "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) const ( @@ -70,6 +68,7 @@ type UserInfo10 struct { //sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo //sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation //sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree +//sys NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum const ( // do not reorder @@ -1341,21 +1340,14 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() sdLen = min } - var src []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) - h.Data = unsafe.Pointer(selfRelativeSD) - h.Len = sdLen - h.Cap = sdLen - + src := unsafe.Slice((*byte)(unsafe.Pointer(selfRelativeSD)), sdLen) + // SECURITY_DESCRIPTOR has pointers in it, which means checkptr expects for it to + // be aligned properly. When we're copying a Windows-allocated struct to a + // Go-allocated one, make sure that the Go allocation is aligned to the + // pointer size. const psize = int(unsafe.Sizeof(uintptr(0))) - - var dst []byte - h = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) alloc := make([]uintptr, (sdLen+psize-1)/psize) - h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data - h.Len = sdLen - h.Cap = sdLen - + dst := unsafe.Slice((*byte)(unsafe.Pointer(&alloc[0])), sdLen) copy(dst, src) return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/service.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/service.go index f8deca839..a9dc6308d 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/service.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/service.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build windows -// +build windows package windows @@ -141,6 +140,12 @@ const ( SERVICE_DYNAMIC_INFORMATION_LEVEL_START_REASON = 1 ) +type ENUM_SERVICE_STATUS struct { + ServiceName *uint16 + DisplayName *uint16 + ServiceStatus SERVICE_STATUS +} + type SERVICE_STATUS struct { ServiceType uint32 CurrentState uint32 @@ -212,6 +217,10 @@ type SERVICE_FAILURE_ACTIONS struct { Actions *SC_ACTION } +type SERVICE_FAILURE_ACTIONS_FLAG struct { + FailureActionsOnNonCrashFailures int32 +} + type SC_ACTION struct { Type uint32 Delay uint32 @@ -245,3 +254,4 @@ type QUERY_SERVICE_LOCK_STATUS struct { //sys UnsubscribeServiceChangeNotifications(subscription uintptr) = sechost.UnsubscribeServiceChangeNotifications? //sys RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) = advapi32.RegisterServiceCtrlHandlerExW //sys QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) = advapi32.QueryServiceDynamicInformation? +//sys EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) = advapi32.EnumDependentServicesW diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/setupapi_windows.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/setupapi_windows.go index 14027da3f..f8126482f 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/setupapi_windows.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/setupapi_windows.go @@ -296,7 +296,7 @@ const ( // Flag to indicate that the sorting from the INF file should be used. DI_INF_IS_SORTED DI_FLAGS = 0x00008000 - // Flag to indicate that only the the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched. + // Flag to indicate that only the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched. DI_ENUMSINGLEINF DI_FLAGS = 0x00010000 // Flag that prevents ConfigMgr from removing/re-enumerating devices during device diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/str.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/str.go index 4fc01434e..6a4f9ce6a 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/str.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/syscall.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/syscall.go index 72074d582..e85ed6b9c 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/syscall.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows // Package windows contains an interface to the low-level operating system // primitives. 
OS details vary depending on the underlying system, and @@ -30,8 +29,6 @@ import ( "strings" "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) // ByteSliceFromString returns a NUL-terminated slice of bytes @@ -83,13 +80,7 @@ func BytePtrToString(p *byte) string { ptr = unsafe.Pointer(uintptr(ptr) + 1) } - var s []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - h.Data = unsafe.Pointer(p) - h.Len = n - h.Cap = n - - return string(s) + return string(unsafe.Slice(p, n)) } // Single-word zero for use when we need a valid pointer to 0 bytes. diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/syscall_windows.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/syscall_windows.go index be3ec2bd4..6525c62f3 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -10,14 +10,11 @@ import ( errorspkg "errors" "fmt" "runtime" - "strings" "sync" "syscall" "time" "unicode/utf16" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) type Handle uintptr @@ -87,22 +84,13 @@ func StringToUTF16(s string) []uint16 { // s, with a terminating NUL added. If s contains a NUL byte at any // location, it returns (nil, syscall.EINVAL). func UTF16FromString(s string) ([]uint16, error) { - if strings.IndexByte(s, 0) != -1 { - return nil, syscall.EINVAL - } - return utf16.Encode([]rune(s + "\x00")), nil + return syscall.UTF16FromString(s) } // UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, // with a terminating NUL and any bytes after the NUL removed. func UTF16ToString(s []uint16) string { - for i, v := range s { - if v == 0 { - s = s[:i] - break - } - } - return string(utf16.Decode(s)) + return syscall.UTF16ToString(s) } // StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead. @@ -137,28 +125,21 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - var s []uint16 - h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - h.Data = unsafe.Pointer(p) - h.Len = n - h.Cap = n - - return string(utf16.Decode(s)) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } // NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. // This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. func NewCallback(fn interface{}) uintptr { return syscall.NewCallback(fn) } // NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. // This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. 
func NewCallbackCDecl(fn interface{}) uintptr { return syscall.NewCallbackCDecl(fn) } @@ -173,6 +154,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW //sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW //sys SetDefaultDllDirectories(directoryFlags uint32) (err error) +//sys AddDllDirectory(path *uint16) (cookie uintptr, err error) = kernel32.AddDllDirectory +//sys RemoveDllDirectory(cookie uintptr) (err error) = kernel32.RemoveDllDirectory //sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW @@ -182,6 +165,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys DisconnectNamedPipe(pipe Handle) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -210,6 +194,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW //sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW //sys SetEndOfFile(handle Handle) (err error) +//sys SetFileValidData(handle Handle, validDataLength int64) (err error) //sys GetSystemTimeAsFileTime(time *Filetime) //sys GetSystemTimePreciseAsFileTime(time *Filetime) //sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] @@ -232,7 +217,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) -//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys getStartupInfo(startupInfo *StartupInfo) = GetStartupInfoW //sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) //sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) //sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] @@ -251,12 +236,13 @@ func 
NewCallbackCDecl(fn interface{}) uintptr { //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 +//sys GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW //sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW //sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW -//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW +//sys commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW //sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] //sys LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) //sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) @@ -315,12 +301,15 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) = advapi32.RegNotifyChangeKeyValue //sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId //sys ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId +//sys ClosePseudoConsole(console Handle) = kernel32.ClosePseudoConsole +//sys createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) = kernel32.CreatePseudoConsole //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW //sys Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW @@ -360,10 +349,31 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, 
dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys ClearCommBreak(handle Handle) (err error) +//sys ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) +//sys EscapeCommFunction(handle Handle, dwFunc uint32) (err error) +//sys GetCommState(handle Handle, lpDCB *DCB) (err error) +//sys GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) //sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys PurgeComm(handle Handle, dwFlags uint32) (err error) +//sys SetCommBreak(handle Handle) (err error) +//sys SetCommMask(handle Handle, dwEvtMask uint32) (err error) +//sys SetCommState(handle Handle, lpDCB *DCB) (err error) //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) +//sys WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) +//sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows +//sys EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) = user32.EnumChildWindows +//sys GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) = user32.GetClassNameW +//sys GetDesktopWindow() (hwnd HWND) = user32.GetDesktopWindow +//sys GetForegroundWindow() (hwnd HWND) = user32.GetForegroundWindow +//sys IsWindow(hwnd HWND) (isWindow bool) = user32.IsWindow +//sys IsWindowUnicode(hwnd HWND) (isUnicode bool) = user32.IsWindowUnicode +//sys IsWindowVisible(hwnd HWND) (isVisible bool) = user32.IsWindowVisible +//sys GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) = user32.GetGUIThreadInfo +//sys GetLargePageMinimum() (size uintptr) // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -411,12 +421,13 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) = version.VerQueryValueW // Process Status API (PSAPI) -//sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses +//sys enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses //sys EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) = psapi.EnumProcessModules //sys EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) = psapi.EnumProcessModulesEx //sys GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation //sys GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) = psapi.GetModuleFileNameExW //sys GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) = psapi.GetModuleBaseNameW +//sys QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSetEx // NT Native APIs //sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb @@ -438,6 +449,14 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) = 
ntdll.RtlAddFunctionTable //sys RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) = ntdll.RtlDeleteFunctionTable +// Desktop Window Manager API (Dwmapi) +//sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute +//sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute + +// Windows Multimedia API +//sys TimeBeginPeriod (period uint32) (err error) [failretval != 0] = winmm.timeBeginPeriod +//sys TimeEndPeriod (period uint32) (err error) [failretval != 0] = winmm.timeEndPeriod + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. @@ -747,7 +766,7 @@ func Utimes(path string, tv []Timeval) (err error) { if e != nil { return e } - defer Close(h) + defer CloseHandle(h) a := NsecToFiletime(tv[0].Nanoseconds()) w := NsecToFiletime(tv[1].Nanoseconds()) return SetFileTime(h, nil, &a, &w) @@ -767,7 +786,7 @@ func UtimesNano(path string, ts []Timespec) (err error) { if e != nil { return e } - defer Close(h) + defer CloseHandle(h) a := NsecToFiletime(TimespecToNsec(ts[0])) w := NsecToFiletime(TimespecToNsec(ts[1])) return SetFileTime(h, nil, &a, &w) @@ -825,6 +844,9 @@ const socket_error = uintptr(^uint32(0)) //sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup //sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup //sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl +//sys WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceBeginW +//sys WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceNextW +//sys WSALookupServiceEnd(handle Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceEnd //sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket //sys sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto //sys recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) [failretval==-1] = ws2_32.recvfrom @@ -962,7 +984,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { if n > 0 { sl += int32(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. 
sl-- @@ -971,6 +994,32 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { return unsafe.Pointer(&sa.raw), sl, nil } +type RawSockaddrBth struct { + AddressFamily [2]byte + BtAddr [8]byte + ServiceClassId [16]byte + Port [4]byte +} + +type SockaddrBth struct { + BtAddr uint64 + ServiceClassId GUID + Port uint32 + + raw RawSockaddrBth +} + +func (sa *SockaddrBth) sockaddr() (unsafe.Pointer, int32, error) { + family := AF_BTH + sa.raw = RawSockaddrBth{ + AddressFamily: *(*[2]byte)(unsafe.Pointer(&family)), + BtAddr: *(*[8]byte)(unsafe.Pointer(&sa.BtAddr)), + Port: *(*[4]byte)(unsafe.Pointer(&sa.Port)), + ServiceClassId: *(*[16]byte)(unsafe.Pointer(&sa.ServiceClassId)), + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { switch rsa.Addr.Family { case AF_UNIX: @@ -994,8 +1043,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -1081,9 +1129,13 @@ func Shutdown(fd Handle, how int) (err error) { } func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) { - rsa, l, err := to.sockaddr() - if err != nil { - return err + var rsa unsafe.Pointer + var l int32 + if to != nil { + rsa, l, err = to.sockaddr() + if err != nil { + return err + } } return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine) } @@ -1323,6 +1375,17 @@ func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } +func EnumProcesses(processIds []uint32, bytesReturned *uint32) error { + // EnumProcesses syscall expects the size parameter to be in bytes, but the code generated with mksyscall uses + // the length of the processIds slice instead. Hence, this wrapper function is added to fix the discrepancy. + var p *uint32 + if len(processIds) > 0 { + p = &processIds[0] + } + size := uint32(len(processIds) * 4) + return enumProcesses(p, size, bytesReturned) +} + func Getpid() (pid int) { return int(GetCurrentProcessId()) } func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { @@ -1582,6 +1645,11 @@ func SetConsoleCursorPosition(console Handle, position Coord) error { return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) } +func GetStartupInfo(startupInfo *StartupInfo) error { + getStartupInfo(startupInfo) + return nil +} + func (s NTStatus) Errno() syscall.Errno { return rtlNtStatusToDosErrorNoTeb(s) } @@ -1616,12 +1684,8 @@ func NewNTUnicodeString(s string) (*NTUnicodeString, error) { // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. func (s *NTUnicodeString) Slice() []uint16 { - var slice []uint16 - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) - hdr.Data = unsafe.Pointer(s.Buffer) - hdr.Len = int(s.Length) - hdr.Cap = int(s.MaximumLength) - return slice + slice := unsafe.Slice(s.Buffer, s.MaximumLength) + return slice[:s.Length] } func (s *NTUnicodeString) String() string { @@ -1644,12 +1708,8 @@ func NewNTString(s string) (*NTString, error) { // Slice returns a byte slice that aliases the data in the NTString. 
func (s *NTString) Slice() []byte { - var slice []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) - hdr.Data = unsafe.Pointer(s.Buffer) - hdr.Len = int(s.Length) - hdr.Cap = int(s.MaximumLength) - return slice + slice := unsafe.Slice(s.Buffer, s.MaximumLength) + return slice[:s.Length] } func (s *NTString) String() string { @@ -1701,9 +1761,158 @@ func LoadResourceData(module, resInfo Handle) (data []byte, err error) { if err != nil { return } - h := (*unsafeheader.Slice)(unsafe.Pointer(&data)) - h.Data = unsafe.Pointer(ptr) - h.Len = int(size) - h.Cap = int(size) + data = unsafe.Slice((*byte)(unsafe.Pointer(ptr)), size) return } + +// PSAPI_WORKING_SET_EX_BLOCK contains extended working set information for a page. +type PSAPI_WORKING_SET_EX_BLOCK uint64 + +// Valid returns the validity of this page. +// If this bit is 1, the subsequent members are valid; otherwise they should be ignored. +func (b PSAPI_WORKING_SET_EX_BLOCK) Valid() bool { + return (b & 1) == 1 +} + +// ShareCount is the number of processes that share this page. The maximum value of this member is 7. +func (b PSAPI_WORKING_SET_EX_BLOCK) ShareCount() uint64 { + return b.intField(1, 3) +} + +// Win32Protection is the memory protection attributes of the page. For a list of values, see +// https://docs.microsoft.com/en-us/windows/win32/memory/memory-protection-constants +func (b PSAPI_WORKING_SET_EX_BLOCK) Win32Protection() uint64 { + return b.intField(4, 11) +} + +// Shared returns the shared status of this page. +// If this bit is 1, the page can be shared. +func (b PSAPI_WORKING_SET_EX_BLOCK) Shared() bool { + return (b & (1 << 15)) == 1 +} + +// Node is the NUMA node. The maximum value of this member is 63. +func (b PSAPI_WORKING_SET_EX_BLOCK) Node() uint64 { + return b.intField(16, 6) +} + +// Locked returns the locked status of this page. +// If this bit is 1, the virtual page is locked in physical memory. +func (b PSAPI_WORKING_SET_EX_BLOCK) Locked() bool { + return (b & (1 << 22)) == 1 +} + +// LargePage returns the large page status of this page. +// If this bit is 1, the page is a large page. +func (b PSAPI_WORKING_SET_EX_BLOCK) LargePage() bool { + return (b & (1 << 23)) == 1 +} + +// Bad returns the bad status of this page. +// If this bit is 1, the page is has been reported as bad. +func (b PSAPI_WORKING_SET_EX_BLOCK) Bad() bool { + return (b & (1 << 31)) == 1 +} + +// intField extracts an integer field in the PSAPI_WORKING_SET_EX_BLOCK union. +func (b PSAPI_WORKING_SET_EX_BLOCK) intField(start, length int) uint64 { + var mask PSAPI_WORKING_SET_EX_BLOCK + for pos := start; pos < start+length; pos++ { + mask |= (1 << pos) + } + + masked := b & mask + return uint64(masked >> start) +} + +// PSAPI_WORKING_SET_EX_INFORMATION contains extended working set information for a process. +type PSAPI_WORKING_SET_EX_INFORMATION struct { + // The virtual address. + VirtualAddress Pointer + // A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress. + VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK +} + +// CreatePseudoConsole creates a windows pseudo console. +func CreatePseudoConsole(size Coord, in Handle, out Handle, flags uint32, pconsole *Handle) error { + // We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only + // accept arguments that can be casted to uintptr, and Coord can't. 
+ return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), in, out, flags, pconsole) +} + +// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`. +func ResizePseudoConsole(pconsole Handle, size Coord) error { + // We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only + // accept arguments that can be casted to uintptr, and Coord can't. + return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) +} + +// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb. +const ( + CBR_110 = 110 + CBR_300 = 300 + CBR_600 = 600 + CBR_1200 = 1200 + CBR_2400 = 2400 + CBR_4800 = 4800 + CBR_9600 = 9600 + CBR_14400 = 14400 + CBR_19200 = 19200 + CBR_38400 = 38400 + CBR_57600 = 57600 + CBR_115200 = 115200 + CBR_128000 = 128000 + CBR_256000 = 256000 + + DTR_CONTROL_DISABLE = 0x00000000 + DTR_CONTROL_ENABLE = 0x00000010 + DTR_CONTROL_HANDSHAKE = 0x00000020 + + RTS_CONTROL_DISABLE = 0x00000000 + RTS_CONTROL_ENABLE = 0x00001000 + RTS_CONTROL_HANDSHAKE = 0x00002000 + RTS_CONTROL_TOGGLE = 0x00003000 + + NOPARITY = 0 + ODDPARITY = 1 + EVENPARITY = 2 + MARKPARITY = 3 + SPACEPARITY = 4 + + ONESTOPBIT = 0 + ONE5STOPBITS = 1 + TWOSTOPBITS = 2 +) + +// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. +const ( + SETXOFF = 1 + SETXON = 2 + SETRTS = 3 + CLRRTS = 4 + SETDTR = 5 + CLRDTR = 6 + SETBREAK = 8 + CLRBREAK = 9 +) + +// PurgeComm constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. +const ( + PURGE_TXABORT = 0x0001 + PURGE_RXABORT = 0x0002 + PURGE_TXCLEAR = 0x0004 + PURGE_RXCLEAR = 0x0008 +) + +// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. 
+const ( + EV_RXCHAR = 0x0001 + EV_RXFLAG = 0x0002 + EV_TXEMPTY = 0x0004 + EV_CTS = 0x0008 + EV_DSR = 0x0010 + EV_RLSD = 0x0020 + EV_BREAK = 0x0040 + EV_ERR = 0x0080 + EV_RING = 0x0100 +) diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/types_windows.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/types_windows.go index f9eaca528..d8cb71db0 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/types_windows.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/types_windows.go @@ -247,6 +247,7 @@ const ( PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007 PROC_THREAD_ATTRIBUTE_UMS_THREAD = 0x00030006 PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL = 0x0002000b + PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x00020016 ) const ( @@ -1093,7 +1094,33 @@ const ( SOMAXCONN = 0x7fffffff - TCP_NODELAY = 1 + TCP_NODELAY = 1 + TCP_EXPEDITED_1122 = 2 + TCP_KEEPALIVE = 3 + TCP_MAXSEG = 4 + TCP_MAXRT = 5 + TCP_STDURG = 6 + TCP_NOURG = 7 + TCP_ATMARK = 8 + TCP_NOSYNRETRIES = 9 + TCP_TIMESTAMPS = 10 + TCP_OFFLOAD_PREFERENCE = 11 + TCP_CONGESTION_ALGORITHM = 12 + TCP_DELAY_FIN_ACK = 13 + TCP_MAXRTMS = 14 + TCP_FASTOPEN = 15 + TCP_KEEPCNT = 16 + TCP_KEEPIDLE = TCP_KEEPALIVE + TCP_KEEPINTVL = 17 + TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18 + TCP_ICMP_ERROR_INFO = 19 + + UDP_NOCHECKSUM = 1 + UDP_SEND_MSG_SIZE = 2 + UDP_RECV_MAX_COALESCED_SIZE = 3 + UDP_CHECKSUM_COVERAGE = 20 + + UDP_COALESCED_INFO = 3 SHUT_RD = 0 SHUT_WR = 1 @@ -1243,6 +1270,51 @@ const ( DnsSectionAdditional = 0x0003 ) +const ( + // flags of WSALookupService + LUP_DEEP = 0x0001 + LUP_CONTAINERS = 0x0002 + LUP_NOCONTAINERS = 0x0004 + LUP_NEAREST = 0x0008 + LUP_RETURN_NAME = 0x0010 + LUP_RETURN_TYPE = 0x0020 + LUP_RETURN_VERSION = 0x0040 + LUP_RETURN_COMMENT = 0x0080 + LUP_RETURN_ADDR = 0x0100 + LUP_RETURN_BLOB = 0x0200 + LUP_RETURN_ALIASES = 0x0400 + LUP_RETURN_QUERY_STRING = 0x0800 + LUP_RETURN_ALL = 0x0FF0 + LUP_RES_SERVICE = 0x8000 + + LUP_FLUSHCACHE = 0x1000 + LUP_FLUSHPREVIOUS = 0x2000 + + LUP_NON_AUTHORITATIVE = 0x4000 + LUP_SECURE = 0x8000 + LUP_RETURN_PREFERRED_NAMES = 0x10000 + LUP_DNS_ONLY = 0x20000 + + LUP_ADDRCONFIG = 0x100000 + LUP_DUAL_ADDR = 0x200000 + LUP_FILESERVER = 0x400000 + LUP_DISABLE_IDN_ENCODING = 0x00800000 + LUP_API_ANSI = 0x01000000 + + LUP_RESOLUTION_HANDLE = 0x80000000 +) + +const ( + // values of WSAQUERYSET's namespace + NS_ALL = 0 + NS_DNS = 12 + NS_NLA = 15 + NS_BTH = 16 + NS_EMAIL = 37 + NS_PNRPNAME = 38 + NS_PNRPCLOUD = 39 +) + type DNSSRVData struct { Target *uint16 Priority uint16 @@ -2094,6 +2166,12 @@ const ( ENABLE_LVB_GRID_WORLDWIDE = 0x10 ) +// Pseudo console related constants used for the flags parameter to +// CreatePseudoConsole. 
See: https://learn.microsoft.com/en-us/windows/console/createpseudoconsole +const ( + PSEUDOCONSOLE_INHERIT_CURSOR = 0x1 +) + type Coord struct { X int16 Y int16 @@ -2175,19 +2253,23 @@ type JOBOBJECT_BASIC_UI_RESTRICTIONS struct { } const ( - // JobObjectInformationClass + // JobObjectInformationClass for QueryInformationJobObject and SetInformationJobObject JobObjectAssociateCompletionPortInformation = 7 + JobObjectBasicAccountingInformation = 1 + JobObjectBasicAndIoAccountingInformation = 8 JobObjectBasicLimitInformation = 2 + JobObjectBasicProcessIdList = 3 JobObjectBasicUIRestrictions = 4 JobObjectCpuRateControlInformation = 15 JobObjectEndOfJobTimeInformation = 6 JobObjectExtendedLimitInformation = 9 JobObjectGroupInformation = 11 JobObjectGroupInformationEx = 14 - JobObjectLimitViolationInformation2 = 35 + JobObjectLimitViolationInformation = 13 + JobObjectLimitViolationInformation2 = 34 JobObjectNetRateControlInformation = 32 JobObjectNotificationLimitInformation = 12 - JobObjectNotificationLimitInformation2 = 34 + JobObjectNotificationLimitInformation2 = 33 JobObjectSecurityLimitInformation = 5 ) @@ -3213,3 +3295,112 @@ type ModuleInfo struct { } const ALL_PROCESSOR_GROUPS = 0xFFFF + +type Rect struct { + Left int32 + Top int32 + Right int32 + Bottom int32 +} + +type GUIThreadInfo struct { + Size uint32 + Flags uint32 + Active HWND + Focus HWND + Capture HWND + MenuOwner HWND + MoveSize HWND + CaretHandle HWND + CaretRect Rect +} + +const ( + DWMWA_NCRENDERING_ENABLED = 1 + DWMWA_NCRENDERING_POLICY = 2 + DWMWA_TRANSITIONS_FORCEDISABLED = 3 + DWMWA_ALLOW_NCPAINT = 4 + DWMWA_CAPTION_BUTTON_BOUNDS = 5 + DWMWA_NONCLIENT_RTL_LAYOUT = 6 + DWMWA_FORCE_ICONIC_REPRESENTATION = 7 + DWMWA_FLIP3D_POLICY = 8 + DWMWA_EXTENDED_FRAME_BOUNDS = 9 + DWMWA_HAS_ICONIC_BITMAP = 10 + DWMWA_DISALLOW_PEEK = 11 + DWMWA_EXCLUDED_FROM_PEEK = 12 + DWMWA_CLOAK = 13 + DWMWA_CLOAKED = 14 + DWMWA_FREEZE_REPRESENTATION = 15 + DWMWA_PASSIVE_UPDATE_MODE = 16 + DWMWA_USE_HOSTBACKDROPBRUSH = 17 + DWMWA_USE_IMMERSIVE_DARK_MODE = 20 + DWMWA_WINDOW_CORNER_PREFERENCE = 33 + DWMWA_BORDER_COLOR = 34 + DWMWA_CAPTION_COLOR = 35 + DWMWA_TEXT_COLOR = 36 + DWMWA_VISIBLE_FRAME_BORDER_THICKNESS = 37 +) + +type WSAQUERYSET struct { + Size uint32 + ServiceInstanceName *uint16 + ServiceClassId *GUID + Version *WSAVersion + Comment *uint16 + NameSpace uint32 + NSProviderId *GUID + Context *uint16 + NumberOfProtocols uint32 + AfpProtocols *AFProtocols + QueryString *uint16 + NumberOfCsAddrs uint32 + SaBuffer *CSAddrInfo + OutputFlags uint32 + Blob *BLOB +} + +type WSAVersion struct { + Version uint32 + EnumerationOfComparison int32 +} + +type AFProtocols struct { + AddressFamily int32 + Protocol int32 +} + +type CSAddrInfo struct { + LocalAddr SocketAddress + RemoteAddr SocketAddress + SocketType int32 + Protocol int32 +} + +type BLOB struct { + Size uint32 + BlobData *byte +} + +type ComStat struct { + Flags uint32 + CBInQue uint32 + CBOutQue uint32 +} + +type DCB struct { + DCBlength uint32 + BaudRate uint32 + Flags uint32 + wReserved uint16 + XonLim uint16 + XoffLim uint16 + ByteSize uint8 + Parity uint8 + StopBits uint8 + XonChar byte + XoffChar byte + ErrorChar byte + EofChar byte + EvtChar byte + wReserved1 uint16 +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 678262cda..9f73df75b 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ 
b/MobileLibrary/go-mobile/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -40,6 +40,7 @@ var ( modadvapi32 = NewLazySystemDLL("advapi32.dll") modcrypt32 = NewLazySystemDLL("crypt32.dll") moddnsapi = NewLazySystemDLL("dnsapi.dll") + moddwmapi = NewLazySystemDLL("dwmapi.dll") modiphlpapi = NewLazySystemDLL("iphlpapi.dll") modkernel32 = NewLazySystemDLL("kernel32.dll") modmswsock = NewLazySystemDLL("mswsock.dll") @@ -54,6 +55,7 @@ var ( moduser32 = NewLazySystemDLL("user32.dll") moduserenv = NewLazySystemDLL("userenv.dll") modversion = NewLazySystemDLL("version.dll") + modwinmm = NewLazySystemDLL("winmm.dll") modwintrust = NewLazySystemDLL("wintrust.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") @@ -85,6 +87,7 @@ var ( procDeleteService = modadvapi32.NewProc("DeleteService") procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procEnumDependentServicesW = modadvapi32.NewProc("EnumDependentServicesW") procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") @@ -175,14 +178,20 @@ var ( procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") + procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procClearCommBreak = modkernel32.NewProc("ClearCommBreak") + procClearCommError = modkernel32.NewProc("ClearCommError") procCloseHandle = modkernel32.NewProc("CloseHandle") + procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") procCreateEventExW = modkernel32.NewProc("CreateEventExW") @@ -197,6 +206,7 @@ var ( procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") procCreatePipe = modkernel32.NewProc("CreatePipe") procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") @@ -204,7 +214,9 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") 
procFindClose = modkernel32.NewProc("FindClose") @@ -228,6 +240,8 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") + procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -246,9 +260,11 @@ var ( procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileTime = modkernel32.NewProc("GetFileTime") procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLargePageMinimum = modkernel32.NewProc("GetLargePageMinimum") procGetLastError = modkernel32.NewProc("GetLastError") procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") @@ -312,6 +328,7 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") + procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") @@ -321,8 +338,13 @@ var ( procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory") procReleaseMutex = modkernel32.NewProc("ReleaseMutex") procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procRemoveDllDirectory = modkernel32.NewProc("RemoveDllDirectory") procResetEvent = modkernel32.NewProc("ResetEvent") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommBreak = modkernel32.NewProc("SetCommBreak") + procSetCommMask = modkernel32.NewProc("SetCommMask") + procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -338,6 +360,7 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -348,6 +371,7 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = 
modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -366,6 +390,7 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitCommEvent = modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -376,6 +401,7 @@ var ( procTransmitFile = modmswsock.NewProc("TransmitFile") procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserEnum = modnetapi32.NewProc("NetUserEnum") procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") procNtCreateFile = modntdll.NewProc("NtCreateFile") procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") @@ -408,6 +434,7 @@ var ( procGetModuleBaseNameW = modpsapi.NewProc("GetModuleBaseNameW") procGetModuleFileNameExW = modpsapi.NewProc("GetModuleFileNameExW") procGetModuleInformation = modpsapi.NewProc("GetModuleInformation") + procQueryWorkingSetEx = modpsapi.NewProc("QueryWorkingSetEx") procSubscribeServiceChangeNotifications = modsechost.NewProc("SubscribeServiceChangeNotifications") procUnsubscribeServiceChangeNotifications = modsechost.NewProc("UnsubscribeServiceChangeNotifications") procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") @@ -443,9 +470,18 @@ var ( procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procEnumChildWindows = moduser32.NewProc("EnumChildWindows") + procEnumWindows = moduser32.NewProc("EnumWindows") procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") + procGetClassNameW = moduser32.NewProc("GetClassNameW") + procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") + procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") + procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") procGetShellWindow = moduser32.NewProc("GetShellWindow") procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") + procIsWindow = moduser32.NewProc("IsWindow") + procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") + procIsWindowVisible = moduser32.NewProc("IsWindowVisible") procMessageBoxW = moduser32.NewProc("MessageBoxW") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") @@ -453,6 +489,8 @@ var ( procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW") procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW") procVerQueryValueW = modversion.NewProc("VerQueryValueW") + proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod") + proctimeEndPeriod = modwinmm.NewProc("timeEndPeriod") procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx") procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") @@ -460,6 +498,9 @@ var ( procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procWSALookupServiceBeginW = modws2_32.NewProc("WSALookupServiceBeginW") + 
procWSALookupServiceEnd = modws2_32.NewProc("WSALookupServiceEnd") + procWSALookupServiceNextW = modws2_32.NewProc("WSALookupServiceNextW") procWSARecv = modws2_32.NewProc("WSARecv") procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") procWSASend = modws2_32.NewProc("WSASend") @@ -717,6 +758,14 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes return } +func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) if r1 == 0 { @@ -1524,6 +1573,22 @@ func DnsRecordListFree(rl *DNSRecord, freetype uint32) { return } +func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { + r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { + r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1556,6 +1621,15 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func AddDllDirectory(path *uint16) (cookie uintptr, err error) { + r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + cookie = uintptr(r0) + if cookie == 0 { + err = errnoErr(e1) + } + return +} + func AssignProcessToJobObject(job Handle, process Handle) (err error) { r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) if r1 == 0 { @@ -1580,6 +1654,22 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } +func ClearCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), 
uintptr(unsafe.Pointer(lpStat))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1588,6 +1678,11 @@ func CloseHandle(handle Handle) (err error) { return } +func ClosePseudoConsole(console Handle) { + syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0) + return +} + func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { @@ -1717,6 +1812,14 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA return } +func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) { + r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0) + if r0 != 0 { + hr = syscall.Errno(r0) + } + return +} + func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) if r1&0xff == 0 { @@ -1771,6 +1874,14 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } +func DisconnectNamedPipe(pipe Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1783,6 +1894,14 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } +func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitProcess(exitcode uint32) { syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -1984,6 +2103,22 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } +func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2124,6 +2259,14 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, return } +func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + 
} + return +} + func GetFileType(filehandle Handle) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) n = uint32(r0) @@ -2151,6 +2294,12 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( return } +func GetLargePageMinimum() (size uintptr) { + r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + size = uintptr(r0) + return +} + func GetLastError() (lasterr error) { r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) if r0 != 0 { @@ -2319,11 +2468,8 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin return } -func GetStartupInfo(startupInfo *StartupInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } +func getStartupInfo(startupInfo *StartupInfo) { + syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) return } @@ -2725,6 +2871,14 @@ func PulseEvent(event Handle) (err error) { return } +func PurgeComm(handle Handle, dwFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2806,6 +2960,14 @@ func RemoveDirectory(path *uint16) (err error) { return } +func RemoveDllDirectory(cookie uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ResetEvent(event Handle) (err error) { r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) if r1 == 0 { @@ -2814,6 +2976,14 @@ func ResetEvent(event Handle) (err error) { return } +func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { + r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + if r0 != 0 { + hr = syscall.Errno(r0) + } + return +} + func ResumeThread(thread Handle) (ret uint32, err error) { r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) ret = uint32(r0) @@ -2823,6 +2993,30 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } +func SetCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2951,6 +3145,14 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + 
r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3036,6 +3238,14 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = uint32(r0) @@ -3182,6 +3392,14 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } +func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { @@ -3269,6 +3487,14 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete return } +func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { + r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) if r0 != 0 { @@ -3468,12 +3694,8 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u return } -func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { - var _p0 *uint32 - if len(processIds) > 0 { - _p0 = &processIds[0] - } - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) +func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -3504,6 +3726,14 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb return } +func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) { + r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback 
uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) { ret = procSubscribeServiceChangeNotifications.Find() if ret != nil { @@ -3768,9 +3998,9 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er return } -func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { +func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) - argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) + argv = (**uint16)(unsafe.Pointer(r0)) if argv == nil { err = errnoErr(e1) } @@ -3793,6 +4023,19 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui return } +func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) { + syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param)) + return +} + +func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { + r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitWindowsEx(flags uint32, reason uint32) (err error) { r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) if r1 == 0 { @@ -3801,6 +4044,35 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) { return } +func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) { + r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) + copied = int32(r0) + if copied == 0 { + err = errnoErr(e1) + } + return +} + +func GetDesktopWindow() (hwnd HWND) { + r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0) + hwnd = HWND(r0) + return +} + +func GetForegroundWindow() (hwnd HWND) { + r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0) + hwnd = HWND(r0) + return +} + +func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetShellWindow() (shellWindow HWND) { r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) shellWindow = HWND(r0) @@ -3816,6 +4088,24 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { return } +func IsWindow(hwnd HWND) (isWindow bool) { + r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0) + isWindow = r0 != 0 + return +} + +func IsWindowUnicode(hwnd HWND) (isUnicode bool) { + r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0) + isUnicode = r0 != 0 + return +} + +func IsWindowVisible(hwnd HWND) (isVisible bool) { + r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0) + isVisible = r0 != 0 + return +} + func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) ret = int32(r0) @@ -3905,6 +4195,22 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint return } +func TimeBeginPeriod(period uint32) (err error) { + r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 
0, 0)
+	if r1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func TimeEndPeriod(period uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0)
+	if r1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) {
 	r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
 	if r0 != 0 {
@@ -3963,6 +4269,30 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo
 	return
 }
 
+func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle)))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSALookupServiceEnd(handle Handle) (err error) {
+	r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0)
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) {
+	r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0)
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) {
 	r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
 	if r1 == socket_error {
diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/expect/expect.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/expect/expect.go
index f5172ceab..fdc023c89 100644
--- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/expect/expect.go
+++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/expect/expect.go
@@ -4,7 +4,7 @@
 
 /*
 Package expect provides support for interpreting structured comments in Go
-source code as test expectations.
+source code (including go.mod and go.work files) as test expectations.
 
 This is primarily intended for writing tests of things that process Go
 source files, although it does not directly depend on the testing package.
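For context, the user32 wrappers added to the vendored x/sys/windows above (EnumWindows, IsWindowVisible, GetClassName, GetWindowThreadProcessId) are normally driven through syscall.NewCallback. The following sketch is illustrative only and not part of this change; it assumes the standard golang.org/x/sys/windows import path rather than the vendored copy.

//go:build windows

package main

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/windows"
)

func main() {
	// Enumerate top-level windows; the callback returns 1 to continue
	// and 0 to stop the enumeration.
	cb := syscall.NewCallback(func(hwnd windows.HWND, lparam uintptr) uintptr {
		if !windows.IsWindowVisible(hwnd) {
			return 1
		}
		var pid uint32
		tid, _ := windows.GetWindowThreadProcessId(hwnd, &pid)
		class := make([]uint16, 256)
		n, _ := windows.GetClassName(hwnd, &class[0], int32(len(class)))
		fmt.Printf("hwnd=%#x class=%q pid=%d tid=%d\n",
			uintptr(hwnd), windows.UTF16ToString(class[:n]), pid, tid)
		return 1
	})
	if err := windows.EnumWindows(cb, nil); err != nil {
		fmt.Println("EnumWindows:", err)
	}
}

The callback is kept deliberately small: EnumWindows calls it synchronously, so any blocking work inside it stalls the whole enumeration.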
diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/expect/extract.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/expect/extract.go index a01b8ce9c..c571c5ba4 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/expect/extract.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/expect/extract.go @@ -54,7 +54,7 @@ func Parse(fset *token.FileSet, filename string, content []byte) ([]*Note, error } f := fset.AddFile(filename, -1, len(content)) f.SetLinesForContent(content) - notes, err := extractMod(fset, file) + notes, err := extractModWork(fset, file.Syntax.Stmt) if err != nil { return nil, err } @@ -64,39 +64,45 @@ func Parse(fset *token.FileSet, filename string, content []byte) ([]*Note, error note.Pos += token.Pos(f.Base()) } return notes, nil + case ".work": + file, err := modfile.ParseWork(filename, content, nil) + if err != nil { + return nil, err + } + f := fset.AddFile(filename, -1, len(content)) + f.SetLinesForContent(content) + notes, err := extractModWork(fset, file.Syntax.Stmt) + if err != nil { + return nil, err + } + // As with go.mod files, we need to compute a synthetic token.Pos. + for _, note := range notes { + note.Pos += token.Pos(f.Base()) + } + return notes, nil } return nil, nil } -// extractMod collects all the notes present in a go.mod file. +// extractModWork collects all the notes present in a go.mod file or go.work +// file, by way of the shared modfile.Expr statement node. +// // Each comment whose text starts with @ is parsed as a comma-separated // sequence of notes. // See the package documentation for details about the syntax of those // notes. // Only allow notes to appear with the following format: "//@mark()" or // @mark() -func extractMod(fset *token.FileSet, file *modfile.File) ([]*Note, error) { +func extractModWork(fset *token.FileSet, exprs []modfile.Expr) ([]*Note, error) { var notes []*Note - for _, stmt := range file.Syntax.Stmt { + for _, stmt := range exprs { comment := stmt.Comment() if comment == nil { continue } - // Handle the case for markers of `// indirect` to be on the line before - // the require statement. - // TODO(golang/go#36894): have a more intuitive approach for // indirect - for _, cmt := range comment.Before { - text, adjust := getAdjustedNote(cmt.Token) - if text == "" { - continue - } - parsed, err := parse(fset, token.Pos(int(cmt.Start.Byte)+adjust), text) - if err != nil { - return nil, err - } - notes = append(notes, parsed...) - } - // Handle the normal case for markers on the same line. - for _, cmt := range comment.Suffix { + var allComments []modfile.Comment + allComments = append(allComments, comment.Before...) + allComments = append(allComments, comment.Suffix...) 
+ for _, cmt := range allComments { text, adjust := getAdjustedNote(cmt.Token) if text == "" { continue diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 2ed25a750..137cc8df1 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -27,10 +27,9 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "os/exec" - "golang.org/x/tools/go/internal/gcimporter" + "golang.org/x/tools/internal/gcimporter" ) // Find returns the name of an object (.o) or archive (.a) file @@ -48,7 +47,7 @@ import ( func Find(importPath, srcDir string) (filename, path string) { cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) cmd.Dir = srcDir - out, err := cmd.CombinedOutput() + out, err := cmd.Output() if err != nil { return "", "" } @@ -85,9 +84,26 @@ func NewReader(r io.Reader) (io.Reader, error) { } } +// readAll works the same way as io.ReadAll, but avoids allocations and copies +// by preallocating a byte slice of the necessary size if the size is known up +// front. This is always possible when the input is an archive. In that case, +// NewReader will return the known size using an io.LimitedReader. +func readAll(r io.Reader) ([]byte, error) { + if lr, ok := r.(*io.LimitedReader); ok { + data := make([]byte, lr.N) + _, err := io.ReadFull(lr, data) + return data, err + } + return io.ReadAll(r) +} + // Read reads export data from in, decodes it, and returns type // information for the package. -// The package name is specified by path. +// +// The package path (effectively its linker symbol prefix) is +// specified by path, since unlike the package name, this information +// may not be recorded in the export data. +// // File position information is added to fset. // // Read may inspect and add to the imports map to ensure that references @@ -98,7 +114,7 @@ func NewReader(r io.Reader) (io.Reader, error) { // // On return, the state of the reader is undefined. func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { - data, err := ioutil.ReadAll(in) + data, err := readAll(in) if err != nil { return nil, fmt.Errorf("reading export data for %q: %v", path, err) } @@ -107,26 +123,19 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) } - // The App Engine Go runtime v1.6 uses the old export data format. - // TODO(adonovan): delete once v1.7 has been around for a while. - if bytes.HasPrefix(data, []byte("package ")) { - return gcimporter.ImportData(imports, path, path, bytes.NewReader(data)) - } - // The indexed export format starts with an 'i'; the older // binary export format starts with a 'c', 'd', or 'v' // (from "version"). Select appropriate importer. 
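For context, the surrounding gcexportdata.go hunks change how export data is consumed: the pre-Go1.11 binary formats are now rejected, the indexed and unified formats are decoded, and readAll preallocates when the size is known via io.LimitedReader. A small, self-contained sketch of the package's public Find/NewReader/Read flow, written against the documented signatures shown above; the program itself is illustrative and not part of this change.

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Find asks the go tool where the compiled export data for a package
	// lives; empty results mean the package could not be resolved.
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}

	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader strips any archive framing; Read then decodes the indexed
	// or unified export data into a *types.Package.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "has", len(pkg.Scope().Names()), "top-level objects")
}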
if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + case 'i': // indexed, till go1.19 + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err @@ -161,7 +170,7 @@ func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { // // Experimental: This API is experimental and may change in the future. func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { - data, err := ioutil.ReadAll(in) + data, err := readAll(in) if err != nil { return nil, fmt.Errorf("reading export bundle: %v", err) } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go deleted file mode 100644 index 196cb3f9b..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go +++ /dev/null @@ -1,853 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/ast" - "go/constant" - "go/token" - "go/types" - "math" - "math/big" - "sort" - "strings" -) - -// If debugFormat is set, each integer and string value is preceded by a marker -// and position information in the encoding. This mechanism permits an importer -// to recognize immediately when it is out of sync. The importer recognizes this -// mode automatically (i.e., it can import export data produced with debugging -// support even if debugFormat is not set at the time of import). This mode will -// lead to massively larger export data (by a factor of 2 to 3) and should only -// be enabled during development and debugging. -// -// NOTE: This flag is the first flag to enable if importing dies because of -// (suspected) format errors, and whenever a change is made to the format. -const debugFormat = false // default: false - -// Current export format version. Increase with each format change. -// -// Note: The latest binary (non-indexed) export format is at version 6. -// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. -// -// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE -// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding -const exportVersion = 4 - -// trackAllTypes enables cycle tracking for all types, not just named -// types. 
The existing compiler invariants assume that unnamed types -// that are not completely set up are not used, or else there are spurious -// errors. -// If disabled, only named types are tracked, possibly leading to slightly -// less efficient encoding in rare cases. It also prevents the export of -// some corner-case type declarations (but those are not handled correctly -// with with the textual export format either). -// TODO(gri) enable and remove once issues caused by it are fixed -const trackAllTypes = false - -type exporter struct { - fset *token.FileSet - out bytes.Buffer - - // object -> index maps, indexed in order of serialization - strIndex map[string]int - pkgIndex map[*types.Package]int - typIndex map[types.Type]int - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - - // debugging support - written int // bytes written - indent int // for trace -} - -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -func internalErrorf(format string, args ...interface{}) error { - return internalError(fmt.Sprintf(format, args...)) -} - -// BExportData returns binary export data for pkg. -// If no file set is provided, position info will be missing. -func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { - if !debug { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - } - - p := exporter{ - fset: fset, - strIndex: map[string]int{"": 0}, // empty string is mapped to 0 - pkgIndex: make(map[*types.Package]int), - typIndex: make(map[types.Type]int), - posInfoFormat: true, // TODO(gri) might become a flag, eventually - } - - // write version info - // The version string must start with "version %d" where %d is the version - // number. Additional debugging information may follow after a blank; that - // text is ignored by the importer. 
- p.rawStringln(fmt.Sprintf("version %d", exportVersion)) - var debug string - if debugFormat { - debug = "debug" - } - p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly - p.bool(trackAllTypes) - p.bool(p.posInfoFormat) - - // --- generic export data --- - - // populate type map with predeclared "known" types - for index, typ := range predeclared() { - p.typIndex[typ] = index - } - if len(p.typIndex) != len(predeclared()) { - return nil, internalError("duplicate entries in type map?") - } - - // write package data - p.pkg(pkg, true) - if trace { - p.tracef("\n") - } - - // write objects - objcount := 0 - scope := pkg.Scope() - for _, name := range scope.Names() { - if !ast.IsExported(name) { - continue - } - if trace { - p.tracef("\n") - } - p.obj(scope.Lookup(name)) - objcount++ - } - - // indicate end of list - if trace { - p.tracef("\n") - } - p.tag(endTag) - - // for self-verification only (redundant) - p.int(objcount) - - if trace { - p.tracef("\n") - } - - // --- end of export data --- - - return p.out.Bytes(), nil -} - -func (p *exporter) pkg(pkg *types.Package, emptypath bool) { - if pkg == nil { - panic(internalError("unexpected nil pkg")) - } - - // if we saw the package before, write its index (>= 0) - if i, ok := p.pkgIndex[pkg]; ok { - p.index('P', i) - return - } - - // otherwise, remember the package, write the package tag (< 0) and package data - if trace { - p.tracef("P%d = { ", len(p.pkgIndex)) - defer p.tracef("} ") - } - p.pkgIndex[pkg] = len(p.pkgIndex) - - p.tag(packageTag) - p.string(pkg.Name()) - if emptypath { - p.string("") - } else { - p.string(pkg.Path()) - } -} - -func (p *exporter) obj(obj types.Object) { - switch obj := obj.(type) { - case *types.Const: - p.tag(constTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - p.value(obj.Val()) - - case *types.TypeName: - if obj.IsAlias() { - p.tag(aliasTag) - p.pos(obj) - p.qualifiedName(obj) - } else { - p.tag(typeTag) - } - p.typ(obj.Type()) - - case *types.Var: - p.tag(varTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - - case *types.Func: - p.tag(funcTag) - p.pos(obj) - p.qualifiedName(obj) - sig := obj.Type().(*types.Signature) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - - default: - panic(internalErrorf("unexpected object %v (%T)", obj, obj)) - } -} - -func (p *exporter) pos(obj types.Object) { - if !p.posInfoFormat { - return - } - - file, line := p.fileLine(obj) - if file == p.prevFile { - // common case: write line delta - // delta == 0 means different file or no line change - delta := line - p.prevLine - p.int(delta) - if delta == 0 { - p.int(-1) // -1 means no file change - } - } else { - // different file - p.int(0) - // Encode filename as length of common prefix with previous - // filename, followed by (possibly empty) suffix. Filenames - // frequently share path prefixes, so this can save a lot - // of space and make export data size less dependent on file - // path length. The suffix is unlikely to be empty because - // file names tend to end in ".go". 
- n := commonPrefixLen(p.prevFile, file) - p.int(n) // n >= 0 - p.string(file[n:]) // write suffix only - p.prevFile = file - p.int(line) - } - p.prevLine = line -} - -func (p *exporter) fileLine(obj types.Object) (file string, line int) { - if p.fset != nil { - pos := p.fset.Position(obj.Pos()) - file = pos.Filename - line = pos.Line - } - return -} - -func commonPrefixLen(a, b string) int { - if len(a) > len(b) { - a, b = b, a - } - // len(a) <= len(b) - i := 0 - for i < len(a) && a[i] == b[i] { - i++ - } - return i -} - -func (p *exporter) qualifiedName(obj types.Object) { - p.string(obj.Name()) - p.pkg(obj.Pkg(), false) -} - -func (p *exporter) typ(t types.Type) { - if t == nil { - panic(internalError("nil type")) - } - - // Possible optimization: Anonymous pointer types *T where - // T is a named type are common. We could canonicalize all - // such types *T to a single type PT = *T. This would lead - // to at most one *T entry in typIndex, and all future *T's - // would be encoded as the respective index directly. Would - // save 1 byte (pointerTag) per *T and reduce the typIndex - // size (at the cost of a canonicalization map). We can do - // this later, without encoding format change. - - // if we saw the type before, write its index (>= 0) - if i, ok := p.typIndex[t]; ok { - p.index('T', i) - return - } - - // otherwise, remember the type, write the type tag (< 0) and type data - if trackAllTypes { - if trace { - p.tracef("T%d = {>\n", len(p.typIndex)) - defer p.tracef("<\n} ") - } - p.typIndex[t] = len(p.typIndex) - } - - switch t := t.(type) { - case *types.Named: - if !trackAllTypes { - // if we don't track all types, track named types now - p.typIndex[t] = len(p.typIndex) - } - - p.tag(namedTag) - p.pos(t.Obj()) - p.qualifiedName(t.Obj()) - p.typ(t.Underlying()) - if !types.IsInterface(t) { - p.assocMethods(t) - } - - case *types.Array: - p.tag(arrayTag) - p.int64(t.Len()) - p.typ(t.Elem()) - - case *types.Slice: - p.tag(sliceTag) - p.typ(t.Elem()) - - case *dddSlice: - p.tag(dddTag) - p.typ(t.elem) - - case *types.Struct: - p.tag(structTag) - p.fieldList(t) - - case *types.Pointer: - p.tag(pointerTag) - p.typ(t.Elem()) - - case *types.Signature: - p.tag(signatureTag) - p.paramList(t.Params(), t.Variadic()) - p.paramList(t.Results(), false) - - case *types.Interface: - p.tag(interfaceTag) - p.iface(t) - - case *types.Map: - p.tag(mapTag) - p.typ(t.Key()) - p.typ(t.Elem()) - - case *types.Chan: - p.tag(chanTag) - p.int(int(3 - t.Dir())) // hack - p.typ(t.Elem()) - - default: - panic(internalErrorf("unexpected type %T: %s", t, t)) - } -} - -func (p *exporter) assocMethods(named *types.Named) { - // Sort methods (for determinism). 
- var methods []*types.Func - for i := 0; i < named.NumMethods(); i++ { - methods = append(methods, named.Method(i)) - } - sort.Sort(methodsByName(methods)) - - p.int(len(methods)) - - if trace && methods != nil { - p.tracef("associated methods {>\n") - } - - for i, m := range methods { - if trace && i > 0 { - p.tracef("\n") - } - - p.pos(m) - name := m.Name() - p.string(name) - if !exported(name) { - p.pkg(m.Pkg(), false) - } - - sig := m.Type().(*types.Signature) - p.paramList(types.NewTuple(sig.Recv()), false) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - p.int(0) // dummy value for go:nointerface pragma - ignored by importer - } - - if trace && methods != nil { - p.tracef("<\n} ") - } -} - -type methodsByName []*types.Func - -func (x methodsByName) Len() int { return len(x) } -func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } - -func (p *exporter) fieldList(t *types.Struct) { - if trace && t.NumFields() > 0 { - p.tracef("fields {>\n") - defer p.tracef("<\n} ") - } - - p.int(t.NumFields()) - for i := 0; i < t.NumFields(); i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.field(t.Field(i)) - p.string(t.Tag(i)) - } -} - -func (p *exporter) field(f *types.Var) { - if !f.IsField() { - panic(internalError("field expected")) - } - - p.pos(f) - p.fieldName(f) - p.typ(f.Type()) -} - -func (p *exporter) iface(t *types.Interface) { - // TODO(gri): enable importer to load embedded interfaces, - // then emit Embeddeds and ExplicitMethods separately here. - p.int(0) - - n := t.NumMethods() - if trace && n > 0 { - p.tracef("methods {>\n") - defer p.tracef("<\n} ") - } - p.int(n) - for i := 0; i < n; i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.method(t.Method(i)) - } -} - -func (p *exporter) method(m *types.Func) { - sig := m.Type().(*types.Signature) - if sig.Recv() == nil { - panic(internalError("method expected")) - } - - p.pos(m) - p.string(m.Name()) - if m.Name() != "_" && !ast.IsExported(m.Name()) { - p.pkg(m.Pkg(), false) - } - - // interface method; no need to encode receiver. - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) -} - -func (p *exporter) fieldName(f *types.Var) { - name := f.Name() - - if f.Anonymous() { - // anonymous field - we distinguish between 3 cases: - // 1) field name matches base type name and is exported - // 2) field name matches base type name and is not exported - // 3) field name doesn't match base type name (alias name) - bname := basetypeName(f.Type()) - if name == bname { - if ast.IsExported(name) { - name = "" // 1) we don't need to know the field name or package - } else { - name = "?" // 2) use unexported name "?" 
to force package export - } - } else { - // 3) indicate alias and export name as is - // (this requires an extra "@" but this is a rare case) - p.string("@") - } - } - - p.string(name) - if name != "" && !ast.IsExported(name) { - p.pkg(f.Pkg(), false) - } -} - -func basetypeName(typ types.Type) string { - switch typ := deref(typ).(type) { - case *types.Basic: - return typ.Name() - case *types.Named: - return typ.Obj().Name() - default: - return "" // unnamed type - } -} - -func (p *exporter) paramList(params *types.Tuple, variadic bool) { - // use negative length to indicate unnamed parameters - // (look at the first parameter only since either all - // names are present or all are absent) - n := params.Len() - if n > 0 && params.At(0).Name() == "" { - n = -n - } - p.int(n) - for i := 0; i < params.Len(); i++ { - q := params.At(i) - t := q.Type() - if variadic && i == params.Len()-1 { - t = &dddSlice{t.(*types.Slice).Elem()} - } - p.typ(t) - if n > 0 { - name := q.Name() - p.string(name) - if name != "_" { - p.pkg(q.Pkg(), false) - } - } - p.string("") // no compiler-specific info - } -} - -func (p *exporter) value(x constant.Value) { - if trace { - p.tracef("= ") - } - - switch x.Kind() { - case constant.Bool: - tag := falseTag - if constant.BoolVal(x) { - tag = trueTag - } - p.tag(tag) - - case constant.Int: - if v, exact := constant.Int64Val(x); exact { - // common case: x fits into an int64 - use compact encoding - p.tag(int64Tag) - p.int64(v) - return - } - // uncommon case: large x - use float encoding - // (powers of 2 will be encoded efficiently with exponent) - p.tag(floatTag) - p.float(constant.ToFloat(x)) - - case constant.Float: - p.tag(floatTag) - p.float(x) - - case constant.Complex: - p.tag(complexTag) - p.float(constant.Real(x)) - p.float(constant.Imag(x)) - - case constant.String: - p.tag(stringTag) - p.string(constant.StringVal(x)) - - case constant.Unknown: - // package contains type errors - p.tag(unknownTag) - - default: - panic(internalErrorf("unexpected value %v (%T)", x, x)) - } -} - -func (p *exporter) float(x constant.Value) { - if x.Kind() != constant.Float { - panic(internalErrorf("unexpected constant %v, want float", x)) - } - // extract sign (there is no -0) - sign := constant.Sign(x) - if sign == 0 { - // x == 0 - p.int(0) - return - } - // x != 0 - - var f big.Float - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - r := valueToRat(num) - f.SetRat(r.Quo(r, valueToRat(denom))) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - f.SetFloat64(math.MaxFloat64) // FIXME - } - - // extract exponent such that 0.5 <= m < 1.0 - var m big.Float - exp := f.MantExp(&m) - - // extract mantissa as *big.Int - // - set exponent large enough so mant satisfies mant.IsInt() - // - get *big.Int from mant - m.SetMantExp(&m, int(m.MinPrec())) - mant, acc := m.Int(nil) - if acc != big.Exact { - panic(internalError("internal error")) - } - - p.int(sign) - p.int(exp) - p.string(string(mant.Bytes())) -} - -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. 
- bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -func (p *exporter) bool(b bool) bool { - if trace { - p.tracef("[") - defer p.tracef("= %v] ", b) - } - - x := 0 - if b { - x = 1 - } - p.int(x) - return b -} - -// ---------------------------------------------------------------------------- -// Low-level encoders - -func (p *exporter) index(marker byte, index int) { - if index < 0 { - panic(internalError("invalid index < 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%c%d ", marker, index) - } - p.rawInt64(int64(index)) -} - -func (p *exporter) tag(tag int) { - if tag >= 0 { - panic(internalError("invalid tag >= 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%s ", tagString[-tag]) - } - p.rawInt64(int64(tag)) -} - -func (p *exporter) int(x int) { - p.int64(int64(x)) -} - -func (p *exporter) int64(x int64) { - if debugFormat { - p.marker('i') - } - if trace { - p.tracef("%d ", x) - } - p.rawInt64(x) -} - -func (p *exporter) string(s string) { - if debugFormat { - p.marker('s') - } - if trace { - p.tracef("%q ", s) - } - // if we saw the string before, write its index (>= 0) - // (the empty string is mapped to 0) - if i, ok := p.strIndex[s]; ok { - p.rawInt64(int64(i)) - return - } - // otherwise, remember string and write its negative length and bytes - p.strIndex[s] = len(p.strIndex) - p.rawInt64(-int64(len(s))) - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } -} - -// marker emits a marker byte and position information which makes -// it easy for a reader to detect if it is "out of sync". Used for -// debugFormat format only. -func (p *exporter) marker(m byte) { - p.rawByte(m) - // Enable this for help tracking down the location - // of an incorrect marker when running in debugFormat. - if false && trace { - p.tracef("#%d ", p.written) - } - p.rawInt64(int64(p.written)) -} - -// rawInt64 should only be used by low-level encoders. -func (p *exporter) rawInt64(x int64) { - var tmp [binary.MaxVarintLen64]byte - n := binary.PutVarint(tmp[:], x) - for i := 0; i < n; i++ { - p.rawByte(tmp[i]) - } -} - -// rawStringln should only be used to emit the initial version string. -func (p *exporter) rawStringln(s string) { - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } - p.rawByte('\n') -} - -// rawByte is the bottleneck interface to write to p.out. -// rawByte escapes b as follows (any encoding does that -// hides '$'): -// -// '$' => '|' 'S' -// '|' => '|' '|' -// -// Necessary so other tools can find the end of the -// export data by searching for "$$". -// rawByte should only be used by low-level encoders. -func (p *exporter) rawByte(b byte) { - switch b { - case '$': - // write '$' as '|' 'S' - b = 'S' - fallthrough - case '|': - // write '|' as '|' '|' - p.out.WriteByte('|') - p.written++ - } - p.out.WriteByte(b) - p.written++ -} - -// tracef is like fmt.Printf but it rewrites the format string -// to take care of indentation. -func (p *exporter) tracef(format string, args ...interface{}) { - if strings.ContainsAny(format, "<>\n") { - var buf bytes.Buffer - for i := 0; i < len(format); i++ { - // no need to deal with runes - ch := format[i] - switch ch { - case '>': - p.indent++ - continue - case '<': - p.indent-- - continue - } - buf.WriteByte(ch) - if ch == '\n' { - for j := p.indent; j > 0; j-- { - buf.WriteString(". 
") - } - } - } - format = buf.String() - } - fmt.Printf(format, args...) -} - -// Debugging support. -// (tagString is only used when tracing is enabled) -var tagString = [...]string{ - // Packages - -packageTag: "package", - - // Types - -namedTag: "named type", - -arrayTag: "array", - -sliceTag: "slice", - -dddTag: "ddd", - -structTag: "struct", - -pointerTag: "pointer", - -signatureTag: "signature", - -interfaceTag: "interface", - -mapTag: "map", - -chanTag: "chan", - - // Values - -falseTag: "false", - -trueTag: "true", - -int64Tag: "int64", - -floatTag: "float", - -fractionTag: "fraction", - -complexTag: "complex", - -stringTag: "string", - -unknownTag: "unknown", - - // Type aliases - -aliasTag: "alias", -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go deleted file mode 100644 index b85de0147..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go +++ /dev/null @@ -1,1053 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. - -package gcimporter - -import ( - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -type importer struct { - imports map[string]*types.Package - data []byte - importpath string - buf []byte // for reading strings - version int // export format version - - // object lists - strList []string // in order of appearance - pathList []string // in order of appearance - pkgList []*types.Package // in order of appearance - typList []types.Type // in order of appearance - interfaceList []*types.Interface // for delayed completion only - trackAllTypes bool - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - fake fakeFileSet - - // debugging support - debugFormat bool - read int // bytes read -} - -// BImportData imports a package from the serialized package data -// and returns the number of bytes consumed and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. -func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - // catch panics and return them as errors - const currentVersion = 6 - version := -1 // unknown version - defer func() { - if e := recover(); e != nil { - // Return a (possibly nil or incomplete) package unchanged (see #16088). - if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) - } - } - }() - - p := importer{ - imports: imports, - data: data, - importpath: path, - version: version, - strList: []string{""}, // empty string is mapped to 0 - pathList: []string{""}, // empty string is mapped to 0 - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - } - defer p.fake.setLines() // set lines for files in fset - - // read version info - var versionstr string - if b := p.rawByte(); b == 'c' || b == 'd' { - // Go1.7 encoding; first byte encodes low-level - // encoding format (compact vs debug). 
- // For backward-compatibility only (avoid problems with - // old installed packages). Newly compiled packages use - // the extensible format string. - // TODO(gri) Remove this support eventually; after Go1.8. - if b == 'd' { - p.debugFormat = true - } - p.trackAllTypes = p.rawByte() == 'a' - p.posInfoFormat = p.int() != 0 - versionstr = p.string() - if versionstr == "v1" { - version = 0 - } - } else { - // Go1.8 extensible encoding - // read version string and extract version number (ignore anything after the version number) - versionstr = p.rawStringln(b) - if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { - if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { - version = v - } - } - } - p.version = version - - // read version specific flags - extend as necessary - switch p.version { - // case currentVersion: - // ... - // fallthrough - case currentVersion, 5, 4, 3, 2, 1: - p.debugFormat = p.rawStringln(p.rawByte()) == "debug" - p.trackAllTypes = p.int() != 0 - p.posInfoFormat = p.int() != 0 - case 0: - // Go1.7 encoding format - nothing to do here - default: - errorf("unknown bexport format version %d (%q)", p.version, versionstr) - } - - // --- generic export data --- - - // populate typList with predeclared "known" types - p.typList = append(p.typList, predeclared()...) - - // read package data - pkg = p.pkg() - - // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) - objcount := 0 - for { - tag := p.tagOrIndex() - if tag == endTag { - break - } - p.obj(tag) - objcount++ - } - - // self-verification - if count := p.int(); count != objcount { - errorf("got %d objects; want %d", objcount, count) - } - - // ignore compiler-specific import data - - // complete interfaces - // TODO(gri) re-investigate if we still need to do this in a delayed fashion - for _, typ := range p.interfaceList { - typ.Complete() - } - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
- sort.Sort(byPath(list)) - pkg.SetImports(list) - - // package was imported completely and without errors - pkg.MarkComplete() - - return p.read, pkg, nil -} - -func errorf(format string, args ...interface{}) { - panic(fmt.Sprintf(format, args...)) -} - -func (p *importer) pkg() *types.Package { - // if the package was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.pkgList[i] - } - - // otherwise, i is the package tag (< 0) - if i != packageTag { - errorf("unexpected package tag %d version %d", i, p.version) - } - - // read package data - name := p.string() - var path string - if p.version >= 5 { - path = p.path() - } else { - path = p.string() - } - if p.version >= 6 { - p.int() // package height; unused by go/types - } - - // we should never see an empty package name - if name == "" { - errorf("empty package name in import") - } - - // an empty path denotes the package we are currently importing; - // it must be the first package we see - if (path == "") != (len(p.pkgList) == 0) { - errorf("package path %q for pkg index %d", path, len(p.pkgList)) - } - - // if the package was imported before, use that one; otherwise create a new one - if path == "" { - path = p.importpath - } - pkg := p.imports[path] - if pkg == nil { - pkg = types.NewPackage(path, name) - p.imports[path] = pkg - } else if pkg.Name() != name { - errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) - } - p.pkgList = append(p.pkgList, pkg) - - return pkg -} - -// objTag returns the tag value for each object kind. -func objTag(obj types.Object) int { - switch obj.(type) { - case *types.Const: - return constTag - case *types.TypeName: - return typeTag - case *types.Var: - return varTag - case *types.Func: - return funcTag - default: - errorf("unexpected object: %v (%T)", obj, obj) // panics - panic("unreachable") - } -} - -func sameObj(a, b types.Object) bool { - // Because unnamed types are not canonicalized, we cannot simply compare types for - // (pointer) identity. - // Ideally we'd check equality of constant values as well, but this is good enough. - return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) -} - -func (p *importer) declare(obj types.Object) { - pkg := obj.Pkg() - if alt := pkg.Scope().Insert(obj); alt != nil { - // This can only trigger if we import a (non-type) object a second time. - // Excluding type aliases, this cannot happen because 1) we only import a package - // once; and b) we ignore compiler-specific export data which may contain - // functions whose inlined function bodies refer to other functions that - // were already imported. - // However, type aliases require reexporting the original type, so we need - // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, - // method importer.obj, switch case importing functions). - // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
- if !sameObj(obj, alt) { - errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) - } - } -} - -func (p *importer) obj(tag int) { - switch tag { - case constTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - val := p.value() - p.declare(types.NewConst(pos, pkg, name, typ, val)) - - case aliasTag: - // TODO(gri) verify type alias hookup is correct - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewTypeName(pos, pkg, name, typ)) - - case typeTag: - p.typ(nil, nil) - - case varTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewVar(pos, pkg, name, typ)) - - case funcTag: - pos := p.pos() - pkg, name := p.qualifiedName() - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(nil, params, result, isddd) - p.declare(types.NewFunc(pos, pkg, name, sig)) - - default: - errorf("unexpected object tag %d", tag) - } -} - -const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go - -func (p *importer) pos() token.Pos { - if !p.posInfoFormat { - return token.NoPos - } - - file := p.prevFile - line := p.prevLine - delta := p.int() - line += delta - if p.version >= 5 { - if delta == deltaNewFile { - if n := p.int(); n >= 0 { - // file changed - file = p.path() - line = n - } - } - } else { - if delta == 0 { - if n := p.int(); n >= 0 { - // file changed - file = p.prevFile[:n] + p.string() - line = p.int() - } - } - } - p.prevFile = file - p.prevLine = line - - return p.fake.pos(file, line, 0) -} - -// Synthesize a token.Pos -type fakeFileSet struct { - fset *token.FileSet - files map[string]*fileInfo -} - -type fileInfo struct { - file *token.File - lastline int -} - -const maxlines = 64 * 1024 - -func (s *fakeFileSet) pos(file string, line, column int) token.Pos { - // TODO(mdempsky): Make use of column. - - // Since we don't know the set of needed file positions, we reserve maxlines - // positions per file. We delay calling token.File.SetLines until all - // positions have been calculated (by way of fakeFileSet.setLines), so that - // we can avoid setting unnecessary lines. See also golang/go#46586. - f := s.files[file] - if f == nil { - f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)} - s.files[file] = f - } - if line > maxlines { - line = 1 - } - if line > f.lastline { - f.lastline = line - } - - // Return a fake position assuming that f.file consists only of newlines. - return token.Pos(f.file.Base() + line - 1) -} - -func (s *fakeFileSet) setLines() { - fakeLinesOnce.Do(func() { - fakeLines = make([]int, maxlines) - for i := range fakeLines { - fakeLines[i] = i - } - }) - for _, f := range s.files { - f.file.SetLines(fakeLines[:f.lastline]) - } -} - -var ( - fakeLines []int - fakeLinesOnce sync.Once -) - -func (p *importer) qualifiedName() (pkg *types.Package, name string) { - name = p.string() - pkg = p.pkg() - return -} - -func (p *importer) record(t types.Type) { - p.typList = append(p.typList, t) -} - -// A dddSlice is a types.Type representing ...T parameters. -// It only appears for parameter types and does not escape -// the importer. -type dddSlice struct { - elem types.Type -} - -func (t *dddSlice) Underlying() types.Type { return t } -func (t *dddSlice) String() string { return "..." + t.elem.String() } - -// parent is the package which declared the type; parent == nil means -// the package currently imported. 
The parent package is needed for -// exported struct fields and interface methods which don't contain -// explicit package information in the export data. -// -// A non-nil tname is used as the "owner" of the result type; i.e., -// the result type is the underlying type of tname. tname is used -// to give interface methods a named receiver type where possible. -func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { - // if the type was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.typList[i] - } - - // otherwise, i is the type tag (< 0) - switch i { - case namedTag: - // read type object - pos := p.pos() - parent, name := p.qualifiedName() - scope := parent.Scope() - obj := scope.Lookup(name) - - // if the object doesn't exist yet, create and insert it - if obj == nil { - obj = types.NewTypeName(pos, parent, name, nil) - scope.Insert(obj) - } - - if _, ok := obj.(*types.TypeName); !ok { - errorf("pkg = %s, name = %s => %s", parent, name, obj) - } - - // associate new named type with obj if it doesn't exist yet - t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) - - // but record the existing type, if any - tname := obj.Type().(*types.Named) // tname is either t0 or the existing type - p.record(tname) - - // read underlying type - t0.SetUnderlying(p.typ(parent, t0)) - - // interfaces don't have associated methods - if types.IsInterface(t0) { - return tname - } - - // read associated methods - for i := p.int(); i > 0; i-- { - // TODO(gri) replace this with something closer to fieldName - pos := p.pos() - name := p.string() - if !exported(name) { - p.pkg() - } - - recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? - params, isddd := p.paramList() - result, _ := p.paramList() - p.int() // go:nointerface pragma - discarded - - sig := types.NewSignature(recv.At(0), params, result, isddd) - t0.AddMethod(types.NewFunc(pos, parent, name, sig)) - } - - return tname - - case arrayTag: - t := new(types.Array) - if p.trackAllTypes { - p.record(t) - } - - n := p.int64() - *t = *types.NewArray(p.typ(parent, nil), n) - return t - - case sliceTag: - t := new(types.Slice) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewSlice(p.typ(parent, nil)) - return t - - case dddTag: - t := new(dddSlice) - if p.trackAllTypes { - p.record(t) - } - - t.elem = p.typ(parent, nil) - return t - - case structTag: - t := new(types.Struct) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewStruct(p.fieldList(parent)) - return t - - case pointerTag: - t := new(types.Pointer) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewPointer(p.typ(parent, nil)) - return t - - case signatureTag: - t := new(types.Signature) - if p.trackAllTypes { - p.record(t) - } - - params, isddd := p.paramList() - result, _ := p.paramList() - *t = *types.NewSignature(nil, params, result, isddd) - return t - - case interfaceTag: - // Create a dummy entry in the type list. This is safe because we - // cannot expect the interface type to appear in a cycle, as any - // such cycle must contain a named type which would have been - // first defined earlier. - // TODO(gri) Is this still true now that we have type aliases? - // See issue #23225. 
- n := len(p.typList) - if p.trackAllTypes { - p.record(nil) - } - - var embeddeds []types.Type - for n := p.int(); n > 0; n-- { - p.pos() - embeddeds = append(embeddeds, p.typ(parent, nil)) - } - - t := newInterface(p.methodList(parent, tname), embeddeds) - p.interfaceList = append(p.interfaceList, t) - if p.trackAllTypes { - p.typList[n] = t - } - return t - - case mapTag: - t := new(types.Map) - if p.trackAllTypes { - p.record(t) - } - - key := p.typ(parent, nil) - val := p.typ(parent, nil) - *t = *types.NewMap(key, val) - return t - - case chanTag: - t := new(types.Chan) - if p.trackAllTypes { - p.record(t) - } - - dir := chanDir(p.int()) - val := p.typ(parent, nil) - *t = *types.NewChan(dir, val) - return t - - default: - errorf("unexpected type tag %d", i) // panics - panic("unreachable") - } -} - -func chanDir(d int) types.ChanDir { - // tag values must match the constants in cmd/compile/internal/gc/go.go - switch d { - case 1 /* Crecv */ : - return types.RecvOnly - case 2 /* Csend */ : - return types.SendOnly - case 3 /* Cboth */ : - return types.SendRecv - default: - errorf("unexpected channel dir %d", d) - return 0 - } -} - -func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { - if n := p.int(); n > 0 { - fields = make([]*types.Var, n) - tags = make([]string, n) - for i := range fields { - fields[i], tags[i] = p.field(parent) - } - } - return -} - -func (p *importer) field(parent *types.Package) (*types.Var, string) { - pos := p.pos() - pkg, name, alias := p.fieldName(parent) - typ := p.typ(parent, nil) - tag := p.string() - - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - errorf("named base type expected") - } - anonymous = true - } else if alias { - // anonymous field: we have an explicit name because it's an alias - anonymous = true - } - - return types.NewField(pos, pkg, name, typ, anonymous), tag -} - -func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { - if n := p.int(); n > 0 { - methods = make([]*types.Func, n) - for i := range methods { - methods[i] = p.method(parent, baseType) - } - } - return -} - -func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { - pos := p.pos() - pkg, name, _ := p.fieldName(parent) - // If we don't have a baseType, use a nil receiver. - // A receiver using the actual interface type (which - // we don't know yet) will be filled in when we call - // types.Interface.Complete. 
- var recv *types.Var - if baseType != nil { - recv = types.NewVar(token.NoPos, parent, "", baseType) - } - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(recv, params, result, isddd) - return types.NewFunc(pos, pkg, name, sig) -} - -func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { - name = p.string() - pkg = parent - if pkg == nil { - // use the imported package instead - pkg = p.pkgList[0] - } - if p.version == 0 && name == "_" { - // version 0 didn't export a package for _ fields - return - } - switch name { - case "": - // 1) field name matches base type name and is exported: nothing to do - case "?": - // 2) field name matches base type name and is not exported: need package - name = "" - pkg = p.pkg() - case "@": - // 3) field name doesn't match type name (alias) - name = p.string() - alias = true - fallthrough - default: - if !exported(name) { - pkg = p.pkg() - } - } - return -} - -func (p *importer) paramList() (*types.Tuple, bool) { - n := p.int() - if n == 0 { - return nil, false - } - // negative length indicates unnamed parameters - named := true - if n < 0 { - n = -n - named = false - } - // n > 0 - params := make([]*types.Var, n) - isddd := false - for i := range params { - params[i], isddd = p.param(named) - } - return types.NewTuple(params...), isddd -} - -func (p *importer) param(named bool) (*types.Var, bool) { - t := p.typ(nil, nil) - td, isddd := t.(*dddSlice) - if isddd { - t = types.NewSlice(td.elem) - } - - var pkg *types.Package - var name string - if named { - name = p.string() - if name == "" { - errorf("expected named parameter") - } - if name != "_" { - pkg = p.pkg() - } - if i := strings.Index(name, "·"); i > 0 { - name = name[:i] // cut off gc-specific parameter numbering - } - } - - // read and discard compiler-specific info - p.string() - - return types.NewVar(token.NoPos, pkg, name, t), isddd -} - -func exported(name string) bool { - ch, _ := utf8.DecodeRuneInString(name) - return unicode.IsUpper(ch) -} - -func (p *importer) value() constant.Value { - switch tag := p.tagOrIndex(); tag { - case falseTag: - return constant.MakeBool(false) - case trueTag: - return constant.MakeBool(true) - case int64Tag: - return constant.MakeInt64(p.int64()) - case floatTag: - return p.float() - case complexTag: - re := p.float() - im := p.float() - return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - case stringTag: - return constant.MakeString(p.string()) - case unknownTag: - return constant.MakeUnknown() - default: - errorf("unexpected value tag %d", tag) // panics - panic("unreachable") - } -} - -func (p *importer) float() constant.Value { - sign := p.int() - if sign == 0 { - return constant.MakeInt64(0) - } - - exp := p.int() - mant := []byte(p.string()) // big endian - - // remove leading 0's if any - for len(mant) > 0 && mant[0] == 0 { - mant = mant[1:] - } - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { - mant[i], mant[j] = mant[j], mant[i] - } - - // adjust exponent (constant.MakeFromBytes creates an integer value, - // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) - exp -= len(mant) << 3 - if len(mant) > 0 { - for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { - exp++ - } - } - - x := constant.MakeFromBytes(mant) - switch { - case exp < 0: - d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - } - - if sign < 0 { - x = constant.UnaryOp(token.SUB, x, 0) - } - return x -} - -// ---------------------------------------------------------------------------- -// Low-level decoders - -func (p *importer) tagOrIndex() int { - if p.debugFormat { - p.marker('t') - } - - return int(p.rawInt64()) -} - -func (p *importer) int() int { - x := p.int64() - if int64(int(x)) != x { - errorf("exported integer too large") - } - return int(x) -} - -func (p *importer) int64() int64 { - if p.debugFormat { - p.marker('i') - } - - return p.rawInt64() -} - -func (p *importer) path() string { - if p.debugFormat { - p.marker('p') - } - // if the path was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.pathList[i] - } - // otherwise, i is the negative path length (< 0) - a := make([]string, -i) - for n := range a { - a[n] = p.string() - } - s := strings.Join(a, "/") - p.pathList = append(p.pathList, s) - return s -} - -func (p *importer) string() string { - if p.debugFormat { - p.marker('s') - } - // if the string was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.strList[i] - } - // otherwise, i is the negative string length (< 0) - if n := int(-i); n <= cap(p.buf) { - p.buf = p.buf[:n] - } else { - p.buf = make([]byte, n) - } - for i := range p.buf { - p.buf[i] = p.rawByte() - } - s := string(p.buf) - p.strList = append(p.strList, s) - return s -} - -func (p *importer) marker(want byte) { - if got := p.rawByte(); got != want { - errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) - } - - pos := p.read - if n := int(p.rawInt64()); n != pos { - errorf("incorrect position: got %d; want %d", n, pos) - } -} - -// rawInt64 should only be used by low-level decoders. -func (p *importer) rawInt64() int64 { - i, err := binary.ReadVarint(p) - if err != nil { - errorf("read error: %v", err) - } - return i -} - -// rawStringln should only be used to read the initial version string. -func (p *importer) rawStringln(b byte) string { - p.buf = p.buf[:0] - for b != '\n' { - p.buf = append(p.buf, b) - b = p.rawByte() - } - return string(p.buf) -} - -// needed for binary.ReadVarint in rawInt64 -func (p *importer) ReadByte() (byte, error) { - return p.rawByte(), nil -} - -// byte is the bottleneck interface for reading p.data. -// It unescapes '|' 'S' to '$' and '|' '|' to '|'. -// rawByte should only be used by low-level decoders. -func (p *importer) rawByte() byte { - b := p.data[0] - r := 1 - if b == '|' { - b = p.data[1] - r = 2 - switch b { - case 'S': - b = '$' - case '|': - // nothing to do - default: - errorf("unexpected escape sequence in export data") - } - } - p.data = p.data[r:] - p.read += r - return b - -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. 
-const ( - // Objects - packageTag = -(iota + 1) - constTag - typeTag - varTag - funcTag - endTag - - // Types - namedTag - arrayTag - sliceTag - dddTag - structTag - pointerTag - signatureTag - interfaceTag - mapTag - chanTag - - // Values - falseTag - trueTag - int64Tag - floatTag - fractionTag // not used by gc - complexTag - stringTag - nilTag // only used by gc (appears in exported inlined function bodies) - unknownTag // not used by gc (only appears in packages with errors) - - // Type aliases - aliasTag -) - -var predeclOnce sync.Once -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - predeclOnce.Do(func() { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - predecl = append(predecl, additionalPredeclared()...) - }) - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go deleted file mode 100644 index e96c39600..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go +++ /dev/null @@ -1,1125 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go, -// but it also contains the original source-based importer code for Go1.6. -// Once we stop supporting 1.6, we can remove that code. - -// Package gcimporter provides various functions for reading -// gc-generated object files that can be used to implement the -// Importer interface defined by the Go 1.5 standard library package. -package gcimporter // import "golang.org/x/tools/go/internal/gcimporter" - -import ( - "bufio" - "errors" - "fmt" - "go/build" - "go/constant" - "go/token" - "go/types" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "text/scanner" -) - -const ( - // Enable debug during development: it adds some additional checks, and - // prevents errors from being recovered. - debug = false - - // If trace is set, debugging output is printed to std out. 
- trace = false -) - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. -func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. - if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - id = path // make sure we have an id to print in error message - return - } - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - -// ImportData imports a package by reading the gc-generated export data, -// adds the corresponding package object to the packages map indexed by id, -// and returns the object. -// -// The packages map must contains all packages already imported. The data -// reader position must be the beginning of the export data section. The -// filename is only used in error messages. -// -// If packages[id] contains the completely imported package, that package -// can be used directly, and there is no need to call this function (but -// there is also no harm but for extra time used). -func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { - // support for parser error handling - defer func() { - switch r := recover().(type) { - case nil: - // nothing to do - case importError: - err = r - default: - panic(r) // internal error - } - }() - - var p parser - p.init(filename, id, data, packages) - pkg = p.parseExport() - - return -} - -// Import imports a gc-generated package given its import path and srcDir, adds -// the corresponding package object to the packages map, and returns the object. -// The packages map must contain all packages already imported. -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { - var rc io.ReadCloser - var filename, id string - if lookup != nil { - // With custom lookup specified, assume that caller has - // converted path to a canonical import path for use in the map. - if path == "unsafe" { - return types.Unsafe, nil - } - id = path - - // No need to re-import if the package was imported completely before. 
- if pkg = packages[id]; pkg != nil && pkg.Complete() { - return - } - f, err := lookup(path) - if err != nil { - return nil, err - } - rc = f - } else { - filename, id = FindPkg(path, srcDir) - if filename == "" { - if path == "unsafe" { - return types.Unsafe, nil - } - return nil, fmt.Errorf("can't find import: %q", id) - } - - // no need to re-import if the package was imported completely before - if pkg = packages[id]; pkg != nil && pkg.Complete() { - return - } - - // open file - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - // add file name to error - err = fmt.Errorf("%s: %v", filename, err) - } - }() - rc = f - } - defer rc.Close() - - var hdr string - var size int64 - buf := bufio.NewReader(rc) - if hdr, size, err = FindExportData(buf); err != nil { - return - } - - switch hdr { - case "$$\n": - // Work-around if we don't have a filename; happens only if lookup != nil. - // Either way, the filename is only needed for importer error messages, so - // this is fine. - if filename == "" { - filename = path - } - return ImportData(packages, filename, id, buf) - - case "$$B\n": - var data []byte - data, err = ioutil.ReadAll(buf) - if err != nil { - break - } - - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. - if len(data) > 0 { - switch data[0] { - case 'i': - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err - - case 'v', 'c', 'd': - _, pkg, err := BImportData(fset, packages, data, id) - return pkg, err - - case 'u': - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } - } - - default: - err = fmt.Errorf("unknown export data header: %q", hdr) - } - - return -} - -// ---------------------------------------------------------------------------- -// Parser - -// TODO(gri) Imported objects don't have position information. -// Ideally use the debug table line info; alternatively -// create some fake position (or the position of the -// import). That way error messages referring to imported -// objects can print meaningful information. - -// parser parses the exports inside a gc compiler-produced -// object/archive file and populates its scope with the results. 
-type parser struct { - scanner scanner.Scanner - tok rune // current token - lit string // literal string; only valid for Ident, Int, String tokens - id string // package id of imported package - sharedPkgs map[string]*types.Package // package id -> package object (across importer) - localPkgs map[string]*types.Package // package id -> package object (just this package) -} - -func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) { - p.scanner.Init(src) - p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } - p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments - p.scanner.Whitespace = 1<<'\t' | 1<<' ' - p.scanner.Filename = filename // for good error messages - p.next() - p.id = id - p.sharedPkgs = packages - if debug { - // check consistency of packages map - for _, pkg := range packages { - if pkg.Name() == "" { - fmt.Printf("no package name for %s\n", pkg.Path()) - } - } - } -} - -func (p *parser) next() { - p.tok = p.scanner.Scan() - switch p.tok { - case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·': - p.lit = p.scanner.TokenText() - default: - p.lit = "" - } - if debug { - fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit) - } -} - -func declTypeName(pkg *types.Package, name string) *types.TypeName { - scope := pkg.Scope() - if obj := scope.Lookup(name); obj != nil { - return obj.(*types.TypeName) - } - obj := types.NewTypeName(token.NoPos, pkg, name, nil) - // a named type may be referred to before the underlying type - // is known - set it up - types.NewNamed(obj, nil, nil) - scope.Insert(obj) - return obj -} - -// ---------------------------------------------------------------------------- -// Error handling - -// Internal errors are boxed as importErrors. -type importError struct { - pos scanner.Position - err error -} - -func (e importError) Error() string { - return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) -} - -func (p *parser) error(err interface{}) { - if s, ok := err.(string); ok { - err = errors.New(s) - } - // panic with a runtime.Error if err is not an error - panic(importError{p.scanner.Pos(), err.(error)}) -} - -func (p *parser) errorf(format string, args ...interface{}) { - p.error(fmt.Sprintf(format, args...)) -} - -func (p *parser) expect(tok rune) string { - lit := p.lit - if p.tok != tok { - p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit) - } - p.next() - return lit -} - -func (p *parser) expectSpecial(tok string) { - sep := 'x' // not white space - i := 0 - for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' { - sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token - p.next() - i++ - } - if i < len(tok) { - p.errorf("expected %q, got %q", tok, tok[0:i]) - } -} - -func (p *parser) expectKeyword(keyword string) { - lit := p.expect(scanner.Ident) - if lit != keyword { - p.errorf("expected keyword %s, got %q", keyword, lit) - } -} - -// ---------------------------------------------------------------------------- -// Qualified and unqualified names - -// parsePackageID parses a PackageId: -// -// PackageId = string_lit . 
-func (p *parser) parsePackageID() string { - id, err := strconv.Unquote(p.expect(scanner.String)) - if err != nil { - p.error(err) - } - // id == "" stands for the imported package id - // (only known at time of package installation) - if id == "" { - id = p.id - } - return id -} - -// parsePackageName parse a PackageName: -// -// PackageName = ident . -func (p *parser) parsePackageName() string { - return p.expect(scanner.Ident) -} - -// parseDotIdent parses a dotIdentifier: -// -// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . -func (p *parser) parseDotIdent() string { - ident := "" - if p.tok != scanner.Int { - sep := 'x' // not white space - for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' { - ident += p.lit - sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token - p.next() - } - } - if ident == "" { - p.expect(scanner.Ident) // use expect() for error handling - } - return ident -} - -// parseQualifiedName parses a QualifiedName: -// -// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . -func (p *parser) parseQualifiedName() (id, name string) { - p.expect('@') - id = p.parsePackageID() - p.expect('.') - // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. - if p.tok == '?' { - p.next() - } else { - name = p.parseDotIdent() - } - return -} - -// getPkg returns the package for a given id. If the package is -// not found, create the package and add it to the p.localPkgs -// and p.sharedPkgs maps. name is the (expected) name of the -// package. If name == "", the package name is expected to be -// set later via an import clause in the export data. -// -// id identifies a package, usually by a canonical package path like -// "encoding/json" but possibly by a non-canonical import path like -// "./json". -func (p *parser) getPkg(id, name string) *types.Package { - // package unsafe is not in the packages maps - handle explicitly - if id == "unsafe" { - return types.Unsafe - } - - pkg := p.localPkgs[id] - if pkg == nil { - // first import of id from this package - pkg = p.sharedPkgs[id] - if pkg == nil { - // first import of id by this importer; - // add (possibly unnamed) pkg to shared packages - pkg = types.NewPackage(id, name) - p.sharedPkgs[id] = pkg - } - // add (possibly unnamed) pkg to local packages - if p.localPkgs == nil { - p.localPkgs = make(map[string]*types.Package) - } - p.localPkgs[id] = pkg - } else if name != "" { - // package exists already and we have an expected package name; - // make sure names match or set package name if necessary - if pname := pkg.Name(); pname == "" { - pkg.SetName(name) - } else if pname != name { - p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name) - } - } - return pkg -} - -// parseExportedName is like parseQualifiedName, but -// the package id is resolved to an imported *types.Package. -func (p *parser) parseExportedName() (pkg *types.Package, name string) { - id, name := p.parseQualifiedName() - pkg = p.getPkg(id, "") - return -} - -// ---------------------------------------------------------------------------- -// Types - -// parseBasicType parses a BasicType: -// -// BasicType = identifier . 
-func (p *parser) parseBasicType() types.Type { - id := p.expect(scanner.Ident) - obj := types.Universe.Lookup(id) - if obj, ok := obj.(*types.TypeName); ok { - return obj.Type() - } - p.errorf("not a basic type: %s", id) - return nil -} - -// parseArrayType parses an ArrayType: -// -// ArrayType = "[" int_lit "]" Type . -func (p *parser) parseArrayType(parent *types.Package) types.Type { - // "[" already consumed and lookahead known not to be "]" - lit := p.expect(scanner.Int) - p.expect(']') - elem := p.parseType(parent) - n, err := strconv.ParseInt(lit, 10, 64) - if err != nil { - p.error(err) - } - return types.NewArray(elem, n) -} - -// parseMapType parses a MapType: -// -// MapType = "map" "[" Type "]" Type . -func (p *parser) parseMapType(parent *types.Package) types.Type { - p.expectKeyword("map") - p.expect('[') - key := p.parseType(parent) - p.expect(']') - elem := p.parseType(parent) - return types.NewMap(key, elem) -} - -// parseName parses a Name: -// -// Name = identifier | "?" | QualifiedName . -// -// For unqualified and anonymous names, the returned package is the parent -// package unless parent == nil, in which case the returned package is the -// package being imported. (The parent package is not nil if the name -// is an unqualified struct field or interface method name belonging to a -// type declared in another package.) -// -// For qualified names, the returned package is nil (and not created if -// it doesn't exist yet) unless materializePkg is set (which creates an -// unnamed package with valid package path). In the latter case, a -// subsequent import clause is expected to provide a name for the package. -func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { - pkg = parent - if pkg == nil { - pkg = p.sharedPkgs[p.id] - } - switch p.tok { - case scanner.Ident: - name = p.lit - p.next() - case '?': - // anonymous - p.next() - case '@': - // exported name prefixed with package path - pkg = nil - var id string - id, name = p.parseQualifiedName() - if materializePkg { - pkg = p.getPkg(id, "") - } - default: - p.error("name expected") - } - return -} - -func deref(typ types.Type) types.Type { - if p, _ := typ.(*types.Pointer); p != nil { - return p.Elem() - } - return typ -} - -// parseField parses a Field: -// -// Field = Name Type [ string_lit ] . -func (p *parser) parseField(parent *types.Package) (*types.Var, string) { - pkg, name := p.parseName(parent, true) - - if name == "_" { - // Blank fields should be package-qualified because they - // are unexported identifiers, but gc does not qualify them. - // Assuming that the ident belongs to the current package - // causes types to change during re-exporting, leading - // to spurious "can't assign A to B" errors from go/types. - // As a workaround, pretend all blank fields belong - // to the same unique dummy package. 
- const blankpkg = "<_>" - pkg = p.getPkg(blankpkg, blankpkg) - } - - typ := p.parseType(parent) - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - p.errorf("anonymous field expected") - } - anonymous = true - } - tag := "" - if p.tok == scanner.String { - s := p.expect(scanner.String) - var err error - tag, err = strconv.Unquote(s) - if err != nil { - p.errorf("invalid struct tag %s: %s", s, err) - } - } - return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag -} - -// parseStructType parses a StructType: -// -// StructType = "struct" "{" [ FieldList ] "}" . -// FieldList = Field { ";" Field } . -func (p *parser) parseStructType(parent *types.Package) types.Type { - var fields []*types.Var - var tags []string - - p.expectKeyword("struct") - p.expect('{') - for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { - if i > 0 { - p.expect(';') - } - fld, tag := p.parseField(parent) - if tag != "" && tags == nil { - tags = make([]string, i) - } - if tags != nil { - tags = append(tags, tag) - } - fields = append(fields, fld) - } - p.expect('}') - - return types.NewStruct(fields, tags) -} - -// parseParameter parses a Parameter: -// -// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . -func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { - _, name := p.parseName(nil, false) - // remove gc-specific parameter numbering - if i := strings.Index(name, "·"); i >= 0 { - name = name[:i] - } - if p.tok == '.' { - p.expectSpecial("...") - isVariadic = true - } - typ := p.parseType(nil) - if isVariadic { - typ = types.NewSlice(typ) - } - // ignore argument tag (e.g. "noescape") - if p.tok == scanner.String { - p.next() - } - // TODO(gri) should we provide a package? - par = types.NewVar(token.NoPos, nil, name, typ) - return -} - -// parseParameters parses a Parameters: -// -// Parameters = "(" [ ParameterList ] ")" . -// ParameterList = { Parameter "," } Parameter . -func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { - p.expect('(') - for p.tok != ')' && p.tok != scanner.EOF { - if len(list) > 0 { - p.expect(',') - } - par, variadic := p.parseParameter() - list = append(list, par) - if variadic { - if isVariadic { - p.error("... not on final argument") - } - isVariadic = true - } - } - p.expect(')') - - return -} - -// parseSignature parses a Signature: -// -// Signature = Parameters [ Result ] . -// Result = Type | Parameters . -func (p *parser) parseSignature(recv *types.Var) *types.Signature { - params, isVariadic := p.parseParameters() - - // optional result type - var results []*types.Var - if p.tok == '(' { - var variadic bool - results, variadic = p.parseParameters() - if variadic { - p.error("... not permitted on result type") - } - } - - return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) -} - -// parseInterfaceType parses an InterfaceType: -// -// InterfaceType = "interface" "{" [ MethodList ] "}" . -// MethodList = Method { ";" Method } . -// Method = Name Signature . -// -// The methods of embedded interfaces are always "inlined" -// by the compiler and thus embedded interfaces are never -// visible in the export data. 
-func (p *parser) parseInterfaceType(parent *types.Package) types.Type { - var methods []*types.Func - - p.expectKeyword("interface") - p.expect('{') - for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { - if i > 0 { - p.expect(';') - } - pkg, name := p.parseName(parent, true) - sig := p.parseSignature(nil) - methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig)) - } - p.expect('}') - - // Complete requires the type's embedded interfaces to be fully defined, - // but we do not define any - return newInterface(methods, nil).Complete() -} - -// parseChanType parses a ChanType: -// -// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . -func (p *parser) parseChanType(parent *types.Package) types.Type { - dir := types.SendRecv - if p.tok == scanner.Ident { - p.expectKeyword("chan") - if p.tok == '<' { - p.expectSpecial("<-") - dir = types.SendOnly - } - } else { - p.expectSpecial("<-") - p.expectKeyword("chan") - dir = types.RecvOnly - } - elem := p.parseType(parent) - return types.NewChan(dir, elem) -} - -// parseType parses a Type: -// -// Type = -// BasicType | TypeName | ArrayType | SliceType | StructType | -// PointerType | FuncType | InterfaceType | MapType | ChanType | -// "(" Type ")" . -// -// BasicType = ident . -// TypeName = ExportedName . -// SliceType = "[" "]" Type . -// PointerType = "*" Type . -// FuncType = "func" Signature . -func (p *parser) parseType(parent *types.Package) types.Type { - switch p.tok { - case scanner.Ident: - switch p.lit { - default: - return p.parseBasicType() - case "struct": - return p.parseStructType(parent) - case "func": - // FuncType - p.next() - return p.parseSignature(nil) - case "interface": - return p.parseInterfaceType(parent) - case "map": - return p.parseMapType(parent) - case "chan": - return p.parseChanType(parent) - } - case '@': - // TypeName - pkg, name := p.parseExportedName() - return declTypeName(pkg, name).Type() - case '[': - p.next() // look ahead - if p.tok == ']' { - // SliceType - p.next() - return types.NewSlice(p.parseType(parent)) - } - return p.parseArrayType(parent) - case '*': - // PointerType - p.next() - return types.NewPointer(p.parseType(parent)) - case '<': - return p.parseChanType(parent) - case '(': - // "(" Type ")" - p.next() - typ := p.parseType(parent) - p.expect(')') - return typ - } - p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit) - return nil -} - -// ---------------------------------------------------------------------------- -// Declarations - -// parseImportDecl parses an ImportDecl: -// -// ImportDecl = "import" PackageName PackageId . -func (p *parser) parseImportDecl() { - p.expectKeyword("import") - name := p.parsePackageName() - p.getPkg(p.parsePackageID(), name) -} - -// parseInt parses an int_lit: -// -// int_lit = [ "+" | "-" ] { "0" ... "9" } . -func (p *parser) parseInt() string { - s := "" - switch p.tok { - case '-': - s = "-" - p.next() - case '+': - p.next() - } - return s + p.expect(scanner.Int) -} - -// parseNumber parses a number: -// -// number = int_lit [ "p" int_lit ] . 
-func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { - // mantissa - mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) - if mant == nil { - panic("invalid mantissa") - } - - if p.lit == "p" { - // exponent (base 2) - p.next() - exp, err := strconv.ParseInt(p.parseInt(), 10, 0) - if err != nil { - p.error(err) - } - if exp < 0 { - denom := constant.MakeInt64(1) - denom = constant.Shift(denom, token.SHL, uint(-exp)) - typ = types.Typ[types.UntypedFloat] - val = constant.BinaryOp(mant, token.QUO, denom) - return - } - if exp > 0 { - mant = constant.Shift(mant, token.SHL, uint(exp)) - } - typ = types.Typ[types.UntypedFloat] - val = mant - return - } - - typ = types.Typ[types.UntypedInt] - val = mant - return -} - -// parseConstDecl parses a ConstDecl: -// -// ConstDecl = "const" ExportedName [ Type ] "=" Literal . -// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . -// bool_lit = "true" | "false" . -// complex_lit = "(" float_lit "+" float_lit "i" ")" . -// rune_lit = "(" int_lit "+" int_lit ")" . -// string_lit = `"` { unicode_char } `"` . -func (p *parser) parseConstDecl() { - p.expectKeyword("const") - pkg, name := p.parseExportedName() - - var typ0 types.Type - if p.tok != '=' { - // constant types are never structured - no need for parent type - typ0 = p.parseType(nil) - } - - p.expect('=') - var typ types.Type - var val constant.Value - switch p.tok { - case scanner.Ident: - // bool_lit - if p.lit != "true" && p.lit != "false" { - p.error("expected true or false") - } - typ = types.Typ[types.UntypedBool] - val = constant.MakeBool(p.lit == "true") - p.next() - - case '-', scanner.Int: - // int_lit - typ, val = p.parseNumber() - - case '(': - // complex_lit or rune_lit - p.next() - if p.tok == scanner.Char { - p.next() - p.expect('+') - typ = types.Typ[types.UntypedRune] - _, val = p.parseNumber() - p.expect(')') - break - } - _, re := p.parseNumber() - p.expect('+') - _, im := p.parseNumber() - p.expectKeyword("i") - p.expect(')') - typ = types.Typ[types.UntypedComplex] - val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - - case scanner.Char: - // rune_lit - typ = types.Typ[types.UntypedRune] - val = constant.MakeFromLiteral(p.lit, token.CHAR, 0) - p.next() - - case scanner.String: - // string_lit - typ = types.Typ[types.UntypedString] - val = constant.MakeFromLiteral(p.lit, token.STRING, 0) - p.next() - - default: - p.errorf("expected literal got %s", scanner.TokenString(p.tok)) - } - - if typ0 == nil { - typ0 = typ - } - - pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) -} - -// parseTypeDecl parses a TypeDecl: -// -// TypeDecl = "type" ExportedName Type . -func (p *parser) parseTypeDecl() { - p.expectKeyword("type") - pkg, name := p.parseExportedName() - obj := declTypeName(pkg, name) - - // The type object may have been imported before and thus already - // have a type associated with it. We still need to parse the type - // structure, but throw it away if the object already has a type. - // This ensures that all imports refer to the same type object for - // a given type declaration. - typ := p.parseType(pkg) - - if name := obj.Type().(*types.Named); name.Underlying() == nil { - name.SetUnderlying(typ) - } -} - -// parseVarDecl parses a VarDecl: -// -// VarDecl = "var" ExportedName Type . 
-func (p *parser) parseVarDecl() { - p.expectKeyword("var") - pkg, name := p.parseExportedName() - typ := p.parseType(pkg) - pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) -} - -// parseFunc parses a Func: -// -// Func = Signature [ Body ] . -// Body = "{" ... "}" . -func (p *parser) parseFunc(recv *types.Var) *types.Signature { - sig := p.parseSignature(recv) - if p.tok == '{' { - p.next() - for i := 1; i > 0; p.next() { - switch p.tok { - case '{': - i++ - case '}': - i-- - } - } - } - return sig -} - -// parseMethodDecl parses a MethodDecl: -// -// MethodDecl = "func" Receiver Name Func . -// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . -func (p *parser) parseMethodDecl() { - // "func" already consumed - p.expect('(') - recv, _ := p.parseParameter() // receiver - p.expect(')') - - // determine receiver base type object - base := deref(recv.Type()).(*types.Named) - - // parse method name, signature, and possibly inlined body - _, name := p.parseName(nil, false) - sig := p.parseFunc(recv) - - // methods always belong to the same package as the base type object - pkg := base.Obj().Pkg() - - // add method to type unless type was imported before - // and method exists already - // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small. - base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) -} - -// parseFuncDecl parses a FuncDecl: -// -// FuncDecl = "func" ExportedName Func . -func (p *parser) parseFuncDecl() { - // "func" already consumed - pkg, name := p.parseExportedName() - typ := p.parseFunc(nil) - pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ)) -} - -// parseDecl parses a Decl: -// -// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . -func (p *parser) parseDecl() { - if p.tok == scanner.Ident { - switch p.lit { - case "import": - p.parseImportDecl() - case "const": - p.parseConstDecl() - case "type": - p.parseTypeDecl() - case "var": - p.parseVarDecl() - case "func": - p.next() // look ahead - if p.tok == '(' { - p.parseMethodDecl() - } else { - p.parseFuncDecl() - } - } - } - p.expect('\n') -} - -// ---------------------------------------------------------------------------- -// Export - -// parseExport parses an Export: -// -// Export = "PackageClause { Decl } "$$" . -// PackageClause = "package" PackageName [ "safe" ] "\n" . -func (p *parser) parseExport() *types.Package { - p.expectKeyword("package") - name := p.parsePackageName() - if p.tok == scanner.Ident && p.lit == "safe" { - // package was compiled with -u option - ignore - p.next() - } - p.expect('\n') - - pkg := p.getPkg(p.id, name) - - for p.tok != '$' && p.tok != scanner.EOF { - p.parseDecl() - } - - if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' { - // don't call next()/expect() since reading past the - // export data may cause scanner errors (e.g. NUL chars) - p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch) - } - - if n := p.scanner.ErrorCount; n != 0 { - p.errorf("expected no scanner errors, got %d", n) - } - - // Record all locally referenced packages as imports. 
- var imports []*types.Package - for id, pkg2 := range p.localPkgs { - if pkg2.Name() == "" { - p.errorf("%s package has no name", id) - } - if id == p.id { - continue // avoid self-edge - } - imports = append(imports, pkg2) - } - sort.Sort(byPath(imports)) - pkg.SetImports(imports) - - // package was imported completely and without errors - pkg.MarkComplete() - - return pkg -} - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go deleted file mode 100644 index 9a4ff329e..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go +++ /dev/null @@ -1,1010 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Indexed binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/ast" - "go/constant" - "go/token" - "go/types" - "io" - "math/big" - "reflect" - "sort" - "strconv" - "strings" - - "golang.org/x/tools/internal/typeparams" -) - -// Current bundled export format version. Increase with each format change. -// 0: initial implementation -const bundleVersion = 0 - -// IExportData writes indexed export data for pkg to out. -// -// If no file set is provided, position info will be missing. -// The package path of the top-level package will not be recorded, -// so that calls to IImportData can override with a provided package path. -func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { - return iexportCommon(out, fset, false, iexportVersion, []*types.Package{pkg}) -} - -// IExportBundle writes an indexed export bundle for pkgs to out. -func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { - return iexportCommon(out, fset, true, iexportVersion, pkgs) -} - -func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int, pkgs []*types.Package) (err error) { - if !debug { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - } - - p := iexporter{ - fset: fset, - version: version, - allPkgs: map[*types.Package]bool{}, - stringIndex: map[string]uint64{}, - declIndex: map[types.Object]uint64{}, - tparamNames: map[types.Object]string{}, - typIndex: map[types.Type]uint64{}, - } - if !bundle { - p.localpkg = pkgs[0] - } - - for i, pt := range predeclared() { - p.typIndex[pt] = uint64(i) - } - if len(p.typIndex) > predeclReserved { - panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) - } - - // Initialize work queue with exported declarations. - for _, pkg := range pkgs { - scope := pkg.Scope() - for _, name := range scope.Names() { - if ast.IsExported(name) { - p.pushDecl(scope.Lookup(name)) - } - } - - if bundle { - // Ensure pkg and its imports are included in the index. 
- p.allPkgs[pkg] = true - for _, imp := range pkg.Imports() { - p.allPkgs[imp] = true - } - } - } - - // Loop until no more work. - for !p.declTodo.empty() { - p.doDecl(p.declTodo.popHead()) - } - - // Append indices to data0 section. - dataLen := uint64(p.data0.Len()) - w := p.newWriter() - w.writeIndex(p.declIndex) - - if bundle { - w.uint64(uint64(len(pkgs))) - for _, pkg := range pkgs { - w.pkg(pkg) - imps := pkg.Imports() - w.uint64(uint64(len(imps))) - for _, imp := range imps { - w.pkg(imp) - } - } - } - w.flush() - - // Assemble header. - var hdr intWriter - if bundle { - hdr.uint64(bundleVersion) - } - hdr.uint64(uint64(p.version)) - hdr.uint64(uint64(p.strings.Len())) - hdr.uint64(dataLen) - - // Flush output. - io.Copy(out, &hdr) - io.Copy(out, &p.strings) - io.Copy(out, &p.data0) - - return nil -} - -// writeIndex writes out an object index. mainIndex indicates whether -// we're writing out the main index, which is also read by -// non-compiler tools and includes a complete package description -// (i.e., name and height). -func (w *exportWriter) writeIndex(index map[types.Object]uint64) { - type pkgObj struct { - obj types.Object - name string // qualified name; differs from obj.Name for type params - } - // Build a map from packages to objects from that package. - pkgObjs := map[*types.Package][]pkgObj{} - - // For the main index, make sure to include every package that - // we reference, even if we're not exporting (or reexporting) - // any symbols from it. - if w.p.localpkg != nil { - pkgObjs[w.p.localpkg] = nil - } - for pkg := range w.p.allPkgs { - pkgObjs[pkg] = nil - } - - for obj := range index { - name := w.p.exportName(obj) - pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name}) - } - - var pkgs []*types.Package - for pkg, objs := range pkgObjs { - pkgs = append(pkgs, pkg) - - sort.Slice(objs, func(i, j int) bool { - return objs[i].name < objs[j].name - }) - } - - sort.Slice(pkgs, func(i, j int) bool { - return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) - }) - - w.uint64(uint64(len(pkgs))) - for _, pkg := range pkgs { - w.string(w.exportPath(pkg)) - w.string(pkg.Name()) - w.uint64(uint64(0)) // package height is not needed for go/types - - objs := pkgObjs[pkg] - w.uint64(uint64(len(objs))) - for _, obj := range objs { - w.string(obj.name) - w.uint64(index[obj.obj]) - } - } -} - -// exportName returns the 'exported' name of an object. It differs from -// obj.Name() only for type parameters (see tparamExportName for details). -func (p *iexporter) exportName(obj types.Object) (res string) { - if name := p.tparamNames[obj]; name != "" { - return name - } - return obj.Name() -} - -type iexporter struct { - fset *token.FileSet - out *bytes.Buffer - version int - - localpkg *types.Package - - // allPkgs tracks all packages that have been referenced by - // the export data, so we can ensure to include them in the - // main index. - allPkgs map[*types.Package]bool - - declTodo objQueue - - strings intWriter - stringIndex map[string]uint64 - - data0 intWriter - declIndex map[types.Object]uint64 - tparamNames map[types.Object]string // typeparam->exported name - typIndex map[types.Type]uint64 - - indent int // for tracing support -} - -func (p *iexporter) trace(format string, args ...interface{}) { - if !trace { - // Call sites should also be guarded, but having this check here allows - // easily enabling/disabling debug trace statements. - return - } - fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) 
-} - -// stringOff returns the offset of s within the string section. -// If not already present, it's added to the end. -func (p *iexporter) stringOff(s string) uint64 { - off, ok := p.stringIndex[s] - if !ok { - off = uint64(p.strings.Len()) - p.stringIndex[s] = off - - p.strings.uint64(uint64(len(s))) - p.strings.WriteString(s) - } - return off -} - -// pushDecl adds n to the declaration work queue, if not already present. -func (p *iexporter) pushDecl(obj types.Object) { - // Package unsafe is known to the compiler and predeclared. - // Caller should not ask us to do export it. - if obj.Pkg() == types.Unsafe { - panic("cannot export package unsafe") - } - - if _, ok := p.declIndex[obj]; ok { - return - } - - p.declIndex[obj] = ^uint64(0) // mark obj present in work queue - p.declTodo.pushTail(obj) -} - -// exportWriter handles writing out individual data section chunks. -type exportWriter struct { - p *iexporter - - data intWriter - currPkg *types.Package - prevFile string - prevLine int64 - prevColumn int64 -} - -func (w *exportWriter) exportPath(pkg *types.Package) string { - if pkg == w.p.localpkg { - return "" - } - return pkg.Path() -} - -func (p *iexporter) doDecl(obj types.Object) { - if trace { - p.trace("exporting decl %v (%T)", obj, obj) - p.indent++ - defer func() { - p.indent-- - p.trace("=> %s", obj) - }() - } - w := p.newWriter() - w.setPkg(obj.Pkg(), false) - - switch obj := obj.(type) { - case *types.Var: - w.tag('V') - w.pos(obj.Pos()) - w.typ(obj.Type(), obj.Pkg()) - - case *types.Func: - sig, _ := obj.Type().(*types.Signature) - if sig.Recv() != nil { - panic(internalErrorf("unexpected method: %v", sig)) - } - - // Function. - if typeparams.ForSignature(sig).Len() == 0 { - w.tag('F') - } else { - w.tag('G') - } - w.pos(obj.Pos()) - // The tparam list of the function type is the declaration of the type - // params. So, write out the type params right now. Then those type params - // will be referenced via their type offset (via typOff) in all other - // places in the signature and function where they are used. - // - // While importing the type parameters, tparamList computes and records - // their export name, so that it can be later used when writing the index. - if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 { - w.tparamList(obj.Name(), tparams, obj.Pkg()) - } - w.signature(sig) - - case *types.Const: - w.tag('C') - w.pos(obj.Pos()) - w.value(obj.Type(), obj.Val()) - - case *types.TypeName: - t := obj.Type() - - if tparam, ok := t.(*typeparams.TypeParam); ok { - w.tag('P') - w.pos(obj.Pos()) - constraint := tparam.Constraint() - if p.version >= iexportVersionGo1_18 { - implicit := false - if iface, _ := constraint.(*types.Interface); iface != nil { - implicit = typeparams.IsImplicit(iface) - } - w.bool(implicit) - } - w.typ(constraint, obj.Pkg()) - break - } - - if obj.IsAlias() { - w.tag('A') - w.pos(obj.Pos()) - w.typ(t, obj.Pkg()) - break - } - - // Defined type. - named, ok := t.(*types.Named) - if !ok { - panic(internalErrorf("%s is not a defined type", t)) - } - - if typeparams.ForNamed(named).Len() == 0 { - w.tag('T') - } else { - w.tag('U') - } - w.pos(obj.Pos()) - - if typeparams.ForNamed(named).Len() > 0 { - // While importing the type parameters, tparamList computes and records - // their export name, so that it can be later used when writing the index. 
- w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg()) - } - - underlying := obj.Type().Underlying() - w.typ(underlying, obj.Pkg()) - - if types.IsInterface(t) { - break - } - - n := named.NumMethods() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - m := named.Method(i) - w.pos(m.Pos()) - w.string(m.Name()) - sig, _ := m.Type().(*types.Signature) - - // Receiver type parameters are type arguments of the receiver type, so - // their name must be qualified before exporting recv. - if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 { - prefix := obj.Name() + "." + m.Name() - for i := 0; i < rparams.Len(); i++ { - rparam := rparams.At(i) - name := tparamExportName(prefix, rparam) - w.p.tparamNames[rparam.Obj()] = name - } - } - w.param(sig.Recv()) - w.signature(sig) - } - - default: - panic(internalErrorf("unexpected object: %v", obj)) - } - - p.declIndex[obj] = w.flush() -} - -func (w *exportWriter) tag(tag byte) { - w.data.WriteByte(tag) -} - -func (w *exportWriter) pos(pos token.Pos) { - if w.p.version >= iexportVersionPosCol { - w.posV1(pos) - } else { - w.posV0(pos) - } -} - -func (w *exportWriter) posV1(pos token.Pos) { - if w.p.fset == nil { - w.int64(0) - return - } - - p := w.p.fset.Position(pos) - file := p.Filename - line := int64(p.Line) - column := int64(p.Column) - - deltaColumn := (column - w.prevColumn) << 1 - deltaLine := (line - w.prevLine) << 1 - - if file != w.prevFile { - deltaLine |= 1 - } - if deltaLine != 0 { - deltaColumn |= 1 - } - - w.int64(deltaColumn) - if deltaColumn&1 != 0 { - w.int64(deltaLine) - if deltaLine&1 != 0 { - w.string(file) - } - } - - w.prevFile = file - w.prevLine = line - w.prevColumn = column -} - -func (w *exportWriter) posV0(pos token.Pos) { - if w.p.fset == nil { - w.int64(0) - return - } - - p := w.p.fset.Position(pos) - file := p.Filename - line := int64(p.Line) - - // When file is the same as the last position (common case), - // we can save a few bytes by delta encoding just the line - // number. - // - // Note: Because data objects may be read out of order (or not - // at all), we can only apply delta encoding within a single - // object. This is handled implicitly by tracking prevFile and - // prevLine as fields of exportWriter. - - if file == w.prevFile { - delta := line - w.prevLine - w.int64(delta) - if delta == deltaNewFile { - w.int64(-1) - } - } else { - w.int64(deltaNewFile) - w.int64(line) // line >= 0 - w.string(file) - w.prevFile = file - } - w.prevLine = line -} - -func (w *exportWriter) pkg(pkg *types.Package) { - // Ensure any referenced packages are declared in the main index. - w.p.allPkgs[pkg] = true - - w.string(w.exportPath(pkg)) -} - -func (w *exportWriter) qualifiedIdent(obj types.Object) { - name := w.p.exportName(obj) - - // Ensure any referenced declarations are written out too. 
- w.p.pushDecl(obj) - w.string(name) - w.pkg(obj.Pkg()) -} - -func (w *exportWriter) typ(t types.Type, pkg *types.Package) { - w.data.uint64(w.p.typOff(t, pkg)) -} - -func (p *iexporter) newWriter() *exportWriter { - return &exportWriter{p: p} -} - -func (w *exportWriter) flush() uint64 { - off := uint64(w.p.data0.Len()) - io.Copy(&w.p.data0, &w.data) - return off -} - -func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { - off, ok := p.typIndex[t] - if !ok { - w := p.newWriter() - w.doTyp(t, pkg) - off = predeclReserved + w.flush() - p.typIndex[t] = off - } - return off -} - -func (w *exportWriter) startType(k itag) { - w.data.uint64(uint64(k)) -} - -func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { - if trace { - w.p.trace("exporting type %s (%T)", t, t) - w.p.indent++ - defer func() { - w.p.indent-- - w.p.trace("=> %s", t) - }() - } - switch t := t.(type) { - case *types.Named: - if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 { - w.startType(instanceType) - // TODO(rfindley): investigate if this position is correct, and if it - // matters. - w.pos(t.Obj().Pos()) - w.typeList(targs, pkg) - w.typ(typeparams.NamedTypeOrigin(t), pkg) - return - } - w.startType(definedType) - w.qualifiedIdent(t.Obj()) - - case *typeparams.TypeParam: - w.startType(typeParamType) - w.qualifiedIdent(t.Obj()) - - case *types.Pointer: - w.startType(pointerType) - w.typ(t.Elem(), pkg) - - case *types.Slice: - w.startType(sliceType) - w.typ(t.Elem(), pkg) - - case *types.Array: - w.startType(arrayType) - w.uint64(uint64(t.Len())) - w.typ(t.Elem(), pkg) - - case *types.Chan: - w.startType(chanType) - // 1 RecvOnly; 2 SendOnly; 3 SendRecv - var dir uint64 - switch t.Dir() { - case types.RecvOnly: - dir = 1 - case types.SendOnly: - dir = 2 - case types.SendRecv: - dir = 3 - } - w.uint64(dir) - w.typ(t.Elem(), pkg) - - case *types.Map: - w.startType(mapType) - w.typ(t.Key(), pkg) - w.typ(t.Elem(), pkg) - - case *types.Signature: - w.startType(signatureType) - w.setPkg(pkg, true) - w.signature(t) - - case *types.Struct: - w.startType(structType) - w.setPkg(pkg, true) - - n := t.NumFields() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - f := t.Field(i) - w.pos(f.Pos()) - w.string(f.Name()) - w.typ(f.Type(), pkg) - w.bool(f.Anonymous()) - w.string(t.Tag(i)) // note (or tag) - } - - case *types.Interface: - w.startType(interfaceType) - w.setPkg(pkg, true) - - n := t.NumEmbeddeds() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - ft := t.EmbeddedType(i) - tPkg := pkg - if named, _ := ft.(*types.Named); named != nil { - w.pos(named.Obj().Pos()) - } else { - w.pos(token.NoPos) - } - w.typ(ft, tPkg) - } - - n = t.NumExplicitMethods() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - m := t.ExplicitMethod(i) - w.pos(m.Pos()) - w.string(m.Name()) - sig, _ := m.Type().(*types.Signature) - w.signature(sig) - } - - case *typeparams.Union: - w.startType(unionType) - nt := t.Len() - w.uint64(uint64(nt)) - for i := 0; i < nt; i++ { - term := t.Term(i) - w.bool(term.Tilde()) - w.typ(term.Type(), pkg) - } - - default: - panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) - } -} - -func (w *exportWriter) setPkg(pkg *types.Package, write bool) { - if write { - w.pkg(pkg) - } - - w.currPkg = pkg -} - -func (w *exportWriter) signature(sig *types.Signature) { - w.paramList(sig.Params()) - w.paramList(sig.Results()) - if sig.Params().Len() > 0 { - w.bool(sig.Variadic()) - } -} - -func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) { - 
w.uint64(uint64(ts.Len())) - for i := 0; i < ts.Len(); i++ { - w.typ(ts.At(i), pkg) - } -} - -func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) { - ll := uint64(list.Len()) - w.uint64(ll) - for i := 0; i < list.Len(); i++ { - tparam := list.At(i) - // Set the type parameter exportName before exporting its type. - exportName := tparamExportName(prefix, tparam) - w.p.tparamNames[tparam.Obj()] = exportName - w.typ(list.At(i), pkg) - } -} - -const blankMarker = "$" - -// tparamExportName returns the 'exported' name of a type parameter, which -// differs from its actual object name: it is prefixed with a qualifier, and -// blank type parameter names are disambiguated by their index in the type -// parameter list. -func tparamExportName(prefix string, tparam *typeparams.TypeParam) string { - assert(prefix != "") - name := tparam.Obj().Name() - if name == "_" { - name = blankMarker + strconv.Itoa(tparam.Index()) - } - return prefix + "." + name -} - -// tparamName returns the real name of a type parameter, after stripping its -// qualifying prefix and reverting blank-name encoding. See tparamExportName -// for details. -func tparamName(exportName string) string { - // Remove the "path" from the type param name that makes it unique. - ix := strings.LastIndex(exportName, ".") - if ix < 0 { - errorf("malformed type parameter export name %s: missing prefix", exportName) - } - name := exportName[ix+1:] - if strings.HasPrefix(name, blankMarker) { - return "_" - } - return name -} - -func (w *exportWriter) paramList(tup *types.Tuple) { - n := tup.Len() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - w.param(tup.At(i)) - } -} - -func (w *exportWriter) param(obj types.Object) { - w.pos(obj.Pos()) - w.localIdent(obj) - w.typ(obj.Type(), obj.Pkg()) -} - -func (w *exportWriter) value(typ types.Type, v constant.Value) { - w.typ(typ, nil) - if w.p.version >= iexportVersionGo1_18 { - w.int64(int64(v.Kind())) - } - - switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { - case types.IsBoolean: - w.bool(constant.BoolVal(v)) - case types.IsInteger: - var i big.Int - if i64, exact := constant.Int64Val(v); exact { - i.SetInt64(i64) - } else if ui64, exact := constant.Uint64Val(v); exact { - i.SetUint64(ui64) - } else { - i.SetString(v.ExactString(), 10) - } - w.mpint(&i, typ) - case types.IsFloat: - f := constantToFloat(v) - w.mpfloat(f, typ) - case types.IsComplex: - w.mpfloat(constantToFloat(constant.Real(v)), typ) - w.mpfloat(constantToFloat(constant.Imag(v)), typ) - case types.IsString: - w.string(constant.StringVal(v)) - default: - if b.Kind() == types.Invalid { - // package contains type errors - break - } - panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying())) - } -} - -// constantToFloat converts a constant.Value with kind constant.Float to a -// big.Float. -func constantToFloat(x constant.Value) *big.Float { - x = constant.ToFloat(x) - // Use the same floating-point precision (512) as cmd/compile - // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). - const mpprec = 512 - var f big.Float - f.SetPrec(mpprec) - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - n := valueToRat(num) - d := valueToRat(denom) - f.SetRat(n.Quo(n, d)) - } else { - // Value too large to represent as a fraction => inaccessible. 
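tparamExportName and tparamName above form a round trip: the exported name is qualified by its owning declaration, blank names are replaced by an index, and the importer strips that back off. A small standalone sketch over plain strings (the prefixes and names are made up):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

const blankMarker = "$"

// exportName qualifies a type parameter name with its owner ("Type" or
// "Type.Method") and disambiguates blank names by their index.
func exportName(prefix, name string, index int) string {
	if name == "_" {
		name = blankMarker + strconv.Itoa(index)
	}
	return prefix + "." + name
}

// importName strips the qualifier again and restores blank names.
func importName(exported string) string {
	name := exported[strings.LastIndex(exported, ".")+1:]
	if strings.HasPrefix(name, blankMarker) {
		return "_"
	}
	return name
}

func main() {
	e := exportName("List.Map", "_", 1) // "List.Map.$1"
	fmt.Println(e, importName(e))       // List.Map.$1 _
	e = exportName("Pair", "K", 0)      // "Pair.K"
	fmt.Println(e, importName(e))       // Pair.K K
}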
- // TODO(gri): add big.Float accessor to constant.Value. - _, ok := f.SetString(x.ExactString()) - assert(ok) - } - return &f -} - -// mpint exports a multi-precision integer. -// -// For unsigned types, small values are written out as a single -// byte. Larger values are written out as a length-prefixed big-endian -// byte string, where the length prefix is encoded as its complement. -// For example, bytes 0, 1, and 2 directly represent the integer -// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, -// 2-, and 3-byte big-endian string follow. -// -// Encoding for signed types use the same general approach as for -// unsigned types, except small values use zig-zag encoding and the -// bottom bit of length prefix byte for large values is reserved as a -// sign bit. -// -// The exact boundary between small and large encodings varies -// according to the maximum number of bytes needed to encode a value -// of type typ. As a special case, 8-bit types are always encoded as a -// single byte. -// -// TODO(mdempsky): Is this level of complexity really worthwhile? -func (w *exportWriter) mpint(x *big.Int, typ types.Type) { - basic, ok := typ.Underlying().(*types.Basic) - if !ok { - panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) - } - - signed, maxBytes := intSize(basic) - - negative := x.Sign() < 0 - if !signed && negative { - panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) - } - - b := x.Bytes() - if len(b) > 0 && b[0] == 0 { - panic(internalErrorf("leading zeros")) - } - if uint(len(b)) > maxBytes { - panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) - } - - maxSmall := 256 - maxBytes - if signed { - maxSmall = 256 - 2*maxBytes - } - if maxBytes == 1 { - maxSmall = 256 - } - - // Check if x can use small value encoding. - if len(b) <= 1 { - var ux uint - if len(b) == 1 { - ux = uint(b[0]) - } - if signed { - ux <<= 1 - if negative { - ux-- - } - } - if ux < maxSmall { - w.data.WriteByte(byte(ux)) - return - } - } - - n := 256 - uint(len(b)) - if signed { - n = 256 - 2*uint(len(b)) - if negative { - n |= 1 - } - } - if n < maxSmall || n >= 256 { - panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) - } - - w.data.WriteByte(byte(n)) - w.data.Write(b) -} - -// mpfloat exports a multi-precision floating point number. -// -// The number's value is decomposed into mantissa × 2**exponent, where -// mantissa is an integer. The value is written out as mantissa (as a -// multi-precision integer) and then the exponent, except exponent is -// omitted if mantissa is zero. -func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { - if f.IsInf() { - panic("infinite constant") - } - - // Break into f = mant × 2**exp, with 0.5 <= mant < 1. - var mant big.Float - exp := int64(f.MantExp(&mant)) - - // Scale so that mant is an integer. 
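The mpint comment above describes two encodings: a single byte for small values (zig-zag folded when the type is signed), and a complemented length prefix followed by big-endian magnitude bytes for large ones. A standalone sketch of the easy halves of that scheme, with the range checks and the signed large-value sign bit left out:

package main

import "fmt"

// encodeSmallSigned writes a small signed value as one byte, as in
// exportWriter.mpint: the magnitude shifted left once, minus one if negative.
func encodeSmallSigned(x int64) byte {
	ux := uint64(x)
	if x < 0 {
		ux = uint64(-x)
	}
	ux <<= 1
	if x < 0 {
		ux--
	}
	return byte(ux)
}

// decodeSmallSigned is the matching half of importReader.mpint.
func decodeSmallSigned(n byte) int64 {
	v := int64(n) >> 1
	if n&1 != 0 {
		v = ^v
	}
	return v
}

// largeUnsignedPrefix is the complemented length prefix for unsigned types:
// 255, 254, 253, ... announce a 1-, 2-, 3-byte big-endian magnitude to follow.
func largeUnsignedPrefix(numBytes int) byte { return byte(256 - numBytes) }

func main() {
	for _, x := range []int64{0, 1, -1, 5, -5, 119, -120} {
		fmt.Println(x, "->", encodeSmallSigned(x), "->", decodeSmallSigned(encodeSmallSigned(x)))
	}
	fmt.Println(largeUnsignedPrefix(1), largeUnsignedPrefix(2), largeUnsignedPrefix(3)) // 255 254 253
}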
- prec := mant.MinPrec() - mant.SetMantExp(&mant, int(prec)) - exp -= int64(prec) - - manti, acc := mant.Int(nil) - if acc != big.Exact { - panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) - } - w.mpint(manti, typ) - if manti.Sign() != 0 { - w.int64(exp) - } -} - -func (w *exportWriter) bool(b bool) bool { - var x uint64 - if b { - x = 1 - } - w.uint64(x) - return b -} - -func (w *exportWriter) int64(x int64) { w.data.int64(x) } -func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } -func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } - -func (w *exportWriter) localIdent(obj types.Object) { - // Anonymous parameters. - if obj == nil { - w.string("") - return - } - - name := obj.Name() - if name == "_" { - w.string("_") - return - } - - w.string(name) -} - -type intWriter struct { - bytes.Buffer -} - -func (w *intWriter) int64(x int64) { - var buf [binary.MaxVarintLen64]byte - n := binary.PutVarint(buf[:], x) - w.Write(buf[:n]) -} - -func (w *intWriter) uint64(x uint64) { - var buf [binary.MaxVarintLen64]byte - n := binary.PutUvarint(buf[:], x) - w.Write(buf[:n]) -} - -func assert(cond bool) { - if !cond { - panic("internal error: assertion failed") - } -} - -// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. - -// objQueue is a FIFO queue of types.Object. The zero value of objQueue is -// a ready-to-use empty queue. -type objQueue struct { - ring []types.Object - head, tail int -} - -// empty returns true if q contains no Nodes. -func (q *objQueue) empty() bool { - return q.head == q.tail -} - -// pushTail appends n to the tail of the queue. -func (q *objQueue) pushTail(obj types.Object) { - if len(q.ring) == 0 { - q.ring = make([]types.Object, 16) - } else if q.head+len(q.ring) == q.tail { - // Grow the ring. - nring := make([]types.Object, len(q.ring)*2) - // Copy the old elements. - part := q.ring[q.head%len(q.ring):] - if q.tail-q.head <= len(part) { - part = part[:q.tail-q.head] - copy(nring, part) - } else { - pos := copy(nring, part) - copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) - } - q.ring, q.head, q.tail = nring, 0, q.tail-q.head - } - - q.ring[q.tail%len(q.ring)] = obj - q.tail++ -} - -// popHead pops a node from the head of the queue. It panics if q is empty. -func (q *objQueue) popHead() types.Object { - if q.empty() { - panic("dequeue empty") - } - obj := q.ring[q.head%len(q.ring)] - q.head++ - return obj -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go deleted file mode 100644 index 4caa0f55d..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go +++ /dev/null @@ -1,878 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. 
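objQueue above is the worklist behind pushDecl: declarations referenced while writing one object are queued and drained until nothing new appears. A toy sketch of that traversal using a plain slice instead of the growable ring (the declaration names are invented):

package main

import "fmt"

func main() {
	// refs maps a declaration to the declarations it mentions (illustrative only).
	refs := map[string][]string{
		"main.Run":    {"main.Config", "main.logf"},
		"main.Config": {"main.Option"},
	}

	seen := map[string]bool{"main.Run": true}
	queue := []string{"main.Run"} // pushTail

	for len(queue) > 0 {
		obj := queue[0] // popHead
		queue = queue[1:]
		fmt.Println("export", obj)
		for _, dep := range refs[obj] {
			if !seen[dep] {
				seen[dep] = true
				queue = append(queue, dep) // pushTail for newly referenced decls
			}
		}
	}
}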
- -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "io" - "math/big" - "sort" - "strings" - - "golang.org/x/tools/internal/typeparams" -) - -type intReader struct { - *bytes.Reader - path string -} - -func (r *intReader) int64() int64 { - i, err := binary.ReadVarint(r.Reader) - if err != nil { - errorf("import %q: read varint error: %v", r.path, err) - } - return i -} - -func (r *intReader) uint64() uint64 { - i, err := binary.ReadUvarint(r.Reader) - if err != nil { - errorf("import %q: read varint error: %v", r.path, err) - } - return i -} - -// Keep this in sync with constants in iexport.go. -const ( - iexportVersionGo1_11 = 0 - iexportVersionPosCol = 1 - iexportVersionGo1_18 = 2 - iexportVersionGenerics = 2 -) - -type ident struct { - pkg *types.Package - name string -} - -const predeclReserved = 32 - -type itag uint64 - -const ( - // Types - definedType itag = iota - pointerType - sliceType - arrayType - chanType - mapType - signatureType - structType - interfaceType - typeParamType - instanceType - unionType -) - -// IImportData imports a package from the serialized package data -// and returns 0 and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. -func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { - pkgs, err := iimportCommon(fset, imports, data, false, path) - if err != nil { - return 0, nil, err - } - return 0, pkgs[0], nil -} - -// IImportBundle imports a set of packages from the serialized package bundle. -func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { - return iimportCommon(fset, imports, data, true, "") -} - -func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string) (pkgs []*types.Package, err error) { - const currentVersion = 1 - version := int64(-1) - if !debug { - defer func() { - if e := recover(); e != nil { - if bundle { - err = fmt.Errorf("%v", e) - } else if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) - } - } - }() - } - - r := &intReader{bytes.NewReader(data), path} - - if bundle { - bundleVersion := r.uint64() - switch bundleVersion { - case bundleVersion: - default: - errorf("unknown bundle format version %d", bundleVersion) - } - } - - version = int64(r.uint64()) - switch version { - case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11: - default: - if version > iexportVersionGo1_18 { - errorf("unstable iexport format version %d, just rebuild compiler and std library", version) - } else { - errorf("unknown iexport format version %d", version) - } - } - - sLen := int64(r.uint64()) - dLen := int64(r.uint64()) - - whence, _ := r.Seek(0, io.SeekCurrent) - stringData := data[whence : whence+sLen] - declData := data[whence+sLen : whence+sLen+dLen] - r.Seek(sLen+dLen, io.SeekCurrent) - - p := iimporter{ - version: int(version), - ipath: path, - - stringData: stringData, - stringCache: make(map[uint64]string), - pkgCache: make(map[uint64]*types.Package), - - declData: declData, - pkgIndex: make(map[*types.Package]map[string]uint64), - typCache: make(map[uint64]types.Type), - // Separate map for typeparams, keyed by 
their package and unique - // name. - tparamIndex: make(map[ident]types.Type), - - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - } - defer p.fake.setLines() // set lines for files in fset - - for i, pt := range predeclared() { - p.typCache[uint64(i)] = pt - } - - pkgList := make([]*types.Package, r.uint64()) - for i := range pkgList { - pkgPathOff := r.uint64() - pkgPath := p.stringAt(pkgPathOff) - pkgName := p.stringAt(r.uint64()) - _ = r.uint64() // package height; unused by go/types - - if pkgPath == "" { - pkgPath = path - } - pkg := imports[pkgPath] - if pkg == nil { - pkg = types.NewPackage(pkgPath, pkgName) - imports[pkgPath] = pkg - } else if pkg.Name() != pkgName { - errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) - } - - p.pkgCache[pkgPathOff] = pkg - - nameIndex := make(map[string]uint64) - for nSyms := r.uint64(); nSyms > 0; nSyms-- { - name := p.stringAt(r.uint64()) - nameIndex[name] = r.uint64() - } - - p.pkgIndex[pkg] = nameIndex - pkgList[i] = pkg - } - - if bundle { - pkgs = make([]*types.Package, r.uint64()) - for i := range pkgs { - pkg := p.pkgAt(r.uint64()) - imps := make([]*types.Package, r.uint64()) - for j := range imps { - imps[j] = p.pkgAt(r.uint64()) - } - pkg.SetImports(imps) - pkgs[i] = pkg - } - } else { - if len(pkgList) == 0 { - errorf("no packages found for %s", path) - panic("unreachable") - } - pkgs = pkgList[:1] - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), pkgList[1:]...) - sort.Sort(byPath(list)) - pkgs[0].SetImports(list) - } - - for _, pkg := range pkgs { - if pkg.Complete() { - continue - } - - names := make([]string, 0, len(p.pkgIndex[pkg])) - for name := range p.pkgIndex[pkg] { - names = append(names, name) - } - sort.Strings(names) - for _, name := range names { - p.doDecl(pkg, name) - } - - // package was imported completely and without errors - pkg.MarkComplete() - } - - // SetConstraint can't be called if the constraint type is not yet complete. - // When type params are created in the 'P' case of (*importReader).obj(), - // the associated constraint type may not be complete due to recursion. - // Therefore, we defer calling SetConstraint there, and call it here instead - // after all types are complete. - for _, d := range p.later { - typeparams.SetTypeParamConstraint(d.t, d.constraint) - } - - for _, typ := range p.interfaceList { - typ.Complete() - } - - return pkgs, nil -} - -type setConstraintArgs struct { - t *typeparams.TypeParam - constraint types.Type -} - -type iimporter struct { - version int - ipath string - - stringData []byte - stringCache map[uint64]string - pkgCache map[uint64]*types.Package - - declData []byte - pkgIndex map[*types.Package]map[string]uint64 - typCache map[uint64]types.Type - tparamIndex map[ident]types.Type - - fake fakeFileSet - interfaceList []*types.Interface - - // Arguments for calls to SetConstraint that are deferred due to recursive types - later []setConstraintArgs - - indent int // for tracing support -} - -func (p *iimporter) trace(format string, args ...interface{}) { - if !trace { - // Call sites should also be guarded, but having this check here allows - // easily enabling/disabling debug trace statements. - return - } - fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) 
-} - -func (p *iimporter) doDecl(pkg *types.Package, name string) { - if debug { - p.trace("import decl %s", name) - p.indent++ - defer func() { - p.indent-- - p.trace("=> %s", name) - }() - } - // See if we've already imported this declaration. - if obj := pkg.Scope().Lookup(name); obj != nil { - return - } - - off, ok := p.pkgIndex[pkg][name] - if !ok { - errorf("%v.%v not in index", pkg, name) - } - - r := &importReader{p: p, currPkg: pkg} - r.declReader.Reset(p.declData[off:]) - - r.obj(name) -} - -func (p *iimporter) stringAt(off uint64) string { - if s, ok := p.stringCache[off]; ok { - return s - } - - slen, n := binary.Uvarint(p.stringData[off:]) - if n <= 0 { - errorf("varint failed") - } - spos := off + uint64(n) - s := string(p.stringData[spos : spos+slen]) - p.stringCache[off] = s - return s -} - -func (p *iimporter) pkgAt(off uint64) *types.Package { - if pkg, ok := p.pkgCache[off]; ok { - return pkg - } - path := p.stringAt(off) - errorf("missing package %q in %q", path, p.ipath) - return nil -} - -func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { - if t, ok := p.typCache[off]; ok && canReuse(base, t) { - return t - } - - if off < predeclReserved { - errorf("predeclared type missing from cache: %v", off) - } - - r := &importReader{p: p} - r.declReader.Reset(p.declData[off-predeclReserved:]) - t := r.doType(base) - - if canReuse(base, t) { - p.typCache[off] = t - } - return t -} - -// canReuse reports whether the type rhs on the RHS of the declaration for def -// may be re-used. -// -// Specifically, if def is non-nil and rhs is an interface type with methods, it -// may not be re-used because we have a convention of setting the receiver type -// for interface methods to def. -func canReuse(def *types.Named, rhs types.Type) bool { - if def == nil { - return true - } - iface, _ := rhs.(*types.Interface) - if iface == nil { - return true - } - // Don't use iface.Empty() here as iface may not be complete. - return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0 -} - -type importReader struct { - p *iimporter - declReader bytes.Reader - currPkg *types.Package - prevFile string - prevLine int64 - prevColumn int64 -} - -func (r *importReader) obj(name string) { - tag := r.byte() - pos := r.pos() - - switch tag { - case 'A': - typ := r.typ() - - r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) - - case 'C': - typ, val := r.value() - - r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) - - case 'F', 'G': - var tparams []*typeparams.TypeParam - if tag == 'G' { - tparams = r.tparamList() - } - sig := r.signature(nil, nil, tparams) - r.declare(types.NewFunc(pos, r.currPkg, name, sig)) - - case 'T', 'U': - // Types can be recursive. We need to setup a stub - // declaration before recursing. - obj := types.NewTypeName(pos, r.currPkg, name, nil) - named := types.NewNamed(obj, nil, nil) - // Declare obj before calling r.tparamList, so the new type name is recognized - // if used in the constraint of one of its own typeparams (see #48280). - r.declare(obj) - if tag == 'U' { - tparams := r.tparamList() - typeparams.SetForNamed(named, tparams) - } - - underlying := r.p.typAt(r.uint64(), named).Underlying() - named.SetUnderlying(underlying) - - if !isInterface(underlying) { - for n := r.uint64(); n > 0; n-- { - mpos := r.pos() - mname := r.ident() - recv := r.param() - - // If the receiver has any targs, set those as the - // rparams of the method (since those are the - // typeparams being used in the method sig/body). 
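stringAt above resolves a string purely from its byte offset into the string section: a uvarint length followed by the raw bytes, cached per offset. A standalone sketch of that section layout, without the cache (the strings are examples only):

package main

import (
	"encoding/binary"
	"fmt"
)

// appendString appends one entry and returns its offset plus the grown section.
func appendString(section []byte, s string) (off uint64, out []byte) {
	off = uint64(len(section))
	var buf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], uint64(len(s)))
	out = append(append(section, buf[:n]...), s...)
	return off, out
}

// stringAt mirrors iimporter.stringAt: uvarint length, then the bytes.
func stringAt(section []byte, off uint64) string {
	slen, n := binary.Uvarint(section[off:])
	spos := off + uint64(n)
	return string(section[spos : spos+slen])
}

func main() {
	var section []byte
	off1, section := appendString(section, "golang.org/x/tools")
	off2, section := appendString(section, "Package")
	fmt.Println(stringAt(section, off1)) // golang.org/x/tools
	fmt.Println(stringAt(section, off2)) // Package
}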
- base := baseType(recv.Type()) - assert(base != nil) - targs := typeparams.NamedTypeArgs(base) - var rparams []*typeparams.TypeParam - if targs.Len() > 0 { - rparams = make([]*typeparams.TypeParam, targs.Len()) - for i := range rparams { - rparams[i] = targs.At(i).(*typeparams.TypeParam) - } - } - msig := r.signature(recv, rparams, nil) - - named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) - } - } - - case 'P': - // We need to "declare" a typeparam in order to have a name that - // can be referenced recursively (if needed) in the type param's - // bound. - if r.p.version < iexportVersionGenerics { - errorf("unexpected type param type") - } - name0 := tparamName(name) - tn := types.NewTypeName(pos, r.currPkg, name0, nil) - t := typeparams.NewTypeParam(tn, nil) - - // To handle recursive references to the typeparam within its - // bound, save the partial type in tparamIndex before reading the bounds. - id := ident{r.currPkg, name} - r.p.tparamIndex[id] = t - var implicit bool - if r.p.version >= iexportVersionGo1_18 { - implicit = r.bool() - } - constraint := r.typ() - if implicit { - iface, _ := constraint.(*types.Interface) - if iface == nil { - errorf("non-interface constraint marked implicit") - } - typeparams.MarkImplicit(iface) - } - // The constraint type may not be complete, if we - // are in the middle of a type recursion involving type - // constraints. So, we defer SetConstraint until we have - // completely set up all types in ImportData. - r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint}) - - case 'V': - typ := r.typ() - - r.declare(types.NewVar(pos, r.currPkg, name, typ)) - - default: - errorf("unexpected tag: %v", tag) - } -} - -func (r *importReader) declare(obj types.Object) { - obj.Pkg().Scope().Insert(obj) -} - -func (r *importReader) value() (typ types.Type, val constant.Value) { - typ = r.typ() - if r.p.version >= iexportVersionGo1_18 { - // TODO: add support for using the kind. 
- _ = constant.Kind(r.int64()) - } - - switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { - case types.IsBoolean: - val = constant.MakeBool(r.bool()) - - case types.IsString: - val = constant.MakeString(r.string()) - - case types.IsInteger: - var x big.Int - r.mpint(&x, b) - val = constant.Make(&x) - - case types.IsFloat: - val = r.mpfloat(b) - - case types.IsComplex: - re := r.mpfloat(b) - im := r.mpfloat(b) - val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - - default: - if b.Kind() == types.Invalid { - val = constant.MakeUnknown() - return - } - errorf("unexpected type %v", typ) // panics - panic("unreachable") - } - - return -} - -func intSize(b *types.Basic) (signed bool, maxBytes uint) { - if (b.Info() & types.IsUntyped) != 0 { - return true, 64 - } - - switch b.Kind() { - case types.Float32, types.Complex64: - return true, 3 - case types.Float64, types.Complex128: - return true, 7 - } - - signed = (b.Info() & types.IsUnsigned) == 0 - switch b.Kind() { - case types.Int8, types.Uint8: - maxBytes = 1 - case types.Int16, types.Uint16: - maxBytes = 2 - case types.Int32, types.Uint32: - maxBytes = 4 - default: - maxBytes = 8 - } - - return -} - -func (r *importReader) mpint(x *big.Int, typ *types.Basic) { - signed, maxBytes := intSize(typ) - - maxSmall := 256 - maxBytes - if signed { - maxSmall = 256 - 2*maxBytes - } - if maxBytes == 1 { - maxSmall = 256 - } - - n, _ := r.declReader.ReadByte() - if uint(n) < maxSmall { - v := int64(n) - if signed { - v >>= 1 - if n&1 != 0 { - v = ^v - } - } - x.SetInt64(v) - return - } - - v := -n - if signed { - v = -(n &^ 1) >> 1 - } - if v < 1 || uint(v) > maxBytes { - errorf("weird decoding: %v, %v => %v", n, signed, v) - } - b := make([]byte, v) - io.ReadFull(&r.declReader, b) - x.SetBytes(b) - if signed && n&1 != 0 { - x.Neg(x) - } -} - -func (r *importReader) mpfloat(typ *types.Basic) constant.Value { - var mant big.Int - r.mpint(&mant, typ) - var f big.Float - f.SetInt(&mant) - if f.Sign() != 0 { - f.SetMantExp(&f, int(r.int64())) - } - return constant.Make(&f) -} - -func (r *importReader) ident() string { - return r.string() -} - -func (r *importReader) qualifiedIdent() (*types.Package, string) { - name := r.string() - pkg := r.pkg() - return pkg, name -} - -func (r *importReader) pos() token.Pos { - if r.p.version >= iexportVersionPosCol { - r.posv1() - } else { - r.posv0() - } - - if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 { - return token.NoPos - } - return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) -} - -func (r *importReader) posv0() { - delta := r.int64() - if delta != deltaNewFile { - r.prevLine += delta - } else if l := r.int64(); l == -1 { - r.prevLine += deltaNewFile - } else { - r.prevFile = r.string() - r.prevLine = l - } -} - -func (r *importReader) posv1() { - delta := r.int64() - r.prevColumn += delta >> 1 - if delta&1 != 0 { - delta = r.int64() - r.prevLine += delta >> 1 - if delta&1 != 0 { - r.prevFile = r.string() - } - } -} - -func (r *importReader) typ() types.Type { - return r.p.typAt(r.uint64(), nil) -} - -func isInterface(t types.Type) bool { - _, ok := t.(*types.Interface) - return ok -} - -func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } -func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } - -func (r *importReader) doType(base *types.Named) (res types.Type) { - k := r.kind() - if debug { - r.p.trace("importing type %d (base: %s)", k, base) - r.p.indent++ - defer func() { - r.p.indent-- - 
r.p.trace("=> %s", res) - }() - } - switch k { - default: - errorf("unexpected kind tag in %q: %v", r.p.ipath, k) - return nil - - case definedType: - pkg, name := r.qualifiedIdent() - r.p.doDecl(pkg, name) - return pkg.Scope().Lookup(name).(*types.TypeName).Type() - case pointerType: - return types.NewPointer(r.typ()) - case sliceType: - return types.NewSlice(r.typ()) - case arrayType: - n := r.uint64() - return types.NewArray(r.typ(), int64(n)) - case chanType: - dir := chanDir(int(r.uint64())) - return types.NewChan(dir, r.typ()) - case mapType: - return types.NewMap(r.typ(), r.typ()) - case signatureType: - r.currPkg = r.pkg() - return r.signature(nil, nil, nil) - - case structType: - r.currPkg = r.pkg() - - fields := make([]*types.Var, r.uint64()) - tags := make([]string, len(fields)) - for i := range fields { - fpos := r.pos() - fname := r.ident() - ftyp := r.typ() - emb := r.bool() - tag := r.string() - - fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) - tags[i] = tag - } - return types.NewStruct(fields, tags) - - case interfaceType: - r.currPkg = r.pkg() - - embeddeds := make([]types.Type, r.uint64()) - for i := range embeddeds { - _ = r.pos() - embeddeds[i] = r.typ() - } - - methods := make([]*types.Func, r.uint64()) - for i := range methods { - mpos := r.pos() - mname := r.ident() - - // TODO(mdempsky): Matches bimport.go, but I - // don't agree with this. - var recv *types.Var - if base != nil { - recv = types.NewVar(token.NoPos, r.currPkg, "", base) - } - - msig := r.signature(recv, nil, nil) - methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) - } - - typ := newInterface(methods, embeddeds) - r.p.interfaceList = append(r.p.interfaceList, typ) - return typ - - case typeParamType: - if r.p.version < iexportVersionGenerics { - errorf("unexpected type param type") - } - pkg, name := r.qualifiedIdent() - id := ident{pkg, name} - if t, ok := r.p.tparamIndex[id]; ok { - // We're already in the process of importing this typeparam. - return t - } - // Otherwise, import the definition of the typeparam now. - r.p.doDecl(pkg, name) - return r.p.tparamIndex[id] - - case instanceType: - if r.p.version < iexportVersionGenerics { - errorf("unexpected instantiation type") - } - // pos does not matter for instances: they are positioned on the original - // type. - _ = r.pos() - len := r.uint64() - targs := make([]types.Type, len) - for i := range targs { - targs[i] = r.typ() - } - baseType := r.typ() - // The imported instantiated type doesn't include any methods, so - // we must always use the methods of the base (orig) type. 
- // TODO provide a non-nil *Environment - t, _ := typeparams.Instantiate(nil, baseType, targs, false) - return t - - case unionType: - if r.p.version < iexportVersionGenerics { - errorf("unexpected instantiation type") - } - terms := make([]*typeparams.Term, r.uint64()) - for i := range terms { - terms[i] = typeparams.NewTerm(r.bool(), r.typ()) - } - return typeparams.NewUnion(terms) - } -} - -func (r *importReader) kind() itag { - return itag(r.uint64()) -} - -func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { - params := r.paramList() - results := r.paramList() - variadic := params.Len() > 0 && r.bool() - return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic) -} - -func (r *importReader) tparamList() []*typeparams.TypeParam { - n := r.uint64() - if n == 0 { - return nil - } - xs := make([]*typeparams.TypeParam, n) - for i := range xs { - // Note: the standard library importer is tolerant of nil types here, - // though would panic in SetTypeParams. - xs[i] = r.typ().(*typeparams.TypeParam) - } - return xs -} - -func (r *importReader) paramList() *types.Tuple { - xs := make([]*types.Var, r.uint64()) - for i := range xs { - xs[i] = r.param() - } - return types.NewTuple(xs...) -} - -func (r *importReader) param() *types.Var { - pos := r.pos() - name := r.ident() - typ := r.typ() - return types.NewParam(pos, r.currPkg, name, typ) -} - -func (r *importReader) bool() bool { - return r.uint64() != 0 -} - -func (r *importReader) int64() int64 { - n, err := binary.ReadVarint(&r.declReader) - if err != nil { - errorf("readVarint: %v", err) - } - return n -} - -func (r *importReader) uint64() uint64 { - n, err := binary.ReadUvarint(&r.declReader) - if err != nil { - errorf("readUvarint: %v", err) - } - return n -} - -func (r *importReader) byte() byte { - x, err := r.declReader.ReadByte() - if err != nil { - errorf("declReader.ReadByte: %v", err) - } - return x -} - -func baseType(typ types.Type) *types.Named { - // pointer receivers are never types.Named types - if p, _ := typ.(*types.Pointer); p != nil { - typ = p.Elem() - } - // receiver base types are always (possibly generic) types.Named types - n, _ := typ.(*types.Named) - return n -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/support_go117.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/support_go117.go deleted file mode 100644 index d892273ef..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/support_go117.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGo1_11 - -func additionalPredeclared() []types.Type { - return nil -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/support_go118.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/support_go118.go deleted file mode 100644 index a99384323..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/support_go118.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.18 -// +build go1.18 - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGenerics - -// additionalPredeclared returns additional predeclared types in go.1.18. -func additionalPredeclared() []types.Type { - return []types.Type{ - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - types.Universe.Lookup("any").Type(), - } -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go deleted file mode 100644 index 286bf4454..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !(go1.18 && goexperiment.unified) -// +build !go1.18 !goexperiment.unified - -package gcimporter - -const unifiedIR = false diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go deleted file mode 100644 index b5d69ffbe..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 && goexperiment.unified -// +build go1.18,goexperiment.unified - -package gcimporter - -const unifiedIR = true diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go deleted file mode 100644 index 8eb20729c..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package gcimporter - -import ( - "fmt" - "go/token" - "go/types" -) - -func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") - return -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go deleted file mode 100644 index 3c1a43754..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go +++ /dev/null @@ -1,612 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Derived from go/internal/gcimporter/ureader.go - -//go:build go1.18 -// +build go1.18 - -package gcimporter - -import ( - "go/token" - "go/types" - "strings" - - "golang.org/x/tools/go/internal/pkgbits" -) - -// A pkgReader holds the shared state for reading a unified IR package -// description. 
-type pkgReader struct { - pkgbits.PkgDecoder - - fake fakeFileSet - - ctxt *types.Context - imports map[string]*types.Package // previously imported packages, indexed by path - - // lazily initialized arrays corresponding to the unified IR - // PosBase, Pkg, and Type sections, respectively. - posBases []string // position bases (i.e., file names) - pkgs []*types.Package - typs []types.Type - - // laterFns holds functions that need to be invoked at the end of - // import reading. - laterFns []func() -} - -// later adds a function to be invoked at the end of import reading. -func (pr *pkgReader) later(fn func()) { - pr.laterFns = append(pr.laterFns, fn) -} - -// See cmd/compile/internal/noder.derivedInfo. -type derivedInfo struct { - idx pkgbits.Index - needed bool -} - -// See cmd/compile/internal/noder.typeInfo. -type typeInfo struct { - idx pkgbits.Index - derived bool -} - -func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - s := string(data) - s = s[:strings.LastIndex(s, "\n$$\n")] - input := pkgbits.NewPkgDecoder(path, s) - pkg = readUnifiedPackage(fset, nil, imports, input) - return -} - -// readUnifiedPackage reads a package description from the given -// unified IR export data decoder. -func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { - pr := pkgReader{ - PkgDecoder: input, - - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - - ctxt: ctxt, - imports: imports, - - posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), - pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), - typs: make([]types.Type, input.NumElems(pkgbits.RelocType)), - } - defer pr.fake.setLines() - - r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) - pkg := r.pkg() - r.Bool() // has init - - for i, n := 0, r.Len(); i < n; i++ { - // As if r.obj(), but avoiding the Scope.Lookup call, - // to avoid eager loading of imports. - r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) - r.p.objIdx(r.Reloc(pkgbits.RelocObj)) - assert(r.Len() == 0) - } - - r.Sync(pkgbits.SyncEOF) - - for _, fn := range pr.laterFns { - fn() - } - - pkg.MarkComplete() - return pkg -} - -// A reader holds the state for reading a single unified IR element -// within a package. -type reader struct { - pkgbits.Decoder - - p *pkgReader - - dict *readerDict -} - -// A readerDict holds the state for type parameters that parameterize -// the current unified IR element. -type readerDict struct { - // bounds is a slice of typeInfos corresponding to the underlying - // bounds of the element's type parameters. - bounds []typeInfo - - // tparams is a slice of the constructed TypeParams for the element. - tparams []*types.TypeParam - - // devived is a slice of types derived from tparams, which may be - // instantiated while reading the current element. - derived []derivedInfo - derivedTypes []types.Type // lazily instantiated from derived -} - -func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { - return &reader{ - Decoder: pr.NewDecoder(k, idx, marker), - p: pr, - } -} - -// @@@ Positions - -func (r *reader) pos() token.Pos { - r.Sync(pkgbits.SyncPos) - if !r.Bool() { - return token.NoPos - } - - // TODO(mdempsky): Delta encoding. 
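later and laterFns above queue work that can only run once every type in the package exists, such as wiring up underlying types and constraints. A toy sketch of that deferred-completion pattern, with strings standing in for constructed types:

package main

import "fmt"

type loader struct {
	types    map[string]string // illustrative stand-in for constructed types
	laterFns []func()
}

func (l *loader) later(fn func()) { l.laterFns = append(l.laterFns, fn) }

func main() {
	l := &loader{types: map[string]string{}}

	// While reading "Node" we reference "List", which may not be read yet,
	// so the link-up step is deferred instead of done inline.
	l.types["Node"] = "<incomplete>"
	l.later(func() { l.types["Node"] = "struct{ next *List }" })
	l.types["List"] = "struct{ head *Node }"

	// Run the deferred steps after the whole package has been read.
	for _, fn := range l.laterFns {
		fn()
	}
	fmt.Println(l.types["Node"], "/", l.types["List"])
}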
- posBase := r.posBase() - line := r.Uint() - col := r.Uint() - return r.p.fake.pos(posBase, int(line), int(col)) -} - -func (r *reader) posBase() string { - return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) -} - -func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { - if b := pr.posBases[idx]; b != "" { - return b - } - - r := pr.newReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) - - // Within types2, position bases have a lot more details (e.g., - // keeping track of where //line directives appeared exactly). - // - // For go/types, we just track the file name. - - filename := r.String() - - if r.Bool() { // file base - // Was: "b = token.NewTrimmedFileBase(filename, true)" - } else { // line base - pos := r.pos() - line := r.Uint() - col := r.Uint() - - // Was: "b = token.NewLineBase(pos, filename, true, line, col)" - _, _, _ = pos, line, col - } - - b := filename - pr.posBases[idx] = b - return b -} - -// @@@ Packages - -func (r *reader) pkg() *types.Package { - r.Sync(pkgbits.SyncPkg) - return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) -} - -func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { - // TODO(mdempsky): Consider using some non-nil pointer to indicate - // the universe scope, so we don't need to keep re-reading it. - if pkg := pr.pkgs[idx]; pkg != nil { - return pkg - } - - pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() - pr.pkgs[idx] = pkg - return pkg -} - -func (r *reader) doPkg() *types.Package { - path := r.String() - switch path { - case "": - path = r.p.PkgPath() - case "builtin": - return nil // universe - case "unsafe": - return types.Unsafe - } - - if pkg := r.p.imports[path]; pkg != nil { - return pkg - } - - name := r.String() - - pkg := types.NewPackage(path, name) - r.p.imports[path] = pkg - - imports := make([]*types.Package, r.Len()) - for i := range imports { - imports[i] = r.pkg() - } - pkg.SetImports(imports) - - return pkg -} - -// @@@ Types - -func (r *reader) typ() types.Type { - return r.p.typIdx(r.typInfo(), r.dict) -} - -func (r *reader) typInfo() typeInfo { - r.Sync(pkgbits.SyncType) - if r.Bool() { - return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} - } - return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} -} - -func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { - idx := info.idx - var where *types.Type - if info.derived { - where = &dict.derivedTypes[idx] - idx = dict.derived[idx].idx - } else { - where = &pr.typs[idx] - } - - if typ := *where; typ != nil { - return typ - } - - r := pr.newReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) - r.dict = dict - - typ := r.doTyp() - assert(typ != nil) - - // See comment in pkgReader.typIdx explaining how this happens. 
- if prev := *where; prev != nil { - return prev - } - - *where = typ - return typ -} - -func (r *reader) doTyp() (res types.Type) { - switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { - default: - errorf("unhandled type tag: %v", tag) - panic("unreachable") - - case pkgbits.TypeBasic: - return types.Typ[r.Len()] - - case pkgbits.TypeNamed: - obj, targs := r.obj() - name := obj.(*types.TypeName) - if len(targs) != 0 { - t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) - return t - } - return name.Type() - - case pkgbits.TypeTypeParam: - return r.dict.tparams[r.Len()] - - case pkgbits.TypeArray: - len := int64(r.Uint64()) - return types.NewArray(r.typ(), len) - case pkgbits.TypeChan: - dir := types.ChanDir(r.Len()) - return types.NewChan(dir, r.typ()) - case pkgbits.TypeMap: - return types.NewMap(r.typ(), r.typ()) - case pkgbits.TypePointer: - return types.NewPointer(r.typ()) - case pkgbits.TypeSignature: - return r.signature(nil, nil, nil) - case pkgbits.TypeSlice: - return types.NewSlice(r.typ()) - case pkgbits.TypeStruct: - return r.structType() - case pkgbits.TypeInterface: - return r.interfaceType() - case pkgbits.TypeUnion: - return r.unionType() - } -} - -func (r *reader) structType() *types.Struct { - fields := make([]*types.Var, r.Len()) - var tags []string - for i := range fields { - pos := r.pos() - pkg, name := r.selector() - ftyp := r.typ() - tag := r.String() - embedded := r.Bool() - - fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) - if tag != "" { - for len(tags) < i { - tags = append(tags, "") - } - tags = append(tags, tag) - } - } - return types.NewStruct(fields, tags) -} - -func (r *reader) unionType() *types.Union { - terms := make([]*types.Term, r.Len()) - for i := range terms { - terms[i] = types.NewTerm(r.Bool(), r.typ()) - } - return types.NewUnion(terms) -} - -func (r *reader) interfaceType() *types.Interface { - methods := make([]*types.Func, r.Len()) - embeddeds := make([]types.Type, r.Len()) - implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() - - for i := range methods { - pos := r.pos() - pkg, name := r.selector() - mtyp := r.signature(nil, nil, nil) - methods[i] = types.NewFunc(pos, pkg, name, mtyp) - } - - for i := range embeddeds { - embeddeds[i] = r.typ() - } - - iface := types.NewInterfaceType(methods, embeddeds) - if implicit { - iface.MarkImplicit() - } - return iface -} - -func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { - r.Sync(pkgbits.SyncSignature) - - params := r.params() - results := r.params() - variadic := r.Bool() - - return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) -} - -func (r *reader) params() *types.Tuple { - r.Sync(pkgbits.SyncParams) - - params := make([]*types.Var, r.Len()) - for i := range params { - params[i] = r.param() - } - - return types.NewTuple(params...) 
-} - -func (r *reader) param() *types.Var { - r.Sync(pkgbits.SyncParam) - - pos := r.pos() - pkg, name := r.localIdent() - typ := r.typ() - - return types.NewParam(pos, pkg, name, typ) -} - -// @@@ Objects - -func (r *reader) obj() (types.Object, []types.Type) { - r.Sync(pkgbits.SyncObject) - - assert(!r.Bool()) - - pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) - obj := pkgScope(pkg).Lookup(name) - - targs := make([]types.Type, r.Len()) - for i := range targs { - targs[i] = r.typ() - } - - return obj, targs -} - -func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { - rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) - - objPkg, objName := rname.qualifiedIdent() - assert(objName != "") - - tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) - - if tag == pkgbits.ObjStub { - assert(objPkg == nil || objPkg == types.Unsafe) - return objPkg, objName - } - - if objPkg.Scope().Lookup(objName) == nil { - dict := pr.objDictIdx(idx) - - r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) - r.dict = dict - - declare := func(obj types.Object) { - objPkg.Scope().Insert(obj) - } - - switch tag { - default: - panic("weird") - - case pkgbits.ObjAlias: - pos := r.pos() - typ := r.typ() - declare(types.NewTypeName(pos, objPkg, objName, typ)) - - case pkgbits.ObjConst: - pos := r.pos() - typ := r.typ() - val := r.Value() - declare(types.NewConst(pos, objPkg, objName, typ, val)) - - case pkgbits.ObjFunc: - pos := r.pos() - tparams := r.typeParamNames() - sig := r.signature(nil, nil, tparams) - declare(types.NewFunc(pos, objPkg, objName, sig)) - - case pkgbits.ObjType: - pos := r.pos() - - obj := types.NewTypeName(pos, objPkg, objName, nil) - named := types.NewNamed(obj, nil, nil) - declare(obj) - - named.SetTypeParams(r.typeParamNames()) - - // TODO(mdempsky): Rewrite receiver types to underlying is an - // Interface? The go/types importer does this (I think because - // unit tests expected that), but cmd/compile doesn't care - // about it, so maybe we can avoid worrying about that here. - rhs := r.typ() - r.p.later(func() { - underlying := rhs.Underlying() - named.SetUnderlying(underlying) - }) - - for i, n := 0, r.Len(); i < n; i++ { - named.AddMethod(r.method()) - } - - case pkgbits.ObjVar: - pos := r.pos() - typ := r.typ() - declare(types.NewVar(pos, objPkg, objName, typ)) - } - } - - return objPkg, objName -} - -func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { - r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) - - var dict readerDict - - if implicits := r.Len(); implicits != 0 { - errorf("unexpected object with %v implicit type parameter(s)", implicits) - } - - dict.bounds = make([]typeInfo, r.Len()) - for i := range dict.bounds { - dict.bounds[i] = r.typInfo() - } - - dict.derived = make([]derivedInfo, r.Len()) - dict.derivedTypes = make([]types.Type, len(dict.derived)) - for i := range dict.derived { - dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} - } - - // function references follow, but reader doesn't need those - - return &dict -} - -func (r *reader) typeParamNames() []*types.TypeParam { - r.Sync(pkgbits.SyncTypeParamNames) - - // Note: This code assumes it only processes objects without - // implement type parameters. This is currently fine, because - // reader is only used to read in exported declarations, which are - // always package scoped. - - if len(r.dict.bounds) == 0 { - return nil - } - - // Careful: Type parameter lists may have cycles. 
To allow for this, - // we construct the type parameter list in two passes: first we - // create all the TypeNames and TypeParams, then we construct and - // set the bound type. - - r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds)) - for i := range r.dict.bounds { - pos := r.pos() - pkg, name := r.localIdent() - - tname := types.NewTypeName(pos, pkg, name, nil) - r.dict.tparams[i] = types.NewTypeParam(tname, nil) - } - - typs := make([]types.Type, len(r.dict.bounds)) - for i, bound := range r.dict.bounds { - typs[i] = r.p.typIdx(bound, r.dict) - } - - // TODO(mdempsky): This is subtle, elaborate further. - // - // We have to save tparams outside of the closure, because - // typeParamNames() can be called multiple times with the same - // dictionary instance. - // - // Also, this needs to happen later to make sure SetUnderlying has - // been called. - // - // TODO(mdempsky): Is it safe to have a single "later" slice or do - // we need to have multiple passes? See comments on CL 386002 and - // go.dev/issue/52104. - tparams := r.dict.tparams - r.p.later(func() { - for i, typ := range typs { - tparams[i].SetConstraint(typ) - } - }) - - return r.dict.tparams -} - -func (r *reader) method() *types.Func { - r.Sync(pkgbits.SyncMethod) - pos := r.pos() - pkg, name := r.selector() - - rparams := r.typeParamNames() - sig := r.signature(r.param(), rparams, nil) - - _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go. - return types.NewFunc(pos, pkg, name, sig) -} - -func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) } -func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) } -func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) } - -func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) { - r.Sync(marker) - return r.pkg(), r.String() -} - -// pkgScope returns pkg.Scope(). -// If pkg is nil, it returns types.Universe instead. -// -// TODO(mdempsky): Remove after x/tools can depend on Go 1.19. -func pkgScope(pkg *types.Package) *types.Scope { - if pkg != nil { - return pkg.Scope() - } - return types.Universe -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index 18a002f82..c6e7c0d44 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -8,42 +8,47 @@ package packagesdriver import ( "context" "fmt" - "go/types" "strings" "golang.org/x/tools/internal/gocommand" ) -var debug = false - -func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { +// TODO(adonovan): move back into go/packages. +func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { inv.Verb = "list" inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) var goarch, compiler string if rawErr != nil { - if rawErrMsg := rawErr.Error(); strings.Contains(rawErrMsg, "cannot find main module") || strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. 
+ rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. // TODO(matloob): Is this a problem in practice? inv.Verb = "env" inv.Args = []string{"GOARCH"} envout, enverr := gocmdRunner.Run(ctx, inv) if enverr != nil { - return nil, enverr + return "", "", enverr } goarch = strings.TrimSpace(envout.String()) compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr } else { - return nil, friendlyErr + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. + return "", "", rawErr } } else { fields := strings.Fields(stdout.String()) if len(fields) < 2 { - return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", stdout.String(), stderr.String()) } goarch = fields[0] compiler = fields[1] } - return types.SizesFor(compiler, goarch), nil + return compiler, goarch, nil } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go deleted file mode 100644 index 2bc793668..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go +++ /dev/null @@ -1,433 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -import ( - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "math/big" - "os" - "runtime" - "strings" -) - -// A PkgDecoder provides methods for decoding a package's Unified IR -// export data. -type PkgDecoder struct { - // version is the file format version. - version uint32 - - // sync indicates whether the file uses sync markers. - sync bool - - // pkgPath is the package path for the package to be decoded. - // - // TODO(mdempsky): Remove; unneeded since CL 391014. - pkgPath string - - // elemData is the full data payload of the encoded package. - // Elements are densely and contiguously packed together. - // - // The last 8 bytes of elemData are the package fingerprint. - elemData string - - // elemEnds stores the byte-offset end positions of element - // bitstreams within elemData. - // - // For example, element I's bitstream data starts at elemEnds[I-1] - // (or 0, if I==0) and ends at elemEnds[I]. - // - // Note: elemEnds is indexed by absolute indices, not - // section-relative indices. - elemEnds []uint32 - - // elemEndsEnds stores the index-offset end positions of relocation - // sections within elemEnds. - // - // For example, section K's end positions start at elemEndsEnds[K-1] - // (or 0, if K==0) and end at elemEndsEnds[K]. - elemEndsEnds [numRelocs]uint32 -} - -// PkgPath returns the package path for the package -// -// TODO(mdempsky): Remove; unneeded since CL 391014. -func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } - -// SyncMarkers reports whether pr uses sync markers. -func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } - -// NewPkgDecoder returns a PkgDecoder initialized to read the Unified -// IR export data from input. pkgPath is the package path for the -// compilation unit that produced the export data. 
-// -// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. -func NewPkgDecoder(pkgPath, input string) PkgDecoder { - pr := PkgDecoder{ - pkgPath: pkgPath, - } - - // TODO(mdempsky): Implement direct indexing of input string to - // avoid copying the position information. - - r := strings.NewReader(input) - - assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) - - switch pr.version { - default: - panic(fmt.Errorf("unsupported version: %v", pr.version)) - case 0: - // no flags - case 1: - var flags uint32 - assert(binary.Read(r, binary.LittleEndian, &flags) == nil) - pr.sync = flags&flagSyncMarkers != 0 - } - - assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) - - pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) - assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) - - pos, err := r.Seek(0, os.SEEK_CUR) - assert(err == nil) - - pr.elemData = input[pos:] - assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) - - return pr -} - -// NumElems returns the number of elements in section k. -func (pr *PkgDecoder) NumElems(k RelocKind) int { - count := int(pr.elemEndsEnds[k]) - if k > 0 { - count -= int(pr.elemEndsEnds[k-1]) - } - return count -} - -// TotalElems returns the total number of elements across all sections. -func (pr *PkgDecoder) TotalElems() int { - return len(pr.elemEnds) -} - -// Fingerprint returns the package fingerprint. -func (pr *PkgDecoder) Fingerprint() [8]byte { - var fp [8]byte - copy(fp[:], pr.elemData[len(pr.elemData)-8:]) - return fp -} - -// AbsIdx returns the absolute index for the given (section, index) -// pair. -func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { - absIdx := int(idx) - if k > 0 { - absIdx += int(pr.elemEndsEnds[k-1]) - } - if absIdx >= int(pr.elemEndsEnds[k]) { - errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) - } - return absIdx -} - -// DataIdx returns the raw element bitstream for the given (section, -// index) pair. -func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { - absIdx := pr.AbsIdx(k, idx) - - var start uint32 - if absIdx > 0 { - start = pr.elemEnds[absIdx-1] - } - end := pr.elemEnds[absIdx] - - return pr.elemData[start:end] -} - -// StringIdx returns the string value for the given string index. -func (pr *PkgDecoder) StringIdx(idx Index) string { - return pr.DataIdx(RelocString, idx) -} - -// NewDecoder returns a Decoder for the given (section, index) pair, -// and decodes the given SyncMarker from the element bitstream. -func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { - r := pr.NewDecoderRaw(k, idx) - r.Sync(marker) - return r -} - -// NewDecoderRaw returns a Decoder for the given (section, index) pair. -// -// Most callers should use NewDecoder instead. -func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { - r := Decoder{ - common: pr, - k: k, - Idx: idx, - } - - // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. - r.Data = *strings.NewReader(pr.DataIdx(k, idx)) - - r.Sync(SyncRelocs) - r.Relocs = make([]RelocEnt, r.Len()) - for i := range r.Relocs { - r.Sync(SyncReloc) - r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} - } - - return r -} - -// A Decoder provides methods for decoding an individual element's -// bitstream data. 
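AbsIdx and DataIdx above turn a (section, index) pair into a byte slice through two offset tables: elemEndsEnds locates the section's entries inside elemEnds, and elemEnds locates each element's bytes inside elemData. A standalone sketch with two sections and four elements (the data and offsets are made up, and the trailing fingerprint bytes are ignored):

package main

import "fmt"

func main() {
	elemData := "aaBBBcDDDD" // four elements packed back to back
	elemEnds := []uint32{2, 5, 6, 10}
	elemEndsEnds := []uint32{2, 4} // section 0: elements 0-1, section 1: elements 2-3

	dataIdx := func(k, idx int) string {
		absIdx := idx
		if k > 0 {
			absIdx += int(elemEndsEnds[k-1]) // shift into this section's range
		}
		var start uint32
		if absIdx > 0 {
			start = elemEnds[absIdx-1] // previous element's end is this one's start
		}
		return elemData[start:elemEnds[absIdx]]
	}

	fmt.Println(dataIdx(0, 0), dataIdx(0, 1)) // aa BBB
	fmt.Println(dataIdx(1, 0), dataIdx(1, 1)) // c DDDD
}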
-type Decoder struct { - common *PkgDecoder - - Relocs []RelocEnt - Data strings.Reader - - k RelocKind - Idx Index -} - -func (r *Decoder) checkErr(err error) { - if err != nil { - errorf("unexpected decoding error: %w", err) - } -} - -func (r *Decoder) rawUvarint() uint64 { - x, err := binary.ReadUvarint(&r.Data) - r.checkErr(err) - return x -} - -func (r *Decoder) rawVarint() int64 { - ux := r.rawUvarint() - - // Zig-zag decode. - x := int64(ux >> 1) - if ux&1 != 0 { - x = ^x - } - return x -} - -func (r *Decoder) rawReloc(k RelocKind, idx int) Index { - e := r.Relocs[idx] - assert(e.Kind == k) - return e.Idx -} - -// Sync decodes a sync marker from the element bitstream and asserts -// that it matches the expected marker. -// -// If r.common.sync is false, then Sync is a no-op. -func (r *Decoder) Sync(mWant SyncMarker) { - if !r.common.sync { - return - } - - pos, _ := r.Data.Seek(0, os.SEEK_CUR) // TODO(mdempsky): io.SeekCurrent after #44505 is resolved - mHave := SyncMarker(r.rawUvarint()) - writerPCs := make([]int, r.rawUvarint()) - for i := range writerPCs { - writerPCs[i] = int(r.rawUvarint()) - } - - if mHave == mWant { - return - } - - // There's some tension here between printing: - // - // (1) full file paths that tools can recognize (e.g., so emacs - // hyperlinks the "file:line" text for easy navigation), or - // - // (2) short file paths that are easier for humans to read (e.g., by - // omitting redundant or irrelevant details, so it's easier to - // focus on the useful bits that remain). - // - // The current formatting favors the former, as it seems more - // helpful in practice. But perhaps the formatting could be improved - // to better address both concerns. For example, use relative file - // paths if they would be shorter, or rewrite file paths to contain - // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how - // to reliably expand that again. - - fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) - - fmt.Printf("\nfound %v, written at:\n", mHave) - if len(writerPCs) == 0 { - fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) - } - for _, pc := range writerPCs { - fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) - } - - fmt.Printf("\nexpected %v, reading at:\n", mWant) - var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? - n := runtime.Callers(2, readerPCs[:]) - for _, pc := range fmtFrames(readerPCs[:n]...) { - fmt.Printf("\t%s\n", pc) - } - - // We already printed a stack trace for the reader, so now we can - // simply exit. Printing a second one with panic or base.Fatalf - // would just be noise. - os.Exit(1) -} - -// Bool decodes and returns a bool value from the element bitstream. -func (r *Decoder) Bool() bool { - r.Sync(SyncBool) - x, err := r.Data.ReadByte() - r.checkErr(err) - assert(x < 2) - return x != 0 -} - -// Int64 decodes and returns an int64 value from the element bitstream. -func (r *Decoder) Int64() int64 { - r.Sync(SyncInt64) - return r.rawVarint() -} - -// Int64 decodes and returns a uint64 value from the element bitstream. -func (r *Decoder) Uint64() uint64 { - r.Sync(SyncUint64) - return r.rawUvarint() -} - -// Len decodes and returns a non-negative int value from the element bitstream. -func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } - -// Int decodes and returns an int value from the element bitstream. 
-func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } - -// Uint decodes and returns a uint value from the element bitstream. -func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } - -// Code decodes a Code value from the element bitstream and returns -// its ordinal value. It's the caller's responsibility to convert the -// result to an appropriate Code type. -// -// TODO(mdempsky): Ideally this method would have signature "Code[T -// Code] T" instead, but we don't allow generic methods and the -// compiler can't depend on generics yet anyway. -func (r *Decoder) Code(mark SyncMarker) int { - r.Sync(mark) - return r.Len() -} - -// Reloc decodes a relocation of expected section k from the element -// bitstream and returns an index to the referenced element. -func (r *Decoder) Reloc(k RelocKind) Index { - r.Sync(SyncUseReloc) - return r.rawReloc(k, r.Len()) -} - -// String decodes and returns a string value from the element -// bitstream. -func (r *Decoder) String() string { - r.Sync(SyncString) - return r.common.StringIdx(r.Reloc(RelocString)) -} - -// Strings decodes and returns a variable-length slice of strings from -// the element bitstream. -func (r *Decoder) Strings() []string { - res := make([]string, r.Len()) - for i := range res { - res[i] = r.String() - } - return res -} - -// Value decodes and returns a constant.Value from the element -// bitstream. -func (r *Decoder) Value() constant.Value { - r.Sync(SyncValue) - isComplex := r.Bool() - val := r.scalar() - if isComplex { - val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) - } - return val -} - -func (r *Decoder) scalar() constant.Value { - switch tag := CodeVal(r.Code(SyncVal)); tag { - default: - panic(fmt.Errorf("unexpected scalar tag: %v", tag)) - - case ValBool: - return constant.MakeBool(r.Bool()) - case ValString: - return constant.MakeString(r.String()) - case ValInt64: - return constant.MakeInt64(r.Int64()) - case ValBigInt: - return constant.Make(r.bigInt()) - case ValBigRat: - num := r.bigInt() - denom := r.bigInt() - return constant.Make(new(big.Rat).SetFrac(num, denom)) - case ValBigFloat: - return constant.Make(r.bigFloat()) - } -} - -func (r *Decoder) bigInt() *big.Int { - v := new(big.Int).SetBytes([]byte(r.String())) - if r.Bool() { - v.Neg(v) - } - return v -} - -func (r *Decoder) bigFloat() *big.Float { - v := new(big.Float).SetPrec(512) - assert(v.UnmarshalText([]byte(r.String())) == nil) - return v -} - -// @@@ Helpers - -// TODO(mdempsky): These should probably be removed. I think they're a -// smell that the export data format is not yet quite right. - -// PeekPkgPath returns the package path for the specified package -// index. -func (pr *PkgDecoder) PeekPkgPath(idx Index) string { - r := pr.NewDecoder(RelocPkg, idx, SyncPkgDef) - path := r.String() - if path == "" { - path = pr.pkgPath - } - return path -} - -// PeekObj returns the package path, object name, and CodeObj for the -// specified object index. 
-func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { - r := pr.NewDecoder(RelocName, idx, SyncObject1) - r.Sync(SyncSym) - r.Sync(SyncPkg) - path := pr.PeekPkgPath(r.Reloc(RelocPkg)) - name := r.String() - assert(name != "") - - tag := CodeObj(r.Code(SyncCodeObj)) - - return path, name, tag -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go deleted file mode 100644 index c50c838ca..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -import ( - "bytes" - "crypto/md5" - "encoding/binary" - "go/constant" - "io" - "math/big" - "runtime" -) - -// currentVersion is the current version number. -// -// - v0: initial prototype -// -// - v1: adds the flags uint32 word -const currentVersion uint32 = 1 - -// A PkgEncoder provides methods for encoding a package's Unified IR -// export data. -type PkgEncoder struct { - // elems holds the bitstream for previously encoded elements. - elems [numRelocs][]string - - // stringsIdx maps previously encoded strings to their index within - // the RelocString section, to allow deduplication. That is, - // elems[RelocString][stringsIdx[s]] == s (if present). - stringsIdx map[string]Index - - // syncFrames is the number of frames to write at each sync - // marker. A negative value means sync markers are omitted. - syncFrames int -} - -// SyncMarkers reports whether pw uses sync markers. -func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } - -// NewPkgEncoder returns an initialized PkgEncoder. -// -// syncFrames is the number of caller frames that should be serialized -// at Sync points. Serializing additional frames results in larger -// export data files, but can help diagnosing desync errors in -// higher-level Unified IR reader/writer code. If syncFrames is -// negative, then sync markers are omitted entirely. -func NewPkgEncoder(syncFrames int) PkgEncoder { - return PkgEncoder{ - stringsIdx: make(map[string]Index), - syncFrames: syncFrames, - } -} - -// DumpTo writes the package's encoded data to out0 and returns the -// package fingerprint. -func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { - h := md5.New() - out := io.MultiWriter(out0, h) - - writeUint32 := func(x uint32) { - assert(binary.Write(out, binary.LittleEndian, x) == nil) - } - - writeUint32(currentVersion) - - var flags uint32 - if pw.SyncMarkers() { - flags |= flagSyncMarkers - } - writeUint32(flags) - - // Write elemEndsEnds. - var sum uint32 - for _, elems := range &pw.elems { - sum += uint32(len(elems)) - writeUint32(sum) - } - - // Write elemEnds. - sum = 0 - for _, elems := range &pw.elems { - for _, elem := range elems { - sum += uint32(len(elem)) - writeUint32(sum) - } - } - - // Write elemData. - for _, elems := range &pw.elems { - for _, elem := range elems { - _, err := io.WriteString(out, elem) - assert(err == nil) - } - } - - // Write fingerprint. - copy(fingerprint[:], h.Sum(nil)) - _, err := out0.Write(fingerprint[:]) - assert(err == nil) - - return -} - -// StringIdx adds a string value to the strings section, if not -// already present, and returns its index. 
-func (pw *PkgEncoder) StringIdx(s string) Index { - if idx, ok := pw.stringsIdx[s]; ok { - assert(pw.elems[RelocString][idx] == s) - return idx - } - - idx := Index(len(pw.elems[RelocString])) - pw.elems[RelocString] = append(pw.elems[RelocString], s) - pw.stringsIdx[s] = idx - return idx -} - -// NewEncoder returns an Encoder for a new element within the given -// section, and encodes the given SyncMarker as the start of the -// element bitstream. -func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder { - e := pw.NewEncoderRaw(k) - e.Sync(marker) - return e -} - -// NewEncoderRaw returns an Encoder for a new element within the given -// section. -// -// Most callers should use NewEncoder instead. -func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder { - idx := Index(len(pw.elems[k])) - pw.elems[k] = append(pw.elems[k], "") // placeholder - - return Encoder{ - p: pw, - k: k, - Idx: idx, - } -} - -// An Encoder provides methods for encoding an individual element's -// bitstream data. -type Encoder struct { - p *PkgEncoder - - Relocs []RelocEnt - Data bytes.Buffer // accumulated element bitstream data - - encodingRelocHeader bool - - k RelocKind - Idx Index // index within relocation section -} - -// Flush finalizes the element's bitstream and returns its Index. -func (w *Encoder) Flush() Index { - var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved - - // Backup the data so we write the relocations at the front. - var tmp bytes.Buffer - io.Copy(&tmp, &w.Data) - - // TODO(mdempsky): Consider writing these out separately so they're - // easier to strip, along with function bodies, so that we can prune - // down to just the data that's relevant to go/types. - if w.encodingRelocHeader { - panic("encodingRelocHeader already true; recursive flush?") - } - w.encodingRelocHeader = true - w.Sync(SyncRelocs) - w.Len(len(w.Relocs)) - for _, rEnt := range w.Relocs { - w.Sync(SyncReloc) - w.Len(int(rEnt.Kind)) - w.Len(int(rEnt.Idx)) - } - - io.Copy(&sb, &w.Data) - io.Copy(&sb, &tmp) - w.p.elems[w.k][w.Idx] = sb.String() - - return w.Idx -} - -func (w *Encoder) checkErr(err error) { - if err != nil { - errorf("unexpected encoding error: %v", err) - } -} - -func (w *Encoder) rawUvarint(x uint64) { - var buf [binary.MaxVarintLen64]byte - n := binary.PutUvarint(buf[:], x) - _, err := w.Data.Write(buf[:n]) - w.checkErr(err) -} - -func (w *Encoder) rawVarint(x int64) { - // Zig-zag encode. - ux := uint64(x) << 1 - if x < 0 { - ux = ^ux - } - - w.rawUvarint(ux) -} - -func (w *Encoder) rawReloc(r RelocKind, idx Index) int { - // TODO(mdempsky): Use map for lookup; this takes quadratic time. - for i, rEnt := range w.Relocs { - if rEnt.Kind == r && rEnt.Idx == idx { - return i - } - } - - i := len(w.Relocs) - w.Relocs = append(w.Relocs, RelocEnt{r, idx}) - return i -} - -func (w *Encoder) Sync(m SyncMarker) { - if !w.p.SyncMarkers() { - return - } - - // Writing out stack frame string references requires working - // relocations, but writing out the relocations themselves involves - // sync markers. To prevent infinite recursion, we simply trim the - // stack frame for sync markers within the relocation header. - var frames []string - if !w.encodingRelocHeader && w.p.syncFrames > 0 { - pcs := make([]uintptr, w.p.syncFrames) - n := runtime.Callers(2, pcs) - frames = fmtFrames(pcs[:n]...) - } - - // TODO(mdempsky): Save space by writing out stack frames as a - // linked list so we can share common stack frames. 
- w.rawUvarint(uint64(m)) - w.rawUvarint(uint64(len(frames))) - for _, frame := range frames { - w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame)))) - } -} - -// Bool encodes and writes a bool value into the element bitstream, -// and then returns the bool value. -// -// For simple, 2-alternative encodings, the idiomatic way to call Bool -// is something like: -// -// if w.Bool(x != 0) { -// // alternative #1 -// } else { -// // alternative #2 -// } -// -// For multi-alternative encodings, use Code instead. -func (w *Encoder) Bool(b bool) bool { - w.Sync(SyncBool) - var x byte - if b { - x = 1 - } - err := w.Data.WriteByte(x) - w.checkErr(err) - return b -} - -// Int64 encodes and writes an int64 value into the element bitstream. -func (w *Encoder) Int64(x int64) { - w.Sync(SyncInt64) - w.rawVarint(x) -} - -// Uint64 encodes and writes a uint64 value into the element bitstream. -func (w *Encoder) Uint64(x uint64) { - w.Sync(SyncUint64) - w.rawUvarint(x) -} - -// Len encodes and writes a non-negative int value into the element bitstream. -func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } - -// Int encodes and writes an int value into the element bitstream. -func (w *Encoder) Int(x int) { w.Int64(int64(x)) } - -// Len encodes and writes a uint value into the element bitstream. -func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } - -// Reloc encodes and writes a relocation for the given (section, -// index) pair into the element bitstream. -// -// Note: Only the index is formally written into the element -// bitstream, so bitstream decoders must know from context which -// section an encoded relocation refers to. -func (w *Encoder) Reloc(r RelocKind, idx Index) { - w.Sync(SyncUseReloc) - w.Len(w.rawReloc(r, idx)) -} - -// Code encodes and writes a Code value into the element bitstream. -func (w *Encoder) Code(c Code) { - w.Sync(c.Marker()) - w.Len(c.Value()) -} - -// String encodes and writes a string value into the element -// bitstream. -// -// Internally, strings are deduplicated by adding them to the strings -// section (if not already present), and then writing a relocation -// into the element bitstream. -func (w *Encoder) String(s string) { - w.Sync(SyncString) - w.Reloc(RelocString, w.p.StringIdx(s)) -} - -// Strings encodes and writes a variable-length slice of strings into -// the element bitstream. -func (w *Encoder) Strings(ss []string) { - w.Len(len(ss)) - for _, s := range ss { - w.String(s) - } -} - -// Value encodes and writes a constant.Value into the element -// bitstream. -func (w *Encoder) Value(val constant.Value) { - w.Sync(SyncValue) - if w.Bool(val.Kind() == constant.Complex) { - w.scalar(constant.Real(val)) - w.scalar(constant.Imag(val)) - } else { - w.scalar(val) - } -} - -func (w *Encoder) scalar(val constant.Value) { - switch v := constant.Val(val).(type) { - default: - errorf("unhandled %v (%v)", val, val.Kind()) - case bool: - w.Code(ValBool) - w.Bool(v) - case string: - w.Code(ValString) - w.String(v) - case int64: - w.Code(ValInt64) - w.Int64(v) - case *big.Int: - w.Code(ValBigInt) - w.bigInt(v) - case *big.Rat: - w.Code(ValBigRat) - w.bigInt(v.Num()) - w.bigInt(v.Denom()) - case *big.Float: - w.Code(ValBigFloat) - w.bigFloat(v) - } -} - -func (w *Encoder) bigInt(v *big.Int) { - b := v.Bytes() - w.String(string(b)) // TODO: More efficient encoding. - w.Bool(v.Sign() < 0) -} - -func (w *Encoder) bigFloat(v *big.Float) { - b := v.Append(nil, 'p', -1) - w.String(string(b)) // TODO: More efficient encoding. 
-} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go deleted file mode 100644 index 7a8f04ab3..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -// A RelocKind indicates a particular section within a unified IR export. -type RelocKind int - -// An Index represents a bitstream element index within a particular -// section. -type Index int - -// A relocEnt (relocation entry) is an entry in an element's local -// reference table. -// -// TODO(mdempsky): Rename this too. -type RelocEnt struct { - Kind RelocKind - Idx Index -} - -// Reserved indices within the meta relocation section. -const ( - PublicRootIdx Index = 0 - PrivateRootIdx Index = 1 -) - -const ( - RelocString RelocKind = iota - RelocMeta - RelocPosBase - RelocPkg - RelocName - RelocType - RelocObj - RelocObjExt - RelocObjDict - RelocBody - - numRelocs = iota -) diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/doc.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/doc.go index da4ab89fe..3531ac8f5 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/doc.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/doc.go @@ -5,12 +5,20 @@ /* Package packages loads Go packages for inspection and analysis. -The Load function takes as input a list of patterns and return a list of Package -structs describing individual packages matched by those patterns. -The LoadMode controls the amount of detail in the loaded packages. - -Load passes most patterns directly to the underlying build tool, -but all patterns with the prefix "query=", where query is a +The [Load] function takes as input a list of patterns and returns a +list of [Package] values describing individual packages matched by those +patterns. +A [Config] specifies configuration options, the most important of which is +the [LoadMode], which controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool. +The default build tool is the go command. +Its supported patterns are described at +https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. +Other build systems may be supported by providing a "driver"; +see [The driver protocol]. + +All patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. @@ -35,7 +43,7 @@ The Package struct provides basic information about the package, including - Imports, a map from source import strings to the Packages they name; - Types, the type information for the package's exported symbols; - Syntax, the parsed syntax trees for the package's source code; and - - TypeInfo, the result of a complete type-check of the package syntax trees. + - TypesInfo, the result of a complete type-check of the package syntax trees. (See the documentation for type Package for the complete list of fields and more detailed descriptions.) @@ -64,9 +72,31 @@ reported about the loaded packages. See the documentation for type LoadMode for details. 
Most tools should pass their command-line arguments (after any flags) -uninterpreted to the loader, so that the loader can interpret them +uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. + See the Example function for typical usage. + +# The driver protocol + +[Load] may be used to load Go packages even in Go projects that use +alternative build systems, by installing an appropriate "driver" +program for the build system and specifying its location in the +GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. + +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +those patterns identify. Drivers must also support the special "file=" +and "pattern=" patterns described above. + +The patterns are provided as positional command-line arguments. A +JSON-encoded [DriverRequest] message providing additional information +is written to the driver's standard input. The driver must write a +JSON-encoded [DriverResponse] message to its standard output. (This +message differs from the JSON schema produced by 'go list'.) */ package packages // import "golang.org/x/tools/go/packages" @@ -168,14 +198,6 @@ Instead, ssadump no longer requests the runtime package, but seeks it among the dependencies of the user-specified packages, and emits an error if it is not found. -Overlays: The Overlay field in the Config allows providing alternate contents -for Go source files, by providing a mapping from file path to contents. -go/packages will pull in new imports added in overlay files when go/packages -is run in LoadImports mode or greater. -Overlay support for the go list driver isn't complete yet: if the file doesn't -exist on disk, it will only be recognized in an overlay if it is a non-test file -and the package would be reported even without the overlay. - Questions & Tasks - Add GOARCH/GOOS? diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/external.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/external.go index 7242a0a7d..c2b4b711b 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/external.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/external.go @@ -2,46 +2,85 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file enables an external tool to intercept package requests. -// If the tool is present then its results are used in preference to -// the go list command. - package packages +// This file defines the protocol that enables an external "driver" +// tool to supply package metadata in place of 'go list'. + import ( "bytes" "encoding/json" "fmt" - exec "golang.org/x/sys/execabs" "os" + "os/exec" "strings" ) -// The Driver Protocol +// DriverRequest defines the schema of a request for package metadata +// from an external driver program. The JSON-encoded DriverRequest +// message is provided to the driver program's standard input. The +// query patterns are provided as command-line arguments. // -// The driver, given the inputs to a call to Load, returns metadata about the packages specified. -// This allows for different build systems to support go/packages by telling go/packages how the -// packages' source is organized. 
-// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in -// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package -// documentation in doc.go for the full description of the patterns that need to be supported. -// A driver receives as a JSON-serialized driverRequest struct in standard input and will -// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. - -// driverRequest is used to provide the portion of Load's Config that is needed by a driver. -type driverRequest struct { +// See the package documentation for an overview. +type DriverRequest struct { Mode LoadMode `json:"mode"` + // Env specifies the environment the underlying build system should be run in. Env []string `json:"env"` + // BuildFlags are flags that should be passed to the underlying build system. BuildFlags []string `json:"build_flags"` + // Tests specifies whether the patterns should also return test packages. Tests bool `json:"tests"` - // Overlay maps file paths (relative to the driver's working directory) to the byte contents - // of overlay files. + + // Overlay maps file paths (relative to the driver's working directory) + // to the contents of overlay files (see Config.Overlay). Overlay map[string][]byte `json:"overlay"` } +// DriverResponse defines the schema of a response from an external +// driver program, providing the results of a query for package +// metadata. The driver program must write a JSON-encoded +// DriverResponse message to its standard output. +// +// See the package documentation for an overview. +type DriverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the DriverResponse is ignored, and go/packages will fallback + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + + // Compiler and Arch are the arguments pass of types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) + // findExternalDriver returns the file path of a tool that supplies // the build system package structure, or "" if not found." 
// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its @@ -64,8 +103,8 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*driverResponse, error) { - req, err := json.Marshal(driverRequest{ + return func(cfg *Config, words ...string) (*DriverResponse, error) { + req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, BuildFlags: cfg.BuildFlags, @@ -80,7 +119,19 @@ func findExternalDriver(cfg *Config) driver { stderr := new(bytes.Buffer) cmd := exec.CommandContext(cfg.Context, tool, words...) cmd.Dir = cfg.Dir - cmd.Env = cfg.Env + // The cwd gets resolved to the real path. On Darwin, where + // /tmp is a symlink, this breaks anything that expects the + // working directory to keep the original path, including the + // go command when dealing with modules. + // + // os.Getwd stdlib has a special feature where if the + // cwd and the PWD are the same node then it trusts + // the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go + // command. + // + // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go) + cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir) cmd.Stdin = bytes.NewReader(req) cmd.Stdout = buf cmd.Stderr = stderr @@ -92,10 +143,14 @@ func findExternalDriver(cfg *Config) driver { fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) } - var response driverResponse + var response DriverResponse if err := json.Unmarshal(buf.Bytes(), &response); err != nil { return nil, err } return &response, nil } } + +// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)]. +// TODO(adonovan): use go1.21 slices.Clip. +func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/golist.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/golist.go index de881562d..d9be410aa 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/golist.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/golist.go @@ -9,10 +9,9 @@ import ( "context" "encoding/json" "fmt" - "go/types" - "io/ioutil" "log" "os" + "os/exec" "path" "path/filepath" "reflect" @@ -22,7 +21,6 @@ import ( "sync" "unicode" - exec "golang.org/x/sys/execabs" "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" @@ -37,29 +35,30 @@ type goTooOldError struct { error } -// responseDeduper wraps a driverResponse, deduplicating its contents. +// responseDeduper wraps a DriverResponse, deduplicating its contents. type responseDeduper struct { seenRoots map[string]bool seenPackages map[string]*Package - dr *driverResponse + dr *DriverResponse } func newDeduper() *responseDeduper { return &responseDeduper{ - dr: &driverResponse{}, + dr: &DriverResponse{}, seenRoots: map[string]bool{}, seenPackages: map[string]*Package{}, } } -// addAll fills in r with a driverResponse. -func (r *responseDeduper) addAll(dr *driverResponse) { +// addAll fills in r with a DriverResponse. 
+func (r *responseDeduper) addAll(dr *DriverResponse) { for _, pkg := range dr.Packages { r.addPackage(pkg) } for _, root := range dr.Roots { r.addRoot(root) } + r.dr.GoVersion = dr.GoVersion } func (r *responseDeduper) addPackage(p *Package) { @@ -129,7 +128,7 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { +func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -147,16 +146,18 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { } // Fill in response.Sizes asynchronously if necessary. - var sizeserr error - var sizeswg sync.WaitGroup if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { - sizeswg.Add(1) + errCh := make(chan error) go func() { - var sizes types.Sizes - sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - // types.SizesFor always returns nil or a *types.StdSizes. - response.dr.Sizes, _ = sizes.(*types.StdSizes) - sizeswg.Done() + compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + response.dr.Compiler = compiler + response.dr.Arch = arch + errCh <- err + }() + defer func() { + if sizesErr := <-errCh; sizesErr != nil { + err = sizesErr + } }() } @@ -209,87 +210,10 @@ extractQueries: } } - // Only use go/packages' overlay processing if we're using a Go version - // below 1.16. Otherwise, go list handles it. - if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 { - modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) - if err != nil { - return nil, err - } - - var containsCandidates []string - if len(containFiles) > 0 { - containsCandidates = append(containsCandidates, modifiedPkgs...) - containsCandidates = append(containsCandidates, needPkgs...) - } - if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { - return nil, err - } - // Check candidate packages for containFiles. - if len(containFiles) > 0 { - for _, id := range containsCandidates { - pkg, ok := response.seenPackages[id] - if !ok { - response.addPackage(&Package{ - ID: id, - Errors: []Error{{ - Kind: ListError, - Msg: fmt.Sprintf("package %s expected but not seen", id), - }}, - }) - continue - } - for _, f := range containFiles { - for _, g := range pkg.GoFiles { - if sameFile(f, g) { - response.addRoot(id) - } - } - } - } - } - // Add root for any package that matches a pattern. This applies only to - // packages that are modified by overlays, since they are not added as - // roots automatically. - for _, pattern := range restPatterns { - match := matchPattern(pattern) - for _, pkgID := range modifiedPkgs { - pkg, ok := response.seenPackages[pkgID] - if !ok { - continue - } - if match(pkg.PkgPath) { - response.addRoot(pkg.ID) - } - } - } - } - - sizeswg.Wait() - if sizeserr != nil { - return nil, sizeserr - } + // (We may yet return an error due to defer.) return response.dr, nil } -func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { - if len(pkgs) == 0 { - return nil - } - dr, err := state.createDriverResponse(pkgs...) 
- if err != nil { - return err - } - for _, pkg := range dr.Packages { - response.addPackage(pkg) - } - _, needPkgs, err := state.processGolistOverlay(response) - if err != nil { - return err - } - return state.addNeededOverlayPackages(response, needPkgs) -} - func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. @@ -341,7 +265,7 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries // adhocPackage attempts to load or construct an ad-hoc package for a given // query, if the original call to the driver produced inadequate results. -func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { +func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) { response, err := state.createDriverResponse(query) if err != nil { return nil, err @@ -432,7 +356,7 @@ func otherFiles(p *jsonPackage) [][]string { // createDriverResponse uses the "go list" command to expand the pattern // words and return a response for the specified packages. -func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { +func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -454,11 +378,14 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse if err != nil { return nil, err } + seen := make(map[string]*jsonPackage) pkgs := make(map[string]*Package) additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. - var response driverResponse + response := &DriverResponse{ + GoVersion: goVersion, + } for dec := json.NewDecoder(buf); dec.More(); { p := new(jsonPackage) if err := dec.Decode(p); err != nil { @@ -600,17 +527,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse // Work around https://golang.org/issue/28749: // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. - // Filter out any elements of CompiledGoFiles that are also in OtherFiles. - // We have to keep this workaround in place until go1.12 is a distant memory. - if len(pkg.OtherFiles) > 0 { - other := make(map[string]bool, len(pkg.OtherFiles)) - for _, f := range pkg.OtherFiles { - other[f] = true - } - + // Remove files from CompiledGoFiles that are non-go files + // (or are not files that look like they are from the cache). + if len(pkg.CompiledGoFiles) > 0 { out := pkg.CompiledGoFiles[:0] for _, f := range pkg.CompiledGoFiles { - if other[f] { + if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file continue } out = append(out, f) @@ -626,7 +548,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse } if pkg.PkgPath == "unsafe" { - pkg.GoFiles = nil // ignore fake unsafe.go file + pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) + } else if len(pkg.CompiledGoFiles) == 0 { + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + pkg.CompiledGoFiles = pkg.GoFiles } // Assume go list emits only absolute paths for Dir. 
@@ -664,16 +591,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse response.Roots = append(response.Roots, pkg.ID) } - // Work around for pre-go.1.11 versions of go list. - // TODO(matloob): they should be handled by the fallback. - // Can we delete this? - if len(pkg.CompiledGoFiles) == 0 { - pkg.CompiledGoFiles = pkg.GoFiles - } - // Temporary work-around for golang/go#39986. Parse filenames out of // error messages. This happens if there are unrecoverable syntax // errors in the source, so we can't match on a specific error message. + // + // TODO(rfindley): remove this heuristic, in favor of considering + // InvalidGoFiles from the list driver. if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { addFilenameFromPos := func(pos string) bool { split := strings.Split(pos, ":") @@ -730,7 +653,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse } sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) - return &response, nil + return response, nil } func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { @@ -756,6 +679,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath } +// getGoVersion returns the effective minor version of the go command. func (state *golistState) getGoVersion() (int, error) { state.goVersionOnce.Do(func() { state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) @@ -891,6 +815,15 @@ func golistargs(cfg *Config, words []string, goVersion int) []string { // probably because you'd just get the TestMain. fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") fullargs = append(fullargs, words...) @@ -908,6 +841,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation { Env: cfg.Env, Logf: cfg.Logf, WorkingDir: cfg.Dir, + Overlay: cfg.goListOverlayFile, } } @@ -916,26 +850,6 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, cfg := state.cfg inv := state.cfgInvocation() - - // For Go versions 1.16 and above, `go list` accepts overlays directly via - // the -overlay flag. Set it, if it's available. - // - // The check for "list" is not necessarily required, but we should avoid - // getting the go version if possible. - if verb == "list" { - goVersion, err := state.getGoVersion() - if err != nil { - return nil, err - } - if goVersion >= 16 { - filename, cleanup, err := state.writeOverlays() - if err != nil { - return nil, err - } - defer cleanup() - inv.Overlay = filename - } - } inv.Verb = verb inv.Args = args gocmdRunner := cfg.gocmdRunner @@ -1082,67 +996,6 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return stdout, nil } -// OverlayJSON is the format overlay files are expected to be in. 
-// The Replace map maps from overlaid paths to replacement paths: -// the Go command will forward all reads trying to open -// each overlaid path to its replacement path, or consider the overlaid -// path not to exist if the replacement path is empty. -// -// From golang/go#39958. -type OverlayJSON struct { - Replace map[string]string `json:"replace,omitempty"` -} - -// writeOverlays writes out files for go list's -overlay flag, as described -// above. -func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { - // Do nothing if there are no overlays in the config. - if len(state.cfg.Overlay) == 0 { - return "", func() {}, nil - } - dir, err := ioutil.TempDir("", "gopackages-*") - if err != nil { - return "", nil, err - } - // The caller must clean up this directory, unless this function returns an - // error. - cleanup = func() { - os.RemoveAll(dir) - } - defer func() { - if err != nil { - cleanup() - } - }() - overlays := map[string]string{} - for k, v := range state.cfg.Overlay { - // Create a unique filename for the overlaid files, to avoid - // creating nested directories. - noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") - f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator)) - if err != nil { - return "", func() {}, err - } - if _, err := f.Write(v); err != nil { - return "", func() {}, err - } - if err := f.Close(); err != nil { - return "", func() {}, err - } - overlays[k] = f.Name() - } - b, err := json.Marshal(OverlayJSON{Replace: overlays}) - if err != nil { - return "", func() {}, err - } - // Write out the overlay file that contains the filepath mappings. - filename = filepath.Join(dir, "overlay.json") - if err := ioutil.WriteFile(filename, b, 0665); err != nil { - return "", func() {}, err - } - return filename, cleanup, nil -} - func containsGoFile(s []string) bool { for _, f := range s { if strings.HasSuffix(f, ".go") { diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/golist_overlay.go index 9576b472f..d823c474a 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -6,314 +6,11 @@ package packages import ( "encoding/json" - "fmt" - "go/parser" - "go/token" - "os" "path/filepath" - "regexp" - "sort" - "strconv" - "strings" "golang.org/x/tools/internal/gocommand" ) -// processGolistOverlay provides rudimentary support for adding -// files that don't exist on disk to an overlay. The results can be -// sometimes incorrect. -// TODO(matloob): Handle unsupported cases, including the following: -// - determining the correct package to add given a new import path -func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { - havePkgs := make(map[string]string) // importPath -> non-test package ID - needPkgsSet := make(map[string]bool) - modifiedPkgsSet := make(map[string]bool) - - pkgOfDir := make(map[string][]*Package) - for _, pkg := range response.dr.Packages { - // This is an approximation of import path to id. This can be - // wrong for tests, vendored packages, and a number of other cases. 
- havePkgs[pkg.PkgPath] = pkg.ID - dir, err := commonDir(pkg.GoFiles) - if err != nil { - return nil, nil, err - } - if dir != "" { - pkgOfDir[dir] = append(pkgOfDir[dir], pkg) - } - } - - // If no new imports are added, it is safe to avoid loading any needPkgs. - // Otherwise, it's hard to tell which package is actually being loaded - // (due to vendoring) and whether any modified package will show up - // in the transitive set of dependencies (because new imports are added, - // potentially modifying the transitive set of dependencies). - var overlayAddsImports bool - - // If both a package and its test package are created by the overlay, we - // need the real package first. Process all non-test files before test - // files, and make the whole process deterministic while we're at it. - var overlayFiles []string - for opath := range state.cfg.Overlay { - overlayFiles = append(overlayFiles, opath) - } - sort.Slice(overlayFiles, func(i, j int) bool { - iTest := strings.HasSuffix(overlayFiles[i], "_test.go") - jTest := strings.HasSuffix(overlayFiles[j], "_test.go") - if iTest != jTest { - return !iTest // non-tests are before tests. - } - return overlayFiles[i] < overlayFiles[j] - }) - for _, opath := range overlayFiles { - contents := state.cfg.Overlay[opath] - base := filepath.Base(opath) - dir := filepath.Dir(opath) - var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant - var testVariantOf *Package // if opath is a test file, this is the package it is testing - var fileExists bool - isTestFile := strings.HasSuffix(opath, "_test.go") - pkgName, ok := extractPackageName(opath, contents) - if !ok { - // Don't bother adding a file that doesn't even have a parsable package statement - // to the overlay. - continue - } - // If all the overlay files belong to a different package, change the - // package name to that package. - maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir]) - nextPackage: - for _, p := range response.dr.Packages { - if pkgName != p.Name && p.ID != "command-line-arguments" { - continue - } - for _, f := range p.GoFiles { - if !sameFile(filepath.Dir(f), dir) { - continue - } - // Make sure to capture information on the package's test variant, if needed. - if isTestFile && !hasTestFiles(p) { - // TODO(matloob): Are there packages other than the 'production' variant - // of a package that this can match? This shouldn't match the test main package - // because the file is generated in another directory. - testVariantOf = p - continue nextPackage - } else if !isTestFile && hasTestFiles(p) { - // We're examining a test variant, but the overlaid file is - // a non-test file. Because the overlay implementation - // (currently) only adds a file to one package, skip this - // package, so that we can add the file to the production - // variant of the package. (https://golang.org/issue/36857 - // tracks handling overlays on both the production and test - // variant of a package). - continue nextPackage - } - if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { - // We have already seen the production version of the - // for which p is a test variant. - if hasTestFiles(p) { - testVariantOf = pkg - } - } - pkg = p - if filepath.Base(f) == base { - fileExists = true - } - } - } - // The overlay could have included an entirely new package or an - // ad-hoc package. An ad-hoc package is one that we have manually - // constructed from inadequate `go list` results for a file= query. - // It will have the ID command-line-arguments. 
- if pkg == nil || pkg.ID == "command-line-arguments" { - // Try to find the module or gopath dir the file is contained in. - // Then for modules, add the module opath to the beginning. - pkgPath, ok, err := state.getPkgPath(dir) - if err != nil { - return nil, nil, err - } - if !ok { - break - } - var forTest string // only set for x tests - isXTest := strings.HasSuffix(pkgName, "_test") - if isXTest { - forTest = pkgPath - pkgPath += "_test" - } - id := pkgPath - if isTestFile { - if isXTest { - id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) - } else { - id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) - } - } - if pkg != nil { - // TODO(rstambler): We should change the package's path and ID - // here. The only issue is that this messes with the roots. - } else { - // Try to reclaim a package with the same ID, if it exists in the response. - for _, p := range response.dr.Packages { - if reclaimPackage(p, id, opath, contents) { - pkg = p - break - } - } - // Otherwise, create a new package. - if pkg == nil { - pkg = &Package{ - PkgPath: pkgPath, - ID: id, - Name: pkgName, - Imports: make(map[string]*Package), - } - response.addPackage(pkg) - havePkgs[pkg.PkgPath] = id - // Add the production package's sources for a test variant. - if isTestFile && !isXTest && testVariantOf != nil { - pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) - // Add the package under test and its imports to the test variant. - pkg.forTest = testVariantOf.PkgPath - for k, v := range testVariantOf.Imports { - pkg.Imports[k] = &Package{ID: v.ID} - } - } - if isXTest { - pkg.forTest = forTest - } - } - } - } - if !fileExists { - pkg.GoFiles = append(pkg.GoFiles, opath) - // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior - // if the file will be ignored due to its build tags. - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) - modifiedPkgsSet[pkg.ID] = true - } - imports, err := extractImports(opath, contents) - if err != nil { - // Let the parser or type checker report errors later. - continue - } - for _, imp := range imports { - // TODO(rstambler): If the package is an x test and the import has - // a test variant, make sure to replace it. - if _, found := pkg.Imports[imp]; found { - continue - } - overlayAddsImports = true - id, ok := havePkgs[imp] - if !ok { - var err error - id, err = state.resolveImport(dir, imp) - if err != nil { - return nil, nil, err - } - } - pkg.Imports[imp] = &Package{ID: id} - // Add dependencies to the non-test variant version of this package as well. - if testVariantOf != nil { - testVariantOf.Imports[imp] = &Package{ID: id} - } - } - } - - // toPkgPath guesses the package path given the id. - toPkgPath := func(sourceDir, id string) (string, error) { - if i := strings.IndexByte(id, ' '); i >= 0 { - return state.resolveImport(sourceDir, id[:i]) - } - return state.resolveImport(sourceDir, id) - } - - // Now that new packages have been created, do another pass to determine - // the new set of missing packages. 
- for _, pkg := range response.dr.Packages { - for _, imp := range pkg.Imports { - if len(pkg.GoFiles) == 0 { - return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) - } - pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) - if err != nil { - return nil, nil, err - } - if _, ok := havePkgs[pkgPath]; !ok { - needPkgsSet[pkgPath] = true - } - } - } - - if overlayAddsImports { - needPkgs = make([]string, 0, len(needPkgsSet)) - for pkg := range needPkgsSet { - needPkgs = append(needPkgs, pkg) - } - } - modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) - for pkg := range modifiedPkgsSet { - modifiedPkgs = append(modifiedPkgs, pkg) - } - return modifiedPkgs, needPkgs, err -} - -// resolveImport finds the ID of a package given its import path. -// In particular, it will find the right vendored copy when in GOPATH mode. -func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { - env, err := state.getEnv() - if err != nil { - return "", err - } - if env["GOMOD"] != "" { - return importPath, nil - } - - searchDir := sourceDir - for { - vendorDir := filepath.Join(searchDir, "vendor") - exists, ok := state.vendorDirs[vendorDir] - if !ok { - info, err := os.Stat(vendorDir) - exists = err == nil && info.IsDir() - state.vendorDirs[vendorDir] = exists - } - - if exists { - vendoredPath := filepath.Join(vendorDir, importPath) - if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { - // We should probably check for .go files here, but shame on anyone who fools us. - path, ok, err := state.getPkgPath(vendoredPath) - if err != nil { - return "", err - } - if ok { - return path, nil - } - } - } - - // We know we've hit the top of the filesystem when we Dir / and get /, - // or C:\ and get C:\, etc. - next := filepath.Dir(searchDir) - if next == searchDir { - break - } - searchDir = next - } - return importPath, nil -} - -func hasTestFiles(p *Package) bool { - for _, f := range p.GoFiles { - if strings.HasSuffix(f, "_test.go") { - return true - } - } - return false -} - // determineRootDirs returns a mapping from absolute directories that could // contain code to their corresponding import path prefixes. func (state *golistState) determineRootDirs() (map[string]string, error) { @@ -384,192 +81,3 @@ func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { } return m, nil } - -func extractImports(filename string, contents []byte) ([]string, error) { - f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? - if err != nil { - return nil, err - } - var res []string - for _, imp := range f.Imports { - quotedPath := imp.Path.Value - path, err := strconv.Unquote(quotedPath) - if err != nil { - return nil, err - } - res = append(res, path) - } - return res, nil -} - -// reclaimPackage attempts to reuse a package that failed to load in an overlay. -// -// If the package has errors and has no Name, GoFiles, or Imports, -// then it's possible that it doesn't yet exist on disk. -func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. 
- if pkg.ID != id { - return false - } - if len(pkg.Errors) != 1 { - return false - } - if pkg.Name != "" || pkg.ExportFile != "" { - return false - } - if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { - return false - } - if len(pkg.Imports) > 0 { - return false - } - pkgName, ok := extractPackageName(filename, contents) - if !ok { - return false - } - pkg.Name = pkgName - pkg.Errors = nil - return true -} - -func extractPackageName(filename string, contents []byte) (string, bool) { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. - f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? - if err != nil { - return "", false - } - return f.Name.Name, true -} - -// commonDir returns the directory that all files are in, "" if files is empty, -// or an error if they aren't in the same directory. -func commonDir(files []string) (string, error) { - seen := make(map[string]bool) - for _, f := range files { - seen[filepath.Dir(f)] = true - } - if len(seen) > 1 { - return "", fmt.Errorf("files (%v) are in more than one directory: %v", files, seen) - } - for k := range seen { - // seen has only one element; return it. - return k, nil - } - return "", nil // no files -} - -// It is possible that the files in the disk directory dir have a different package -// name from newName, which is deduced from the overlays. If they all have a different -// package name, and they all have the same package name, then that name becomes -// the package name. -// It returns true if it changes the package name, false otherwise. -func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) { - names := make(map[string]int) - for _, p := range pkgsOfDir { - names[p.Name]++ - } - if len(names) != 1 { - // some files are in different packages - return - } - var oldName string - for k := range names { - oldName = k - } - if newName == oldName { - return - } - // We might have a case where all of the package names in the directory are - // the same, but the overlay file is for an x test, which belongs to its - // own package. If the x test does not yet exist on disk, we may not yet - // have its package name on disk, but we should not rename the packages. - // - // We use a heuristic to determine if this file belongs to an x test: - // The test file should have a package name whose package name has a _test - // suffix or looks like "newName_test". - maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test") - if isTestFile && maybeXTest { - return - } - for _, p := range pkgsOfDir { - p.Name = newName - } -} - -// This function is copy-pasted from -// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360. -// It should be deleted when we remove support for overlays from go/packages. -// -// NOTE: This does not handle any ./... or ./ style queries, as this function -// doesn't know the working directory. -// -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -// Unfortunately, there are two special cases. Quoting "go help packages": -// -// First, /... at the end of the pattern can match an empty string, -// so that net/... matches both net and packages in its subdirectories, like net/http. 
-// Second, any slash-separated pattern element containing a wildcard never -// participates in a match of the "vendor" element in the path of a vendored -// package, so that ./... does not match packages in subdirectories of -// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. -// Note, however, that a directory named vendor that itself contains code -// is not a vendored package: cmd/vendor would be a command named vendor, -// and the pattern cmd/... matches it. -func matchPattern(pattern string) func(name string) bool { - // Convert pattern to regular expression. - // The strategy for the trailing /... is to nest it in an explicit ? expression. - // The strategy for the vendor exclusion is to change the unmatchable - // vendor strings to a disallowed code point (vendorChar) and to use - // "(anything but that codepoint)*" as the implementation of the ... wildcard. - // This is a bit complicated but the obvious alternative, - // namely a hand-written search like in most shell glob matchers, - // is too easy to make accidentally exponential. - // Using package regexp guarantees linear-time matching. - - const vendorChar = "\x00" - - if strings.Contains(pattern, vendorChar) { - return func(name string) bool { return false } - } - - re := regexp.QuoteMeta(pattern) - re = replaceVendor(re, vendorChar) - switch { - case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): - re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` - case re == vendorChar+`/\.\.\.`: - re = `(/vendor|/` + vendorChar + `/\.\.\.)` - case strings.HasSuffix(re, `/\.\.\.`): - re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` - } - re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) - - reg := regexp.MustCompile(`^` + re + `$`) - - return func(name string) bool { - if strings.Contains(name, vendorChar) { - return false - } - return reg.MatchString(replaceVendor(name, vendorChar)) - } -} - -// replaceVendor returns the result of replacing -// non-trailing vendor path elements in x with repl. -func replaceVendor(x, repl string) string { - if !strings.Contains(x, "vendor") { - return x - } - elem := strings.Split(x, "/") - for i := 0; i < len(elem)-1; i++ { - if elem[i] == "vendor" { - elem[i] = repl - } - } - return strings.Join(elem, "/") -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packages.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packages.go index a93dc6add..34306ddd3 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packages.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packages.go @@ -9,34 +9,48 @@ package packages import ( "context" "encoding/json" + "errors" "fmt" "go/ast" "go/parser" "go/scanner" "go/token" "go/types" - "io/ioutil" + "io" "log" "os" "path/filepath" + "runtime" "strings" "sync" "time" + "golang.org/x/sync/errgroup" + "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. // The bits below can be combined to specify which fields should be // filled in the result packages. +// // The zero value is a special case, equivalent to combining // the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// // ID and Errors (if present) will always be filled. 
-// Load may return more information than requested. +// [Load] may return more information than requested. +// +// Unfortunately there are a number of open bugs related to +// interactions among the LoadMode bits: +// - https://github.com/golang/go/issues/48226 +// - https://github.com/golang/go/issues/56633 +// - https://github.com/golang/go/issues/56677 +// - https://github.com/golang/go/issues/58726 +// - https://github.com/golang/go/issues/63517 type LoadMode int const ( @@ -119,15 +133,21 @@ const ( // A Config specifies details about how packages should be loaded. // The zero value is a valid configuration. +// // Calls to Load do not modify this struct. +// +// TODO(adonovan): #67702: this is currently false: in fact, +// calls to [Load] do not modify the public fields of this struct, but +// may modify hidden fields, so concurrent calls to [Load] must not +// use the same Config. But perhaps we should reestablish the +// documented invariant. type Config struct { // Mode controls the level of information returned for each package. Mode LoadMode // Context specifies the context for the load operation. - // If the context is cancelled, the loader may stop early - // and return an ErrCancelled error. - // If Context is nil, the load cannot be cancelled. + // Cancelling the context may cause [Load] to abort and + // return an error. Context context.Context // Logf is the logger for the config. @@ -196,43 +216,23 @@ type Config struct { // setting Tests may have no effect. Tests bool - // Overlay provides a mapping of absolute file paths to file contents. - // If the file with the given path already exists, the parser will use the - // alternative file contents provided by the map. + // Overlay is a mapping from absolute file paths to file contents. + // + // For each map entry, [Load] uses the alternative file + // contents provided by the overlay mapping instead of reading + // from the file system. This mechanism can be used to enable + // editor-integrated tools to correctly analyze the contents + // of modified but unsaved buffers, for example. // - // Overlays provide incomplete support for when a given file doesn't - // already exist on disk. See the package doc above for more details. + // The overlay mapping is passed to the build system's driver + // (see "The driver protocol") so that it too can report + // consistent package metadata about unsaved files. However, + // drivers may vary in their level of support for overlays. Overlay map[string][]byte -} -// driver is the type for functions that query the build system for the -// packages named by the patterns. -type driver func(cfg *Config, patterns ...string) (*driverResponse, error) - -// driverResponse contains the results for a driver query. -type driverResponse struct { - // NotHandled is returned if the request can't be handled by the current - // driver. If an external driver returns a response with NotHandled, the - // rest of the driverResponse is ignored, and go/packages will fallback - // to the next driver. If go/packages is extended in the future to support - // lists of multiple drivers, go/packages will fall back to the next driver. - NotHandled bool - - // Sizes, if not nil, is the types.Sizes to use when type checking. - Sizes *types.StdSizes - - // Roots is the set of package IDs that make up the root packages. - // We have to encode this separately because when we encode a single package - // we cannot know if it is one of the roots as that requires knowledge of the - // graph it is part of. 
- Roots []string `json:",omitempty"` - - // Packages is the full set of packages in the graph. - // The packages are not connected into a graph. - // The Imports if populated will be stubs that only have their ID set. - // Imports will be connected and then type and syntax information added in a - // later pass (see refine). - Packages []*Package + // goListOverlayFile is the JSON file that encodes the Overlay + // mapping, used by 'go list -overlay=...' + goListOverlayFile string } // Load loads and returns the Go packages named by the given patterns. @@ -240,8 +240,22 @@ type driverResponse struct { // Config specifies loading options; // nil behaves the same as an empty Config. // -// Load returns an error if any of the patterns was invalid -// as defined by the underlying build system. +// The [Config.Mode] field is a set of bits that determine what kinds +// of information should be computed and returned. Modes that require +// more information tend to be slower. See [LoadMode] for details +// and important caveats. Its zero value is equivalent to +// NeedName | NeedFiles | NeedCompiledGoFiles. +// +// Each call to Load returns a new set of [Package] instances. +// The Packages and their Imports form a directed acyclic graph. +// +// If the [NeedTypes] mode flag was set, each call to Load uses a new +// [types.Importer], so [types.Object] and [types.Type] values from +// different calls to Load must not be mixed as they will have +// inconsistent notions of type identity. +// +// If any of the patterns was invalid as defined by the +// underlying build system, Load returns an error. // It may return an empty list of packages without an error, // for instance for an empty expansion of a valid wildcard. // Errors associated with a particular package are recorded in the @@ -250,34 +264,162 @@ type driverResponse struct { // proceeding with further analysis. The PrintErrors function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { - l := newLoader(cfg) - response, err := defaultDriver(&l.Config, patterns...) + ld := newLoader(cfg) + response, external, err := defaultDriver(&ld.Config, patterns...) if err != nil { return nil, err } - l.sizes = response.Sizes - return l.refine(response.Roots, response.Packages...) + + ld.sizes = types.SizesFor(response.Compiler, response.Arch) + if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 { + // Type size information is needed but unavailable. + if external { + // An external driver may fail to populate the Compiler/GOARCH fields, + // especially since they are relatively new (see #63700). + // Provide a sensible fallback in this case. + ld.sizes = types.SizesFor("gc", runtime.GOARCH) + if ld.sizes == nil { // gccgo-only arch + ld.sizes = types.SizesFor("gc", "amd64") + } + } else { + // Go list should never fail to deliver accurate size information. + // Reject the whole Load since the error is the same for every package. + return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q", + response.Compiler, response.Arch) + } + } + + return ld.refine(response) } // defaultDriver is a driver that implements go/packages' fallback behavior. // It will try to request to an external driver, if one exists. If there's // no external driver, or the driver returns a response with NotHandled set, // defaultDriver will fall back to the go list driver. 
-func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { - driver := findExternalDriver(cfg) - if driver == nil { - driver = goListDriver +// The boolean result indicates that an external driver handled the request. +func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) { + const ( + // windowsArgMax specifies the maximum command line length for + // the Windows' CreateProcess function. + windowsArgMax = 32767 + // maxEnvSize is a very rough estimation of the maximum environment + // size of a user. + maxEnvSize = 16384 + // safeArgMax specifies the maximum safe command line length to use + // by the underlying driver excl. the environment. We choose the Windows' + // ARG_MAX as the starting point because it's one of the lowest ARG_MAX + // constants out of the different supported platforms, + // e.g., https://www.in-ulm.de/~mascheck/various/argmax/#results. + safeArgMax = windowsArgMax - maxEnvSize + ) + chunks, err := splitIntoChunks(patterns, safeArgMax) + if err != nil { + return nil, false, err + } + + if driver := findExternalDriver(cfg); driver != nil { + response, err := callDriverOnChunks(driver, cfg, chunks) + if err != nil { + return nil, false, err + } else if !response.NotHandled { + return response, true, nil + } + // (fall through) + } + + // go list fallback + // + // Write overlays once, as there are many calls + // to 'go list' (one per chunk plus others too). + overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) + if err != nil { + return nil, false, err } - response, err := driver(cfg, patterns...) + defer cleanupOverlay() + cfg.goListOverlayFile = overlay + + response, err := callDriverOnChunks(goListDriver, cfg, chunks) if err != nil { - return response, err - } else if response.NotHandled { - return goListDriver(cfg, patterns...) + return nil, false, err } - return response, nil + return response, false, err +} + +// splitIntoChunks chunks the slice so that the total number of characters +// in a chunk is no longer than argMax. +func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { + if argMax <= 0 { + return nil, errors.New("failed to split patterns into chunks, negative safe argMax value") + } + var chunks [][]string + charsInChunk := 0 + nextChunkStart := 0 + for i, v := range patterns { + vChars := len(v) + if vChars > argMax { + // a single pattern is longer than the maximum safe ARG_MAX, hardly should happen + return nil, errors.New("failed to split patterns into chunks, a pattern is too long") + } + charsInChunk += vChars + 1 // +1 is for a whitespace between patterns that has to be counted too + if charsInChunk > argMax { + chunks = append(chunks, patterns[nextChunkStart:i]) + nextChunkStart = i + charsInChunk = vChars + } + } + // add the last chunk + if nextChunkStart < len(patterns) { + chunks = append(chunks, patterns[nextChunkStart:]) + } + return chunks, nil +} + +func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { + if len(chunks) == 0 { + return driver(cfg) + } + responses := make([]*DriverResponse, len(chunks)) + errNotHandled := errors.New("driver returned NotHandled") + var g errgroup.Group + for i, chunk := range chunks { + i := i + chunk := chunk + g.Go(func() (err error) { + responses[i], err = driver(cfg, chunk...) 
+ if responses[i] != nil && responses[i].NotHandled { + err = errNotHandled + } + return err + }) + } + if err := g.Wait(); err != nil { + if errors.Is(err, errNotHandled) { + return &DriverResponse{NotHandled: true}, nil + } + return nil, err + } + return mergeResponses(responses...), nil +} + +func mergeResponses(responses ...*DriverResponse) *DriverResponse { + if len(responses) == 0 { + return nil + } + response := newDeduper() + response.dr.NotHandled = false + response.dr.Compiler = responses[0].Compiler + response.dr.Arch = responses[0].Arch + response.dr.GoVersion = responses[0].GoVersion + for _, v := range responses { + response.addAll(v) + } + return response.dr } // A Package describes a loaded Go package. +// +// It also defines part of the JSON schema of [DriverResponse]. +// See the package documentation for an overview. type Package struct { // ID is a unique identifier for a package, // in a syntax provided by the underlying build system. @@ -297,7 +439,13 @@ type Package struct { // of the package, or while parsing or type-checking its files. Errors []Error + // TypeErrors contains the subset of errors produced during type checking. + TypeErrors []types.Error + // GoFiles lists the absolute file paths of the package's Go source files. + // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. GoFiles []string // CompiledGoFiles lists the absolute file paths of the package's source @@ -330,19 +478,30 @@ type Package struct { // to corresponding loaded Packages. Imports map[string]*Package + // Module is the module information for the package if it exists. + // + // Note: it may be missing for std and cmd; see Go issue #65816. + Module *Module + + // -- The following fields are not part of the driver JSON schema. -- + // Types provides type information for the package. // The NeedTypes LoadMode bit sets this field for packages matching the // patterns; type information for dependencies may be missing or incomplete, // unless NeedDeps and NeedImports are also set. - Types *types.Package + // + // Each call to [Load] returns a consistent set of type + // symbols, as defined by the comment at [types.Identical]. + // Avoid mixing type information from two or more calls to [Load]. + Types *types.Package `json:"-"` // Fset provides position information for Types, TypesInfo, and Syntax. // It is set only when Types is set. - Fset *token.FileSet + Fset *token.FileSet `json:"-"` // IllTyped indicates whether the package or any dependency contains errors. // It is set only when Types is set. - IllTyped bool + IllTyped bool `json:"-"` // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. // @@ -352,26 +511,28 @@ type Package struct { // // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles. - Syntax []*ast.File + Syntax []*ast.File `json:"-"` // TypesInfo provides type information about the package's syntax trees. // It is set only when Syntax is set. - TypesInfo *types.Info + TypesInfo *types.Info `json:"-"` // TypesSizes provides the effective size function for types in TypesInfo. - TypesSizes types.Sizes + TypesSizes types.Sizes `json:"-"` + + // -- internal -- // forTest is the package under test, if any. 
forTest string // depsErrors is the DepsErrors field from the go list response, if any. depsErrors []*packagesinternal.PackageError - - // module is the module information for the package if it exists. - Module *Module } // Module provides module information for a package. +// +// It also defines part of the JSON schema of [DriverResponse]. +// See the package documentation for an overview. type Module struct { Path string // module path Version string // module version @@ -397,12 +558,6 @@ func init() { packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { - return config.(*Config).gocmdRunner - } - packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { - config.(*Config).gocmdRunner = runner - } packagesinternal.SetModFile = func(config interface{}, value string) { config.(*Config).modFile = value } @@ -510,6 +665,7 @@ func (p *Package) UnmarshalJSON(b []byte) error { OtherFiles: flat.OtherFiles, EmbedFiles: flat.EmbedFiles, EmbedPatterns: flat.EmbedPatterns, + IgnoredFiles: flat.IgnoredFiles, ExportFile: flat.ExportFile, } if len(flat.Imports) > 0 { @@ -532,13 +688,14 @@ type loaderPackage struct { needsrc bool // load from source (Mode >= LoadTypes) needtypes bool // type information is either requested or depended on initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH } // loader holds the working state of a single call to load. type loader struct { pkgs map[string]*loaderPackage Config - sizes types.Sizes + sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue parseCacheMu sync.Mutex exportMu sync.Mutex // enforces mutual exclusion of exportdata operations @@ -616,9 +773,10 @@ func newLoader(cfg *Config) *loader { return ld } -// refine connects the supplied packages into a graph and then adds type and +// refine connects the supplied packages into a graph and then adds type // and syntax information as requested by the LoadMode. -func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { +func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { + roots := response.Roots rootMap := make(map[string]int, len(roots)) for i, root := range roots { rootMap[root] = i @@ -626,7 +784,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { ld.pkgs = make(map[string]*loaderPackage) // first pass, fixup and build the map and roots var initial = make([]*loaderPackage, len(roots)) - for _, pkg := range list { + for _, pkg := range response.Packages { rootIndex := -1 if i, found := rootMap[pkg.ID]; found { rootIndex = i @@ -648,6 +806,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { Package: pkg, needtypes: needtypes, needsrc: needsrc, + goVersion: response.GoVersion, } ld.pkgs[lpkg.ID] = lpkg if rootIndex >= 0 { @@ -661,39 +820,38 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { } } - // Materialize the import graph. - - const ( - white = 0 // new - grey = 1 // in progress - black = 2 // complete - ) - - // visit traverses the import graph, depth-first, - // and materializes the graph as Packages.Imports. - // - // Valid imports are saved in the Packages.Import map. - // Invalid imports (cycles and missing nodes) are saved in the importErrors map. 
- // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. - // - // visit returns whether the package needs src or has a transitive - // dependency on a package that does. These are the only packages - // for which we load source code. - var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - var srcPkgs []*loaderPackage - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: - panic("internal error: grey node") - } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - // If NeedImports isn't set, the imports fields will all be zeroed out. - if ld.Mode&NeedImports != 0 { + if ld.Mode&NeedImports != 0 { + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, + // the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. + var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports lpkg.Imports = make(map[string]*Package, len(stubs)) for importPath, ipkg := range stubs { var importErr error @@ -717,40 +875,39 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { } lpkg.Imports[importPath] = imp.Package } - } - if lpkg.needsrc { - srcPkgs = append(srcPkgs, lpkg) - } - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes - } - stack = stack[:len(stack)-1] // pop - lpkg.color = black - return lpkg.needsrc - } + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true + } + } - if ld.Mode&NeedImports == 0 { - // We do this to drop the stub import packages that we are not even going to try to resolve. - for _, lpkg := range initial { - lpkg.Imports = nil + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc } - } else { + // For each initial package, create its import DAG. for _, lpkg := range initial { visit(lpkg) } - } - if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { - for _, lpkg := range srcPkgs { - // Complete type information is required for the - // immediate dependencies of each source package. - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - imp.needtypes = true - } + + } else { + // !NeedImports: drop the stub (ID-only) import packages + // that we are not even going to try to resolve. 
+ for _, lpkg := range initial { + lpkg.Imports = nil } } + // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { @@ -765,6 +922,12 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { wg.Wait() } + // If the context is done, return its error and + // throw out [likely] incomplete packages. + if err := ld.Context.Err(); err != nil { + return nil, err + } + result := make([]*Package, len(initial)) for i, lpkg := range initial { result[i] = lpkg.Package @@ -860,17 +1023,32 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) lpkg.Fset = ld.Fset + // Start shutting down if the context is done and do not load + // source or export data files. + // Packages that import this one will have ld.Context.Err() != nil. + // ld.Context.Err() will be returned later by refine. + if ld.Context.Err() != nil { + return + } + // Subtle: we populate all Types fields with an empty Package // before loading export data so that export data processing // never has to create a types.Package for an indirect dependency, // which would then require that such created packages be explicitly // inserted back into the Import graph as a final step after export data loading. + // (Hence this return is after the Types assignment.) // The Diamond test exercises this case. if !lpkg.needtypes && !lpkg.needsrc { return } if !lpkg.needsrc { - ld.loadFromExportData(lpkg) + if err := ld.loadFromExportData(lpkg); err != nil { + lpkg.Errors = append(lpkg.Errors, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, // e.g. can't find/open/parse export data + }) + } return // not a source package, don't get syntax trees } @@ -902,6 +1080,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { case types.Error: // from type checker + lpkg.TypeErrors = append(lpkg.TypeErrors, err) errs = append(errs, Error{ Pos: err.Fset.Position(err.Pos).String(), Msg: err.Msg, @@ -923,11 +1102,41 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { lpkg.Errors = append(lpkg.Errors, errs...) } + // If the go command on the PATH is newer than the runtime, + // then the go/{scanner,ast,parser,types} packages from the + // standard library may be unable to process the files + // selected by go list. + // + // There is currently no way to downgrade the effective + // version of the go command (see issue 52078), so we proceed + // with the newer go command but, in case of parse or type + // errors, we emit an additional diagnostic. + // + // See: + // - golang.org/issue/52078 (flag to set release tags) + // - golang.org/issue/50825 (gopls legacy version support) + // - golang.org/issue/55883 (go/packages confusing error) + // + // Should we assert a hard minimum of (currently) go1.16 here? + var runtimeVersion int + if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion { + defer func() { + if len(lpkg.Errors) > 0 { + appendError(Error{ + Pos: "-", + Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. 
If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion), + Kind: UnknownError, + }) + } + }() + } + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { // The config requested loading sources and types, but sources are missing. // Add an error to the package and fall back to loading from export data. appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) - ld.loadFromExportData(lpkg) + _ = ld.loadFromExportData(lpkg) // ignore any secondary errors + return // can't get syntax trees for this package } @@ -941,15 +1150,23 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } + // Start shutting down if the context is done and do not type check. + // Packages that import this one will have ld.Context.Err() != nil. + // ld.Context.Err() will be returned later by refine. + if ld.Context.Err() != nil { + return + } + lpkg.TypesInfo = &types.Info{ Types: make(map[ast.Expr]types.TypeAndValue), Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), Scopes: make(map[ast.Node]*types.Scope), Selections: make(map[*ast.SelectorExpr]*types.Selection), } - typeparams.InitInstanceInfo(lpkg.TypesInfo) + versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -981,13 +1198,16 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { tc := &types.Config{ Importer: importer, - // Type-check bodies of functions only in non-initial packages. + // Type-check bodies of functions only in initial packages. // Example: for import graph A->B->C and initial packages {A,C}, // we can ignore function bodies in B. IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, Error: appendError, - Sizes: ld.sizes, + Sizes: ld.sizes, // may be nil + } + if lpkg.Module != nil && lpkg.Module.GoVersion != "" { + tc.GoVersion = "go" + lpkg.Module.GoVersion } if (ld.Mode & typecheckCgo) != 0 { if !typesinternal.SetUsesCgo(tc) { @@ -998,10 +1218,24 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } } - types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) lpkg.importErrors = nil // no longer needed + // In go/types go1.21 and go1.22, Checker.Files failed fast with a + // a "too new" error, without calling tc.Error and without + // proceeding to type-check the package (#66525). + // We rely on the runtimeVersion error to give the suggested remedy. + if typErr != nil && len(lpkg.Errors) == 0 && len(lpkg.Syntax) > 0 { + if msg := typErr.Error(); strings.HasPrefix(msg, "package requires newer Go version") { + appendError(types.Error{ + Fset: ld.Fset, + Pos: lpkg.Syntax[0].Package, + Msg: msg, + }) + } + } + // If !Cgo, the type-checker uses FakeImportC mode, so // it doesn't invoke the importer for import "C", // nor report an error for the import, @@ -1023,6 +1257,12 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } } + // If types.Checker.Files had an error that was unreported, + // make sure to report the unknown error so the package is illTyped. + if typErr != nil && len(lpkg.Errors) == 0 { + appendError(typErr) + } + // Record accumulated errors. 
illTyped := len(lpkg.Errors) > 0 if !illTyped { @@ -1068,7 +1308,7 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var err error if src == nil { ioLimit <- true // wait - src, err = ioutil.ReadFile(filename) + src, err = os.ReadFile(filename) <-ioLimit // signal } if err != nil { @@ -1094,11 +1334,6 @@ func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { parsed := make([]*ast.File, n) errors := make([]error, n) for i, file := range filenames { - if ld.Config.Context.Err() != nil { - parsed[i] = nil - errors[i] = ld.Config.Context.Err() - continue - } wg.Add(1) go func(i int, filename string) { parsed[i], errors[i] = ld.parseFile(filename) @@ -1151,9 +1386,10 @@ func sameFile(x, y string) bool { return false } -// loadFromExportData returns type information for the specified +// loadFromExportData ensures that type information is present for the specified // package, loading it from an export data file on the first request. -func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) { +// On success it sets lpkg.Types to a new Package. +func (ld *loader) loadFromExportData(lpkg *loaderPackage) error { if lpkg.PkgPath == "" { log.Fatalf("internal error: Package %s has no PkgPath", lpkg) } @@ -1164,8 +1400,8 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error // must be sequential. (Finer-grained locking would require // changes to the gcexportdata API.) // - // The exportMu lock guards the Package.Pkg field and the - // types.Package it points to, for each Package in the graph. + // The exportMu lock guards the lpkg.Types field and the + // types.Package it points to, for each loaderPackage in the graph. // // Not all accesses to Package.Pkg need to be protected by exportMu: // graph ordering ensures that direct dependencies of source @@ -1174,18 +1410,18 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error defer ld.exportMu.Unlock() if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { - return tpkg, nil // cache hit + return nil // cache hit } lpkg.IllTyped = true // fail safe if lpkg.ExportFile == "" { // Errors while building export data will have been printed to stderr. - return nil, fmt.Errorf("no export data file") + return fmt.Errorf("no export data file") } f, err := os.Open(lpkg.ExportFile) if err != nil { - return nil, err + return err } defer f.Close() @@ -1197,7 +1433,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error // queries.) r, err := gcexportdata.NewReader(f) if err != nil { - return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) } // Build the view. @@ -1241,7 +1477,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error // (May modify incomplete packages in view but not create new ones.) tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) if err != nil { - return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) } if _, ok := view["go.shape"]; ok { // Account for the pseudopackage "go.shape" that gets @@ -1254,8 +1490,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error lpkg.Types = tpkg lpkg.IllTyped = false - - return tpkg, nil + return nil } // impliedLoadMode returns loadMode with its dependencies. 
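
As a minimal sketch (separate from the vendored patch itself) of how a caller might use the go/packages API whose documentation and error handling the hunks above revise — the "./..." pattern, the chosen LoadMode bits, and the module layout are illustrative assumptions, not part of this change:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Request only the detail needed; heavier LoadMode bits are slower,
	// per the caveats in the LoadMode/Config documentation above.
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports |
			packages.NeedTypes | packages.NeedSyntax | packages.NeedTypesInfo,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err) // invalid pattern or a driver failure
	}
	for _, pkg := range pkgs {
		if !pkg.IllTyped {
			continue
		}
		// TypeErrors is the subset of pkg.Errors produced by the type
		// checker; it is the field added to Package in this revision.
		for _, terr := range pkg.TypeErrors {
			fmt.Println(terr)
		}
	}
}
```

Keeping the mode bits to what the caller actually consumes keeps the underlying `go list` invocations cheap, which is the point of the mode-bit caveats documented in the hunks above.
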
@@ -1271,3 +1506,5 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { func usesExportData(cfg *Config) bool { return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } + +var _ interface{} = io.Discard // assert build toolchain is go1.16 or later diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/expect.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/expect.go index 841099c0c..14a644613 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/expect.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/expect.go @@ -7,7 +7,6 @@ package packagestest import ( "fmt" "go/token" - "io/ioutil" "os" "path/filepath" "reflect" @@ -16,7 +15,6 @@ import ( "golang.org/x/tools/go/expect" "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/span" ) const ( @@ -124,14 +122,16 @@ func (e *Exported) Expect(methods map[string]interface{}) error { return nil } -// Range is a type alias for span.Range for backwards compatibility, prefer -// using span.Range directly. -type Range = span.Range +// A Range represents an interval within a source file in go/token notation. +type Range struct { + TokFile *token.File // non-nil + Start, End token.Pos // both valid and within range of TokFile +} // Mark adds a new marker to the known set. func (e *Exported) Mark(name string, r Range) { if e.markers == nil { - e.markers = make(map[string]span.Range) + e.markers = make(map[string]Range) } e.markers[name] = r } @@ -210,7 +210,7 @@ func goModMarkers(e *Exported, gomod string) ([]*expect.Note, error) { } gomod = strings.TrimSuffix(gomod, ".temp") // If we are in Modules mode, copy the original contents file back into go.mod - if err := ioutil.WriteFile(gomod, content, 0644); err != nil { + if err := os.WriteFile(gomod, content, 0644); err != nil { return nil, nil } return expect.Parse(e.ExpectFileSet, gomod, content) @@ -221,7 +221,7 @@ func (e *Exported) getMarkers() error { return nil } // set markers early so that we don't call getMarkers again from Expect - e.markers = make(map[string]span.Range) + e.markers = make(map[string]Range) return e.Expect(map[string]interface{}{ markMethod: e.Mark, }) @@ -232,8 +232,7 @@ var ( identifierType = reflect.TypeOf(expect.Identifier("")) posType = reflect.TypeOf(token.Pos(0)) positionType = reflect.TypeOf(token.Position{}) - rangeType = reflect.TypeOf(span.Range{}) - spanType = reflect.TypeOf(span.Span{}) + rangeType = reflect.TypeOf(Range{}) fsetType = reflect.TypeOf((*token.FileSet)(nil)) regexType = reflect.TypeOf((*regexp.Regexp)(nil)) exportedType = reflect.TypeOf((*Exported)(nil)) @@ -295,18 +294,6 @@ func (e *Exported) buildConverter(pt reflect.Type) (converter, error) { } return reflect.ValueOf(r), remains, nil }, nil - case pt == spanType: - return func(n *expect.Note, args []interface{}) (reflect.Value, []interface{}, error) { - r, remains, err := e.rangeConverter(n, args) - if err != nil { - return reflect.Value{}, nil, err - } - spn, err := r.Span() - if err != nil { - return reflect.Value{}, nil, err - } - return reflect.ValueOf(spn), remains, nil - }, nil case pt == identifierType: return func(n *expect.Note, args []interface{}) (reflect.Value, []interface{}, error) { if len(args) < 1 { @@ -408,10 +395,10 @@ func (e *Exported) buildConverter(pt reflect.Type) (converter, error) { } } -func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (span.Range, []interface{}, error) { +func 
(e *Exported) rangeConverter(n *expect.Note, args []interface{}) (Range, []interface{}, error) { tokFile := e.ExpectFileSet.File(n.Pos) if len(args) < 1 { - return span.Range{}, nil, fmt.Errorf("missing argument") + return Range{}, nil, fmt.Errorf("missing argument") } arg := args[0] args = args[1:] @@ -422,34 +409,60 @@ func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (span.Rang case eofIdentifier: // end of file identifier eof := tokFile.Pos(tokFile.Size()) - return span.NewRange(tokFile, eof, eof), args, nil + return newRange(tokFile, eof, eof), args, nil default: // look up an marker by name mark, ok := e.markers[string(arg)] if !ok { - return span.Range{}, nil, fmt.Errorf("cannot find marker %v", arg) + return Range{}, nil, fmt.Errorf("cannot find marker %v", arg) } return mark, args, nil } case string: start, end, err := expect.MatchBefore(e.ExpectFileSet, e.FileContents, n.Pos, arg) if err != nil { - return span.Range{}, nil, err + return Range{}, nil, err } if !start.IsValid() { - return span.Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg) + return Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg) } - return span.NewRange(tokFile, start, end), args, nil + return newRange(tokFile, start, end), args, nil case *regexp.Regexp: start, end, err := expect.MatchBefore(e.ExpectFileSet, e.FileContents, n.Pos, arg) if err != nil { - return span.Range{}, nil, err + return Range{}, nil, err } if !start.IsValid() { - return span.Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg) + return Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg) } - return span.NewRange(tokFile, start, end), args, nil + return newRange(tokFile, start, end), args, nil default: - return span.Range{}, nil, fmt.Errorf("cannot convert %v to pos", arg) + return Range{}, nil, fmt.Errorf("cannot convert %v to pos", arg) + } +} + +// newRange creates a new Range from a token.File and two valid positions within it. 
+func newRange(file *token.File, start, end token.Pos) Range { + fileBase := file.Base() + fileEnd := fileBase + file.Size() + if !start.IsValid() { + panic("invalid start token.Pos") + } + if !end.IsValid() { + panic("invalid end token.Pos") + } + if int(start) < fileBase || int(start) > fileEnd { + panic(fmt.Sprintf("invalid start: %d not in [%d, %d]", start, fileBase, fileEnd)) + } + if int(end) < fileBase || int(end) > fileEnd { + panic(fmt.Sprintf("invalid end: %d not in [%d, %d]", end, fileBase, fileEnd)) + } + if start > end { + panic("invalid start: greater than end") + } + return Range{ + TokFile: file, + Start: start, + End: end, } } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/export.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/export.go index 894dcdd44..67d48562f 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/export.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/export.go @@ -69,7 +69,6 @@ import ( "fmt" "go/token" "io" - "io/ioutil" "log" "os" "path/filepath" @@ -79,7 +78,6 @@ import ( "golang.org/x/tools/go/expect" "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/span" "golang.org/x/tools/internal/testenv" ) @@ -129,7 +127,7 @@ type Exported struct { primary string // the first non GOROOT module that was exported written map[string]map[string]string // the full set of exported files notes []*expect.Note // The list of expectations extracted from go source files - markers map[string]span.Range // The set of markers extracted from go source files + markers map[string]Range // The set of markers extracted from go source files } // Exporter implementations are responsible for converting from the generic description of some @@ -149,7 +147,7 @@ type Exporter interface { // All is the list of known exporters. // This is used by TestAll to run tests with all the exporters. -var All []Exporter +var All = []Exporter{GOPATH, Modules} // TestAll invokes the testing function once for each exporter registered in // the All global. @@ -199,7 +197,7 @@ func Export(t testing.TB, exporter Exporter, modules []Module) *Exported { dirname := strings.Replace(t.Name(), "/", "_", -1) dirname = strings.Replace(dirname, "#", "_", -1) // duplicate subtests get a #NNN suffix. - temp, err := ioutil.TempDir("", dirname) + temp, err := os.MkdirTemp("", dirname) if err != nil { t.Fatal(err) } @@ -218,6 +216,9 @@ func Export(t testing.TB, exporter Exporter, modules []Module) *Exported { written: map[string]map[string]string{}, ExpectFileSet: token.NewFileSet(), } + if testing.Verbose() { + exported.Config.Logf = t.Logf + } defer func() { if t.Failed() || t.Skipped() { exported.Cleanup() @@ -252,7 +253,7 @@ func Export(t testing.TB, exporter Exporter, modules []Module) *Exported { t.Fatal(err) } case string: - if err := ioutil.WriteFile(fullpath, []byte(value), 0644); err != nil { + if err := os.WriteFile(fullpath, []byte(value), 0644); err != nil { t.Fatal(err) } default: @@ -276,7 +277,7 @@ func Export(t testing.TB, exporter Exporter, modules []Module) *Exported { // It is intended for source files that are shell scripts. 
func Script(contents string) Writer { return func(filename string) error { - return ioutil.WriteFile(filename, []byte(contents), 0755) + return os.WriteFile(filename, []byte(contents), 0755) } } @@ -657,7 +658,7 @@ func (e *Exported) FileContents(filename string) ([]byte, error) { if content, found := e.Config.Overlay[filename]; found { return content, nil } - content, err := ioutil.ReadFile(filename) + content, err := os.ReadFile(filename) if err != nil { return nil, err } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/gopath.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/gopath.go index d56f523ed..c2e57a154 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/gopath.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/gopath.go @@ -41,10 +41,6 @@ import ( // /sometemporarydirectory/repoa/src var GOPATH = gopath{} -func init() { - All = append(All, GOPATH) -} - type gopath struct{} func (gopath) Name() string { diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/modules.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/modules.go index 69a6c935d..0c8d3d8fe 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/modules.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/modules.go @@ -5,9 +5,9 @@ package packagestest import ( + "bytes" "context" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -15,12 +15,11 @@ import ( "strings" "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/packagesinternal" "golang.org/x/tools/internal/proxydir" ) // Modules is the exporter that produces module layouts. -// Each "repository" is put in it's own module, and the module file generated +// Each "repository" is put in its own module, and the module file generated // will have replace directives for all other modules. // Given the two files // @@ -90,17 +89,18 @@ func (modules) Finalize(exported *Exported) error { // If the primary module already has a go.mod, write the contents to a temp // go.mod for now and then we will reset it when we are getting all the markers. 
if gomod := exported.written[exported.primary]["go.mod"]; gomod != "" { - contents, err := ioutil.ReadFile(gomod) + contents, err := os.ReadFile(gomod) if err != nil { return err } - if err := ioutil.WriteFile(gomod+".temp", contents, 0644); err != nil { + if err := os.WriteFile(gomod+".temp", contents, 0644); err != nil { return err } } exported.written[exported.primary]["go.mod"] = filepath.Join(primaryDir, "go.mod") - primaryGomod := "module " + exported.primary + "\nrequire (\n" + var primaryGomod bytes.Buffer + fmt.Fprintf(&primaryGomod, "module %s\nrequire (\n", exported.primary) for other := range exported.written { if other == exported.primary { continue @@ -112,10 +112,10 @@ func (modules) Finalize(exported *Exported) error { other = v.module version = v.version } - primaryGomod += fmt.Sprintf("\t%v %v\n", other, version) + fmt.Fprintf(&primaryGomod, "\t%v %v\n", other, version) } - primaryGomod += ")\n" - if err := ioutil.WriteFile(filepath.Join(primaryDir, "go.mod"), []byte(primaryGomod), 0644); err != nil { + fmt.Fprintf(&primaryGomod, ")\n") + if err := os.WriteFile(filepath.Join(primaryDir, "go.mod"), primaryGomod.Bytes(), 0644); err != nil { return err } @@ -136,7 +136,7 @@ func (modules) Finalize(exported *Exported) error { if v, ok := versions[module]; ok { module = v.module } - if err := ioutil.WriteFile(modfile, []byte("module "+module+"\n"), 0644); err != nil { + if err := os.WriteFile(modfile, []byte("module "+module+"\n"), 0644); err != nil { return err } files["go.mod"] = modfile @@ -172,8 +172,6 @@ func (modules) Finalize(exported *Exported) error { "GOPROXY="+proxydir.ToURL(modProxyDir), "GOSUMDB=off", ) - gocmdRunner := &gocommand.Runner{} - packagesinternal.SetGoCmdRunner(exported.Config, gocmdRunner) // Run go mod download to recreate the mod cache dir with all the extra // stuff in cache. All the files created by Export should be recreated. @@ -184,16 +182,14 @@ func (modules) Finalize(exported *Exported) error { BuildFlags: exported.Config.BuildFlags, WorkingDir: exported.Config.Dir, } - if _, err := gocmdRunner.Run(context.Background(), inv); err != nil { - return err - } - return nil + _, err := new(gocommand.Runner).Run(context.Background(), inv) + return err } func writeModuleFiles(rootDir, module, ver string, filePaths map[string]string) error { fileData := make(map[string][]byte) for name, path := range filePaths { - contents, err := ioutil.ReadFile(path) + contents, err := os.ReadFile(path) if err != nil { return err } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/modules_111.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/modules_111.go deleted file mode 100644 index 4b976f6fd..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/packages/packagestest/modules_111.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.11 -// +build go1.11 - -package packagestest - -func init() { - All = append(All, Modules) -} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go similarity index 88% rename from vendor/golang.org/x/tools/go/types/objectpath/objectpath.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index aa7dfaccf..a2386c347 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -26,15 +26,15 @@ package objectpath import ( "fmt" "go/types" - "sort" "strconv" "strings" - "golang.org/x/tools/internal/typeparams" - - _ "unsafe" // for go:linkname + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" ) +// TODO(adonovan): think about generic aliases. + // A Path is an opaque name that identifies a types.Object // relative to its package. Conceptually, the name consists of a // sequence of destructuring operations applied to the package scope @@ -123,8 +123,7 @@ func For(obj types.Object) (Path, error) { // An Encoder amortizes the cost of encoding the paths of multiple objects. // The zero value of an Encoder is ready to use. type Encoder struct { - scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() - namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects } // For returns the path to an object relative to its package, @@ -139,6 +138,17 @@ type Encoder struct { // These objects are sufficient to define the API of their package. // The objects described by a package's export data are drawn from this set. // +// The set of objects accessible from a package's Scope depends on +// whether the package was produced by type-checking syntax, or +// reading export data; the latter may have a smaller Scope since +// export data trims objects that are not reachable from an exported +// declaration. For example, the For function will return a path for +// an exported method of an unexported type that is not reachable +// from any public declaration; this path will cause the Object +// function to fail if called on a package loaded from export data. +// TODO(adonovan): is this a bug or feature? Should this package +// compute accessibility in the same way? +// // For does not return a path for predeclared names, imported package // names, local names, and unexported package-level names (except // types). @@ -216,7 +226,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := obj.Type().(*typeparams.TypeParam); !ok { + if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. return "", fmt.Errorf("no path for %v", obj) @@ -257,15 +267,14 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // the best paths because non-types may // refer to types, but not the reverse. empty := make([]byte, 0, 48) // initial space - names := enc.scopeNames(scope) - for _, name := range names { - o := scope.Lookup(name) + objs := enc.scopeObjects(scope) + for _, o := range objs { tname, ok := o.(*types.TypeName) if !ok { continue // handle non-types in second pass } - path := append(empty, name...) 
+ path := append(empty, o.Name()...) path = append(path, opType) T := o.Type() @@ -277,7 +286,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } } else { if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { // generic named type return Path(r), nil } @@ -291,9 +300,8 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Then inspect everything else: // non-types, and declared methods of defined types. - for _, name := range names { - o := scope.Lookup(name) - path := append(empty, name...) + for _, o := range objs { + path := append(empty, o.Name()...) if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) @@ -305,12 +313,14 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. - if T, ok := o.Type().(*types.Named); ok { + if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) - // Note that method index here is always with respect - // to canonical ordering of methods, regardless of how - // they appear in the underlying type. - for i, m := range enc.namedMethods(T) { + // The method index here is always with respect + // to the underlying go/types data structures, + // which ultimately derives from source order + // and must be preserved by export data. + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) path2 := appendOpArg(path, opMethod, i) if m == obj { return Path(path2), nil // found declared method @@ -384,17 +394,12 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // of objectpath will only be giving us origin methods, anyway, as referring // to instantiated methods is usually not useful. - if typeparams.OriginMethod(meth) != meth { + if meth.Origin() != meth { return "", false } - recvT := meth.Type().(*types.Signature).Recv().Type() - if ptr, ok := recvT.(*types.Pointer); ok { - recvT = ptr.Elem() - } - - named, ok := recvT.(*types.Named) - if !ok { + _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) + if named == nil { return "", false } @@ -411,8 +416,12 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { path := make([]byte, 0, len(name)+8) path = append(path, name...) path = append(path, opType) - for i, m := range enc.namedMethods(named) { - if m == meth { + + // Method indices are w.r.t. the go/types data structures, + // ultimately deriving from source order, + // which is preserved by export data. + for i := 0; i < named.NumMethods(); i++ { + if named.Method(i) == meth { path = appendOpArg(path, opMethod, i) return Path(path), true } @@ -433,6 +442,8 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // nil, it will be allocated as necessary. func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { switch T := T.(type) { + case *aliases.Alias: + return find(obj, aliases.Unalias(T), path, seen) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. 
@@ -451,7 +462,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } return find(obj, T.Elem(), append(path, opElem), seen) case *types.Signature: - if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { + if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { return r } if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { @@ -494,7 +505,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } } return nil - case *typeparams.TypeParam: + case *types.TypeParam: name := T.Obj() if name == obj { return append(path, opObj) @@ -514,7 +525,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) path2 := appendOpArg(path, opTypeParam, i) @@ -527,11 +538,11 @@ func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte // Object returns the object denoted by path p within the package pkg. func Object(pkg *types.Package, p Path) (types.Object, error) { - if p == "" { + pathstr := string(p) + if pathstr == "" { return nil, fmt.Errorf("empty path") } - pathstr := string(p) var pkgobj, suffix string if dot := strings.IndexByte(pathstr, opType); dot < 0 { pkgobj = pathstr @@ -551,7 +562,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } // abstraction of *types.{Named,Signature} type hasTypeParams interface { - TypeParams() *typeparams.TypeParamList + TypeParams() *types.TypeParamList } // abstraction of *types.{Named,TypeParam} type hasObj interface { @@ -605,6 +616,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil + t = aliases.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map @@ -653,7 +665,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { t = tparams.At(index) case opConstraint: - tparam, ok := t.(*typeparams.TypeParam) + tparam, ok := t.(*types.TypeParam) if !ok { return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) } @@ -690,11 +702,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { obj = t.Method(index) // Id-ordered case *types.Named: - methods := namedMethods(t) // (unmemoized) - if index >= len(methods) { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods)) + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) } - obj = methods[index] // Id-ordered + obj = t.Method(index) default: return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) @@ -721,44 +732,22 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { return obj, nil // success } -// namedMethods returns the methods of a Named type in ascending Id order. -func namedMethods(named *types.Named) []*types.Func { - methods := make([]*types.Func, named.NumMethods()) - for i := range methods { - methods[i] = named.Method(i) - } - sort.Slice(methods, func(i, j int) bool { - return methods[i].Id() < methods[j].Id() - }) - return methods -} - -// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. 
-func (enc *Encoder) namedMethods(named *types.Named) []*types.Func { - m := enc.namedMethodsMemo - if m == nil { - m = make(map[*types.Named][]*types.Func) - enc.namedMethodsMemo = m - } - methods, ok := m[named] - if !ok { - methods = namedMethods(named) // allocates and sorts - m[named] = methods - } - return methods -} - -// scopeNames is a memoization of scope.Names. Callers must not modify the result. -func (enc *Encoder) scopeNames(scope *types.Scope) []string { - m := enc.scopeNamesMemo +// scopeObjects is a memoization of scope objects. +// Callers must not modify the result. +func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { + m := enc.scopeMemo if m == nil { - m = make(map[*types.Scope][]string) - enc.scopeNamesMemo = m + m = make(map[*types.Scope][]types.Object) + enc.scopeMemo = m } - names, ok := m[scope] + objs, ok := m[scope] if !ok { - names = scope.Names() // allocates and sorts - m[scope] = names + names := scope.Names() // allocates and sorts + objs = make([]types.Object, len(names)) + for i, name := range names { + objs[i] = scope.Lookup(name) + } + m[scope] = objs } - return names + return objs } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/aliases/aliases.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/aliases/aliases.go new file mode 100644 index 000000000..c24c2eee4 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package aliases + +import ( + "go/token" + "go/types" +) + +// Package aliases defines backward compatible shims +// for the types.Alias type representation added in 1.22. +// This defines placeholders for x/tools until 1.26. + +// NewAlias creates a new TypeName in Package pkg that +// is an alias for the type rhs. +// +// The enabled parameter determines whether the resulting [TypeName]'s +// type is an [types.Alias]. Its value must be the result of a call to +// [Enabled], which computes the effective value of +// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled +// function is expensive and should be called once per task (e.g. +// package import), not once per call to NewAlias. +func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { + if enabled { + tname := types.NewTypeName(pos, pkg, name, nil) + newAlias(tname, rhs) + return tname + } + return types.NewTypeName(pos, pkg, name, rhs) +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go new file mode 100644 index 000000000..c027b9f31 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go @@ -0,0 +1,31 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package aliases + +import ( + "go/types" +) + +// Alias is a placeholder for a go/types.Alias for <=1.21. +// It will never be created by go/types. 
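+// No value of this type can ever be constructed on these Go versions:
+// Enabled reports false, so NewAlias falls back to types.NewTypeName and
+// Unalias is the identity function. The methods panic because reaching them
+// would indicate a bug; a type switch on *aliases.Alias simply never matches
+// before go1.22.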
+type Alias struct{} + +func (*Alias) String() string { panic("unreachable") } +func (*Alias) Underlying() types.Type { panic("unreachable") } +func (*Alias) Obj() *types.TypeName { panic("unreachable") } +func Rhs(alias *Alias) types.Type { panic("unreachable") } + +// Unalias returns the type t for go <=1.21. +func Unalias(t types.Type) types.Type { return t } + +func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } + +// Enabled reports whether [NewAlias] should create [types.Alias] types. +// +// Before go1.22, this function always returns false. +func Enabled() bool { return false } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go new file mode 100644 index 000000000..b32995484 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -0,0 +1,63 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package aliases + +import ( + "go/ast" + "go/parser" + "go/token" + "go/types" +) + +// Alias is an alias of types.Alias. +type Alias = types.Alias + +// Rhs returns the type on the right-hand side of the alias declaration. +func Rhs(alias *Alias) types.Type { + if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { + return alias.Rhs() // go1.23+ + } + + // go1.22's Alias didn't have the Rhs method, + // so Unalias is the best we can do. + return Unalias(alias) +} + +// Unalias is a wrapper of types.Unalias. +func Unalias(t types.Type) types.Type { return types.Unalias(t) } + +// newAlias is an internal alias around types.NewAlias. +// Direct usage is discouraged as the moment. +// Try to use NewAlias instead. +func newAlias(tname *types.TypeName, rhs types.Type) *Alias { + a := types.NewAlias(tname, rhs) + // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. + Unalias(a) + return a +} + +// Enabled reports whether [NewAlias] should create [types.Alias] types. +// +// This function is expensive! Call it sparingly. +func Enabled() bool { + // The only reliable way to compute the answer is to invoke go/types. + // We don't parse the GODEBUG environment variable, because + // (a) it's tricky to do so in a manner that is consistent + // with the godebug package; in particular, a simple + // substring check is not good enough. The value is a + // rightmost-wins list of options. But more importantly: + // (b) it is impossible to detect changes to the effective + // setting caused by os.Setenv("GODEBUG"), as happens in + // many tests. Therefore any attempt to cache the result + // is just incorrect. + fset := token.NewFileSet() + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) + _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) + return enabled +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/event/keys/util.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/event/keys/util.go new file mode 100644 index 000000000..c0e8e731c --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/event/keys/util.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "sort" + "strings" +) + +// Join returns a canonical join of the keys in S: +// a sorted comma-separated string list. +func Join[S ~[]T, T ~string](s S) string { + strs := make([]string, 0, len(s)) + for _, v := range s { + strs = append(strs, string(v)) + } + sort.Strings(strs) + return strings.Join(strs, ",") +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/bimport.go new file mode 100644 index 000000000..d98b0db2a --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -0,0 +1,150 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" + "sync" +) + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*fileInfo +} + +type fileInfo struct { + file *token.File + lastline int +} + +const maxlines = 64 * 1024 + +func (s *fakeFileSet) pos(file string, line, column int) token.Pos { + // TODO(mdempsky): Make use of column. + + // Since we don't know the set of needed file positions, we reserve maxlines + // positions per file. We delay calling token.File.SetLines until all + // positions have been calculated (by way of fakeFileSet.setLines), so that + // we can avoid setting unnecessary lines. See also golang/go#46586. + f := s.files[file] + if f == nil { + f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)} + s.files[file] = f + } + if line > maxlines { + line = 1 + } + if line > f.lastline { + f.lastline = line + } + + // Return a fake position assuming that f.file consists only of newlines. 
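+	// (Offset n-1 stands in for line n: with Base()==100, line 3 becomes
+	// Pos 102, and once setLines installs the synthetic line table below,
+	// Position() resolves that back to line 3, column 1.)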
+ return token.Pos(f.file.Base() + line - 1) +} + +func (s *fakeFileSet) setLines() { + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + for _, f := range s.files { + f.file.SetLines(fakeLines[:f.lastline]) + } +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + predecl = append(predecl, additionalPredeclared()...) + }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go similarity index 100% rename from MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go new file mode 100644 index 000000000..39df91124 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -0,0 +1,266 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. 
+// +// The encoding is deterministic: if the encoder is applied twice to +// the same types.Package data structure, both encodings are equal. +// This property may be important to avoid spurious changes in +// applications such as build systems. +// +// However, the encoder is not necessarily idempotent. Importing an +// exported package may yield a types.Package that, while it +// represents the same set of Go types as the original, may differ in +// the details of its internal representation. Because of these +// differences, re-encoding the imported package may yield a +// different, but equally valid, encoding of the package. +package gcimporter // import "golang.org/x/tools/internal/gcimporter" + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "go/token" + "go/types" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" +) + +const ( + // Enable debug during development: it adds some additional checks, and + // prevents errors from being recovered. + debug = false + + // If trace is set, debugging output is printed to std out. + trace = false +) + +var exportMap sync.Map // package dir → func() (string, bool) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +func lookupGorootExport(pkgDir string) (string, bool) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { + listOnce.Do(func() { + cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + var output []byte + output, err := cmd.Output() + if err != nil { + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + return + } + + exportPath = exports[0] + }) + + return exportPath, exportPath != "" + }) + } + + return f.(func() (string, bool))() +} + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
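+		// (build.FindOnly stops go/build after locating the package directory,
+		// and build.AllowBinary accepts a compiled package object even when the
+		// sources are not installed.)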
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + var ok bool + if bp.Goroot && bp.Dir != "" { + filename, ok = lookupGorootExport(bp.Dir) + } + if !ok { + id = path // make sure we have an id to print in error message + return + } + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + } + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + if filename != "" { + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + var size int64 + buf := bufio.NewReader(rc) + if hdr, size, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$B\n": + var data []byte + data, err = io.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // Select appropriate importer. 
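+		// The first byte of the payload selects the decoder: 'i' is the indexed
+		// format (IImportData), 'u' is the unified format (UImportData), and the
+		// legacy binary tags 'v', 'c' and 'd' are rejected with an error.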
+ if len(data) > 0 { + switch data[0] { + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) + + case 'i': // indexed, till go1.19 + _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'u': // unified, from go1.20 + _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) + } + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/iexport.go new file mode 100644 index 000000000..deeb67f31 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -0,0 +1,1332 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "reflect" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/tokeninternal" +) + +// IExportShallow encodes "shallow" export data for the specified package. +// +// No promises are made about the encoding other than that it can be decoded by +// the same version of IIExportShallow. If you plan to save export data in the +// file system, be sure to include a cryptographic digest of the executable in +// the key to avoid version skew. +// +// If the provided reportf func is non-nil, it will be used for reporting bugs +// encountered during export. +// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. +func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) { + // In principle this operation can only fail if out.Write fails, + // but that's impossible for bytes.Buffer---and as a matter of + // fact iexportCommon doesn't even check for I/O errors. + // TODO(adonovan): handle I/O errors properly. + // TODO(adonovan): use byte slices throughout, avoiding copying. + const bundle, shallow = false, true + var out bytes.Buffer + err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) + return out.Bytes(), err +} + +// IImportShallow decodes "shallow" types.Package data encoded by +// IExportShallow in the same executable. This function cannot import data from +// cmd/compile or gcexportdata.Write. +// +// The importer calls getPackages to obtain package symbols for all +// packages mentioned in the export data, including the one being +// decoded. +// +// If the provided reportf func is non-nil, it will be used for reporting bugs +// encountered during import. 
+// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. +func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) { + const bundle = false + const shallow = true + pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf) + if err != nil { + return nil, err + } + return pkgs[0], nil +} + +// ReportFunc is the type of a function used to report formatted bugs. +type ReportFunc = func(string, ...interface{}) + +// Current bundled export format version. Increase with each format change. +// 0: initial implementation +const bundleVersion = 0 + +// IExportData writes indexed export data for pkg to out. +// +// If no file set is provided, position info will be missing. +// The package path of the top-level package will not be recorded, +// so that calls to IImportData can override with a provided package path. +func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + const bundle, shallow = false, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) +} + +// IExportBundle writes an indexed export bundle for pkgs to out. +func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + const bundle, shallow = true, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs) +} + +func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) { + if !debug { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + } + + p := iexporter{ + fset: fset, + version: version, + shallow: shallow, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + tparamNames: map[types.Object]string{}, + typIndex: map[types.Type]uint64{}, + } + if !bundle { + p.localpkg = pkgs[0] + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + for _, pkg := range pkgs { + scope := pkg.Scope() + for _, name := range scope.Names() { + if token.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + if bundle { + // Ensure pkg and its imports are included in the index. + p.allPkgs[pkg] = true + for _, imp := range pkg.Imports() { + p.allPkgs[imp] = true + } + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Produce index of offset of each file record in files. + var files intWriter + var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i + if p.shallow { + fileOffset = make([]uint64, len(p.fileInfos)) + for i, info := range p.fileInfos { + fileOffset[i] = uint64(files.Len()) + p.encodeFile(&files, info.file, info.needed) + } + } + + // Append indices to data0 section. + dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex) + + if bundle { + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.pkg(pkg) + imps := pkg.Imports() + w.uint64(uint64(len(imps))) + for _, imp := range imps { + w.pkg(imp) + } + } + } + w.flush() + + // Assemble header. 
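+	// The header records, as uvarints: an optional bundle version, the export
+	// format version, the length of the string section, then (in shallow mode
+	// only) the length of the file section and the per-file offset table, and
+	// finally the length of the declaration data that follows.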
+ var hdr intWriter + if bundle { + hdr.uint64(bundleVersion) + } + hdr.uint64(uint64(p.version)) + hdr.uint64(uint64(p.strings.Len())) + if p.shallow { + hdr.uint64(uint64(files.Len())) + hdr.uint64(uint64(len(fileOffset))) + for _, offset := range fileOffset { + hdr.uint64(offset) + } + } + hdr.uint64(dataLen) + + // Flush output. + io.Copy(out, &hdr) + io.Copy(out, &p.strings) + if p.shallow { + io.Copy(out, &files) + } + io.Copy(out, &p.data0) + + return nil +} + +// encodeFile writes to w a representation of the file sufficient to +// faithfully restore position information about all needed offsets. +// Mutates the needed array. +func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) { + _ = needed[0] // precondition: needed is non-empty + + w.uint64(p.stringOff(file.Name())) + + size := uint64(file.Size()) + w.uint64(size) + + // Sort the set of needed offsets. Duplicates are harmless. + sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) + + lines := tokeninternal.GetLines(file) // byte offset of each line start + w.uint64(uint64(len(lines))) + + // Rather than record the entire array of line start offsets, + // we save only a sparse list of (index, offset) pairs for + // the start of each line that contains a needed position. + var sparse [][2]int // (index, offset) pairs +outer: + for i, lineStart := range lines { + lineEnd := size + if i < len(lines)-1 { + lineEnd = uint64(lines[i+1]) + } + // Does this line contains a needed offset? + if needed[0] < lineEnd { + sparse = append(sparse, [2]int{i, lineStart}) + for needed[0] < lineEnd { + needed = needed[1:] + if len(needed) == 0 { + break outer + } + } + } + } + + // Delta-encode the columns. + w.uint64(uint64(len(sparse))) + var prev [2]int + for _, pair := range sparse { + w.uint64(uint64(pair[0] - prev[0])) + w.uint64(uint64(pair[1] - prev[1])) + prev = pair + } +} + +// writeIndex writes out an object index. mainIndex indicates whether +// we're writing out the main index, which is also read by +// non-compiler tools and includes a complete package description +// (i.e., name and height). +func (w *exportWriter) writeIndex(index map[types.Object]uint64) { + type pkgObj struct { + obj types.Object + name string // qualified name; differs from obj.Name for type params + } + // Build a map from packages to objects from that package. + pkgObjs := map[*types.Package][]pkgObj{} + + // For the main index, make sure to include every package that + // we reference, even if we're not exporting (or reexporting) + // any symbols from it. + if w.p.localpkg != nil { + pkgObjs[w.p.localpkg] = nil + } + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + name := w.p.exportName(obj) + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name}) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].name < objs[j].name + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(w.exportPath(pkg)) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.name) + w.uint64(index[obj.obj]) + } + } +} + +// exportName returns the 'exported' name of an object. 
It differs from +// obj.Name() only for type parameters (see tparamExportName for details). +func (p *iexporter) exportName(obj types.Object) (res string) { + if name := p.tparamNames[obj]; name != "" { + return name + } + return obj.Name() +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + version int + + shallow bool // don't put types from other packages in the index + objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated + localpkg *types.Package // (nil in bundle mode) + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. + allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + // In shallow mode, object positions are encoded as (file, offset). + // Each file is recorded as a line-number table. + // Only the lines of needed positions are saved faithfully. + fileInfo map[*token.File]uint64 // value is index in fileInfos + fileInfos []*filePositions + + data0 intWriter + declIndex map[types.Object]uint64 + tparamNames map[types.Object]string // typeparam->exported name + typIndex map[types.Type]uint64 + + indent int // for tracing support +} + +type filePositions struct { + file *token.File + needed []uint64 // unordered list of needed file offsets +} + +func (p *iexporter) trace(format string, args ...interface{}) { + if !trace { + // Call sites should also be guarded, but having this check here allows + // easily enabling/disabling debug trace statements. + return + } + fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) +} + +// objectpathEncoder returns the lazily allocated objectpath.Encoder to use +// when encoding objects in other packages during shallow export. +// +// Using a shared Encoder amortizes some of cost of objectpath search. +func (p *iexporter) objectpathEncoder() *objectpath.Encoder { + if p.objEncoder == nil { + p.objEncoder = new(objectpath.Encoder) + } + return p.objEncoder +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. +func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it. +func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) { + index, ok := p.fileInfo[file] + if !ok { + index = uint64(len(p.fileInfo)) + p.fileInfos = append(p.fileInfos, &filePositions{file: file}) + if p.fileInfo == nil { + p.fileInfo = make(map[*token.File]uint64) + } + p.fileInfo[file] = index + } + // Record each needed offset. + info := p.fileInfos[index] + offset := uint64(file.Offset(pos)) + info.needed = append(info.needed, offset) + + return index, offset +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. + // Caller should not ask us to do export it. + if obj.Pkg() == types.Unsafe { + panic("cannot export package unsafe") + } + + // Shallow export data: don't index decls from other packages. 
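+	// (Such objects are instead referenced at each use site as a package path
+	// plus an objectpath string; see exportWriter.objectPath below.)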
+ if p.shallow && obj.Pkg() != p.localpkg { + return + } + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark obj present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + prevFile string + prevLine int64 + prevColumn int64 +} + +func (w *exportWriter) exportPath(pkg *types.Package) string { + if pkg == w.p.localpkg { + return "" + } + return pkg.Path() +} + +func (p *iexporter) doDecl(obj types.Object) { + if trace { + p.trace("exporting decl %v (%T)", obj, obj) + p.indent++ + defer func() { + p.indent-- + p.trace("=> %s", obj) + }() + } + w := p.newWriter() + + switch obj := obj.(type) { + case *types.Var: + w.tag(varTag) + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + // We shouldn't see methods in the package scope, + // but the type checker may repair "func () F() {}" + // to "func (Invalid) F()" and then treat it like "func F()", + // so allow that. See golang/go#57729. + if sig.Recv().Type() != types.Typ[types.Invalid] { + panic(internalErrorf("unexpected method: %v", sig)) + } + } + + // Function. + if sig.TypeParams().Len() == 0 { + w.tag(funcTag) + } else { + w.tag(genericFuncTag) + } + w.pos(obj.Pos()) + // The tparam list of the function type is the declaration of the type + // params. So, write out the type params right now. Then those type params + // will be referenced via their type offset (via typOff) in all other + // places in the signature and function where they are used. + // + // While importing the type parameters, tparamList computes and records + // their export name, so that it can be later used when writing the index. + if tparams := sig.TypeParams(); tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + w.signature(sig) + + case *types.Const: + w.tag(constTag) + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + t := obj.Type() + + if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { + w.tag(typeParamTag) + w.pos(obj.Pos()) + constraint := tparam.Constraint() + if p.version >= iexportVersionGo1_18 { + implicit := false + if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { + implicit = iface.IsImplicit() + } + w.bool(implicit) + } + w.typ(constraint, obj.Pkg()) + break + } + + if obj.IsAlias() { + w.tag(aliasTag) + w.pos(obj.Pos()) + if alias, ok := t.(*aliases.Alias); ok { + // Preserve materialized aliases, + // even of non-exported types. + t = aliases.Rhs(alias) + } + w.typ(t, obj.Pkg()) + break + } + + // Defined type. + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + if named.TypeParams().Len() == 0 { + w.tag(typeTag) + } else { + w.tag(genericTypeTag) + } + w.pos(obj.Pos()) + + if named.TypeParams().Len() > 0 { + // While importing the type parameters, tparamList computes and records + // their export name, so that it can be later used when writing the index. 
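+			// For example (see tparamExportName), a type parameter T of a generic
+			// type List is recorded under the exported name "List.T", and a blank
+			// ("_") parameter in the first position would be recorded as "List.$0".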
+ w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg()) + } + + underlying := named.Underlying() + w.typ(underlying, obj.Pkg()) + + if types.IsInterface(t) { + break + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + + // Receiver type parameters are type arguments of the receiver type, so + // their name must be qualified before exporting recv. + if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { + prefix := obj.Name() + "." + m.Name() + for i := 0; i < rparams.Len(); i++ { + rparam := rparams.At(i) + name := tparamExportName(prefix, rparam) + w.p.tparamNames[rparam.Obj()] = name + } + } + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + if w.p.shallow { + w.posV2(pos) + } else if w.p.version >= iexportVersionPosCol { + w.posV1(pos) + } else { + w.posV0(pos) + } +} + +// posV2 encoding (used only in shallow mode) records positions as +// (file, offset), where file is the index in the token.File table +// (which records the file name and newline offsets) and offset is a +// byte offset. It effectively ignores //line directives. +func (w *exportWriter) posV2(pos token.Pos) { + if pos == token.NoPos { + w.uint64(0) + return + } + file := w.p.fset.File(pos) // fset must be non-nil + index, offset := w.p.fileIndexAndOffset(file, pos) + w.uint64(1 + index) + w.uint64(offset) +} + +func (w *exportWriter) posV1(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + column := int64(p.Column) + + deltaColumn := (column - w.prevColumn) << 1 + deltaLine := (line - w.prevLine) << 1 + + if file != w.prevFile { + deltaLine |= 1 + } + if deltaLine != 0 { + deltaColumn |= 1 + } + + w.int64(deltaColumn) + if deltaColumn&1 != 0 { + w.int64(deltaLine) + if deltaLine&1 != 0 { + w.string(file) + } + } + + w.prevFile = file + w.prevLine = line + w.prevColumn = column +} + +func (w *exportWriter) posV0(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(w.exportPath(pkg)) +} + +func (w *exportWriter) qualifiedType(obj *types.TypeName) { + name := w.p.exportName(obj) + + // Ensure any referenced declarations are written out too. + w.p.pushDecl(obj) + w.string(name) + w.pkg(obj.Pkg()) +} + +// TODO(rfindley): what does 'pkg' even mean here? 
It would be better to pass +// it in explicitly into signatures and structs that may use it for +// constructing fields. +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + if trace { + w.p.trace("exporting type %s (%T)", t, t) + w.p.indent++ + defer func() { + w.p.indent-- + w.p.trace("=> %s", t) + }() + } + switch t := t.(type) { + case *aliases.Alias: + // TODO(adonovan): support parameterized aliases, following *types.Named. + w.startType(aliasType) + w.qualifiedType(t.Obj()) + + case *types.Named: + if targs := t.TypeArgs(); targs.Len() > 0 { + w.startType(instanceType) + // TODO(rfindley): investigate if this position is correct, and if it + // matters. + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(t.Origin(), pkg) + return + } + w.startType(definedType) + w.qualifiedType(t.Obj()) + + case *types.TypeParam: + w.startType(typeParamType) + w.qualifiedType(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.pkg(pkg) + w.signature(t) + + case *types.Struct: + w.startType(structType) + n := t.NumFields() + // Even for struct{} we must emit some qualifying package, because that's + // what the compiler does, and thus that's what the importer expects. + fieldPkg := pkg + if n > 0 { + fieldPkg = t.Field(0).Pkg() + } + if fieldPkg == nil { + // TODO(rfindley): improve this very hacky logic. + // + // The importer expects a package to be set for all struct types, even + // those with no fields. A better encoding might be to set NumFields + // before pkg. setPkg panics with a nil package, which may be possible + // to reach with invalid packages (and perhaps valid packages, too?), so + // (arbitrarily) set the localpkg if available. + // + // Alternatively, we may be able to simply guarantee that pkg != nil, by + // reconsidering the encoding of constant values. 
+ if w.p.shallow { + fieldPkg = w.p.localpkg + } else { + panic(internalErrorf("no package to set for empty struct")) + } + } + w.pkg(fieldPkg) + w.uint64(uint64(n)) + + for i := 0; i < n; i++ { + f := t.Field(i) + if w.p.shallow { + w.objectPath(f) + } + w.pos(f.Pos()) + w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg + w.typ(f.Type(), fieldPkg) + w.bool(f.Anonymous()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.pkg(pkg) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + ft := t.EmbeddedType(i) + tPkg := pkg + if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { + w.pos(named.Obj().Pos()) + } else { + w.pos(token.NoPos) + } + w.typ(ft, tPkg) + } + + // See comment for struct fields. In shallow mode we change the encoding + // for interface methods that are promoted from other packages. + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + if w.p.shallow { + w.objectPath(m) + } + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + case *types.Union: + w.startType(unionType) + nt := t.Len() + w.uint64(uint64(nt)) + for i := 0; i < nt; i++ { + term := t.Term(i) + w.bool(term.Tilde()) + w.typ(term.Type(), pkg) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +// objectPath writes the package and objectPath to use to look up obj in a +// different package, when encoding in "shallow" mode. +// +// When doing a shallow import, the importer creates only the local package, +// and requests package symbols for dependencies from the client. +// However, certain types defined in the local package may hold objects defined +// (perhaps deeply) within another package. +// +// For example, consider the following: +// +// package a +// func F() chan * map[string] struct { X int } +// +// package b +// import "a" +// var B = a.F() +// +// In this example, the type of b.B holds fields defined in package a. +// In order to have the correct canonical objects for the field defined in the +// type of B, they are encoded as objectPaths and later looked up in the +// importer. The same problem applies to interface methods. +func (w *exportWriter) objectPath(obj types.Object) { + if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg { + // obj.Pkg() may be nil for the builtin error.Error. + // In this case, or if obj is declared in the local package, no need to + // encode. + w.string("") + return + } + objectPath, err := w.p.objectpathEncoder().For(obj) + if err != nil { + // Fall back to the empty string, which will cause the importer to create a + // new object, which matches earlier behavior. Creating a new object is + // sufficient for many purposes (such as type checking), but causes certain + // references algorithms to fail (golang/go#60819). However, we didn't + // notice this problem during months of gopls@v0.12.0 testing. + // + // TODO(golang/go#61674): this workaround is insufficient, as in the case + // where the field forwarded from an instantiated type that may not appear + // in the export data of the original package: + // + // // package a + // type A[P any] struct{ F P } + // + // // package b + // type B a.A[int] + // + // We need to update references algorithms not to depend on this + // de-duplication, at which point we may want to simply remove the + // workaround here. 
+ w.string("") + return + } + w.string(string(objectPath)) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { + w.uint64(uint64(ts.Len())) + for i := 0; i < ts.Len(); i++ { + w.typ(ts.At(i), pkg) + } +} + +func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { + ll := uint64(list.Len()) + w.uint64(ll) + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + // Set the type parameter exportName before exporting its type. + exportName := tparamExportName(prefix, tparam) + w.p.tparamNames[tparam.Obj()] = exportName + w.typ(list.At(i), pkg) + } +} + +const blankMarker = "$" + +// tparamExportName returns the 'exported' name of a type parameter, which +// differs from its actual object name: it is prefixed with a qualifier, and +// blank type parameter names are disambiguated by their index in the type +// parameter list. +func tparamExportName(prefix string, tparam *types.TypeParam) string { + assert(prefix != "") + name := tparam.Obj().Name() + if name == "_" { + name = blankMarker + strconv.Itoa(tparam.Index()) + } + return prefix + "." + name +} + +// tparamName returns the real name of a type parameter, after stripping its +// qualifying prefix and reverting blank-name encoding. See tparamExportName +// for details. +func tparamName(exportName string) string { + // Remove the "path" from the type param name that makes it unique. + ix := strings.LastIndex(exportName, ".") + if ix < 0 { + errorf("malformed type parameter export name %s: missing prefix", exportName) + } + name := exportName[ix+1:] + if strings.HasPrefix(name, blankMarker) { + return "_" + } + return name +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + if w.p.version >= iexportVersionGo1_18 { + w.int64(int64(v.Kind())) + } + + if v.Kind() == constant.Unknown { + // golang/go#60605: treat unknown constant values as if they have invalid type + // + // This loses some fidelity over the package type-checked from source, but that + // is acceptable. 
+ // + // TODO(rfindley): we should switch on the recorded constant kind rather + // than the constant type + return + } + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + w.bool(constant.BoolVal(v)) + case types.IsInteger: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case types.IsFloat: + f := constantToFloat(v) + w.mpfloat(f, typ) + case types.IsComplex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), typ) + case types.IsString: + w.string(constant.StringVal(v)) + default: + if b.Kind() == types.Invalid { + // package contains type errors + break + } + panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying())) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + x = constant.ToFloat(x) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. +// +// Encoding for signed types use the same general approach as for +// unsigned types, except small values use zig-zag encoding and the +// bottom bit of length prefix byte for large values is reserved as a +// sign bit. +// +// The exact boundary between small and large encodings varies +// according to the maximum number of bytes needed to encode a value +// of type typ. As a special case, 8-bit types are always encoded as a +// single byte. +// +// TODO(mdempsky): Is this level of complexity really worthwhile? 
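+//
+// For example, for a constant of type uint64 (maxBytes 8, so small values are
+// 0..247): 7 is written as the single byte 7, while 1000 is written as the
+// length byte 254 (two bytes follow) and then 0x03 0xE8. For a constant of
+// type int64, small values are zig-zag encoded, so 1 becomes the byte 2 and
+// -1 becomes the byte 1.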
+func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. + prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. + if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. 
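+// The queue is a growable ring buffer: head and tail are ever-increasing
+// counters, an element lives at index counter%len(q.ring), and the ring is
+// doubled (copying the live elements) once tail-head reaches len(q.ring).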
+func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +// TODO(adonovan): make this call panic, so that it's symmetric with errorf. +// Otherwise it's easy to forget to do anything with the error. +// +// TODO(adonovan): also, consider switching the names "errorf" and +// "internalErrorf" as the former is used for bugs, whose cause is +// internal inconsistency, whereas the latter is used for ordinary +// situations like bad input, whose cause is external. +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/iimport.go new file mode 100644 index 000000000..136aa0365 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -0,0 +1,1100 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "sort" + "strings" + + "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +// Keep this in sync with constants in iexport.go. 
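+// Version 0 is the original Go 1.11 format, version 1 added column
+// information to positions, and version 2 (the current version) added
+// support for generics in Go 1.18.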
+const ( + iexportVersionGo1_11 = 0 + iexportVersionPosCol = 1 + iexportVersionGo1_18 = 2 + iexportVersionGenerics = 2 + + iexportVersionCurrent = 2 +) + +type ident struct { + pkg *types.Package + name string +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType + typeParamType + instanceType + unionType + aliasType +) + +// Object tags +const ( + varTag = 'V' + funcTag = 'F' + genericFuncTag = 'G' + constTag = 'C' + aliasTag = 'A' + genericAliasTag = 'B' + typeParamTag = 'P' + typeTag = 'T' + genericTypeTag = 'U' +) + +// IImportData imports a package from the serialized package data +// and returns 0 and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { + pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil) + if err != nil { + return 0, nil, err + } + return 0, pkgs[0], nil +} + +// IImportBundle imports a set of packages from the serialized package bundle. +func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { + return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil) +} + +// A GetPackagesFunc function obtains the non-nil symbols for a set of +// packages, creating and recursively importing them as needed. An +// implementation should store each package symbol is in the Pkg +// field of the items array. +// +// Any error causes importing to fail. This can be used to quickly read +// the import manifest of an export data file without fully decoding it. +type GetPackagesFunc = func(items []GetPackagesItem) error + +// A GetPackagesItem is a request from the importer for the package +// symbol of the specified name and path. +type GetPackagesItem struct { + Name, Path string + Pkg *types.Package // to be filled in by GetPackagesFunc call + + // private importer state + pathOffset uint64 + nameIndex map[string]uint64 +} + +// GetPackagesFromMap returns a GetPackagesFunc that retrieves +// packages from the given map of package path to package. +// +// The returned function may mutate m: each requested package that is not +// found is created with types.NewPackage and inserted into m. 
+func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc { + return func(items []GetPackagesItem) error { + for i, item := range items { + pkg, ok := m[item.Path] + if !ok { + pkg = types.NewPackage(item.Path, item.Name) + m[item.Path] = pkg + } + items[i].Pkg = pkg + } + return nil + } +} + +func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) { + const currentVersion = iexportVersionCurrent + version := int64(-1) + if !debug { + defer func() { + if e := recover(); e != nil { + if bundle { + err = fmt.Errorf("%v", e) + } else if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) + } + } + }() + } + + r := &intReader{bytes.NewReader(data), path} + + if bundle { + if v := r.uint64(); v != bundleVersion { + errorf("unknown bundle format version %d", v) + } + } + + version = int64(r.uint64()) + switch version { + case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11: + default: + if version > iexportVersionGo1_18 { + errorf("unstable iexport format version %d, just rebuild compiler and std library", version) + } else { + errorf("unknown iexport format version %d", version) + } + } + + sLen := int64(r.uint64()) + var fLen int64 + var fileOffset []uint64 + if shallow { + // Shallow mode uses a different position encoding. + fLen = int64(r.uint64()) + fileOffset = make([]uint64, r.uint64()) + for i := range fileOffset { + fileOffset[i] = r.uint64() + } + } + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + fileData := data[whence+sLen : whence+sLen+fLen] + declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen] + r.Seek(sLen+fLen+dLen, io.SeekCurrent) + + p := iimporter{ + version: int(version), + ipath: path, + aliases: aliases.Enabled(), + shallow: shallow, + reportf: reportf, + + stringData: stringData, + stringCache: make(map[uint64]string), + fileOffset: fileOffset, + fileData: fileData, + fileCache: make([]*token.File, len(fileOffset)), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + // Separate map for typeparams, keyed by their package and unique + // name. + tparamIndex: make(map[ident]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + } + defer p.fake.setLines() // set lines for files in fset + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + // Gather the relevant packages from the manifest. + items := make([]GetPackagesItem, r.uint64()) + uniquePkgPaths := make(map[string]bool) + for i := range items { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + items[i].Name = pkgName + items[i].Path = pkgPath + items[i].pathOffset = pkgPathOff + + // Read index for package. + nameIndex := make(map[string]uint64) + nSyms := r.uint64() + // In shallow mode, only the current package (i=0) has an index. 
+ assert(!(shallow && i > 0 && nSyms != 0)) + for ; nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + items[i].nameIndex = nameIndex + + uniquePkgPaths[pkgPath] = true + } + // Debugging #63822; hypothesis: there are duplicate PkgPaths. + if len(uniquePkgPaths) != len(items) { + reportf("found duplicate PkgPaths while reading export data manifest: %v", items) + } + + // Request packages all at once from the client, + // enabling a parallel implementation. + if err := getPackages(items); err != nil { + return nil, err // don't wrap this error + } + + // Check the results and complete the index. + pkgList := make([]*types.Package, len(items)) + for i, item := range items { + pkg := item.Pkg + if pkg == nil { + errorf("internal error: getPackages returned nil package for %q", item.Path) + } else if pkg.Path() != item.Path { + errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path) + } else if pkg.Name() != item.Name { + errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name) + } + p.pkgCache[item.pathOffset] = pkg + p.pkgIndex[pkg] = item.nameIndex + pkgList[i] = pkg + } + + if bundle { + pkgs = make([]*types.Package, r.uint64()) + for i := range pkgs { + pkg := p.pkgAt(r.uint64()) + imps := make([]*types.Package, r.uint64()) + for j := range imps { + imps[j] = p.pkgAt(r.uint64()) + } + pkg.SetImports(imps) + pkgs[i] = pkg + } + } else { + if len(pkgList) == 0 { + errorf("no packages found for %s", path) + panic("unreachable") + } + pkgs = pkgList[:1] + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), pkgList[1:]...) + sort.Sort(byPath(list)) + pkgs[0].SetImports(list) + } + + for _, pkg := range pkgs { + if pkg.Complete() { + continue + } + + names := make([]string, 0, len(p.pkgIndex[pkg])) + for name := range p.pkgIndex[pkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(pkg, name) + } + + // package was imported completely and without errors + pkg.MarkComplete() + } + + // SetConstraint can't be called if the constraint type is not yet complete. + // When type params are created in the typeParamTag case of (*importReader).obj(), + // the associated constraint type may not be complete due to recursion. + // Therefore, we defer calling SetConstraint there, and call it here instead + // after all types are complete. + for _, d := range p.later { + d.t.SetConstraint(d.constraint) + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + // Workaround for golang/go#61561. See the doc for instanceList for details. 
+ for _, typ := range p.instanceList { + if iface, _ := typ.Underlying().(*types.Interface); iface != nil { + iface.Complete() + } + } + + return pkgs, nil +} + +type setConstraintArgs struct { + t *types.TypeParam + constraint types.Type +} + +type iimporter struct { + version int + ipath string + + aliases bool + shallow bool + reportf ReportFunc // if non-nil, used to report bugs + + stringData []byte + stringCache map[uint64]string + fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i + fileData []byte + fileCache []*token.File // memoized decoding of file encoded as i + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + tparamIndex map[ident]types.Type + + fake fakeFileSet + interfaceList []*types.Interface + + // Workaround for the go/types bug golang/go#61561: instances produced during + // instantiation may contain incomplete interfaces. Here we only complete the + // underlying type of the instance, which is the most common case but doesn't + // handle parameterized interface literals defined deeper in the type. + instanceList []types.Type // instances for later completion (see golang/go#61561) + + // Arguments for calls to SetConstraint that are deferred due to recursive types + later []setConstraintArgs + + indent int // for tracing support +} + +func (p *iimporter) trace(format string, args ...interface{}) { + if !trace { + // Call sites should also be guarded, but having this check here allows + // easily enabling/disabling debug trace statements. + return + } + fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + if debug { + p.trace("import decl %s", name) + p.indent++ + defer func() { + p.indent-- + p.trace("=> %s", name) + }() + } + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + // In deep mode, the index should be complete. In shallow + // mode, we should have already recursively loaded necessary + // dependencies so the above Lookup succeeds. + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) fileAt(index uint64) *token.File { + file := p.fileCache[index] + if file == nil { + off := p.fileOffset[index] + file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath}) + p.fileCache[index] = file + } + return file +} + +func (p *iimporter) decodeFile(rd intReader) *token.File { + filename := p.stringAt(rd.uint64()) + size := int(rd.uint64()) + file := p.fake.fset.AddFile(filename, -1, size) + + // SetLines requires a nondecreasing sequence. + // Because it is common for clients to derive the interval + // [start, start+len(name)] from a start position, and we + // want to ensure that the end offset is on the same line, + // we fill in the gaps of the sparse encoding with values + // that strictly increase by the largest possible amount. 
+ // This allows us to avoid having to record the actual end + // offset of each needed line. + + lines := make([]int, int(rd.uint64())) + var index, offset int + for i, n := 0, int(rd.uint64()); i < n; i++ { + index += int(rd.uint64()) + offset += int(rd.uint64()) + lines[index] = offset + + // Ensure monotonicity between points. + for j := index - 1; j > 0 && lines[j] == 0; j-- { + lines[j] = lines[j+1] - 1 + } + } + + // Ensure monotonicity after last point. + for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- { + size-- + lines[j] = size + } + + if !file.SetLines(lines) { + errorf("SetLines failed: %d", lines) // can't happen + } + return file +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && canReuse(base, t) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if canReuse(base, t) { + p.typCache[off] = t + } + return t +} + +// canReuse reports whether the type rhs on the RHS of the declaration for def +// may be re-used. +// +// Specifically, if def is non-nil and rhs is an interface type with methods, it +// may not be re-used because we have a convention of setting the receiver type +// for interface methods to def. +func canReuse(def *types.Named, rhs types.Type) bool { + if def == nil { + return true + } + iface, _ := aliases.Unalias(rhs).(*types.Interface) + if iface == nil { + return true + } + // Don't use iface.Empty() here as iface may not be complete. + return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0 +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 + prevColumn int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case aliasTag: + typ := r.typ() + // TODO(adonovan): support generic aliases: + // if tag == genericAliasTag { + // tparams := r.tparamList() + // alias.SetTypeParams(tparams) + // } + r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) + + case constTag: + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case funcTag, genericFuncTag: + var tparams []*types.TypeParam + if tag == genericFuncTag { + tparams = r.tparamList() + } + sig := r.signature(nil, nil, tparams) + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case typeTag, genericTypeTag: + // Types can be recursive. We need to setup a stub + // declaration before recursing. + obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + // Declare obj before calling r.tparamList, so the new type name is recognized + // if used in the constraint of one of its own typeparams (see #48280). 
+ r.declare(obj) + if tag == genericTypeTag { + tparams := r.tparamList() + named.SetTypeParams(tparams) + } + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + + // If the receiver has any targs, set those as the + // rparams of the method (since those are the + // typeparams being used in the method sig/body). + _, recvNamed := typesinternal.ReceiverNamed(recv) + targs := recvNamed.TypeArgs() + var rparams []*types.TypeParam + if targs.Len() > 0 { + rparams = make([]*types.TypeParam, targs.Len()) + for i := range rparams { + rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) + } + } + msig := r.signature(recv, rparams, nil) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case typeParamTag: + // We need to "declare" a typeparam in order to have a name that + // can be referenced recursively (if needed) in the type param's + // bound. + if r.p.version < iexportVersionGenerics { + errorf("unexpected type param type") + } + name0 := tparamName(name) + tn := types.NewTypeName(pos, r.currPkg, name0, nil) + t := types.NewTypeParam(tn, nil) + + // To handle recursive references to the typeparam within its + // bound, save the partial type in tparamIndex before reading the bounds. + id := ident{r.currPkg, name} + r.p.tparamIndex[id] = t + var implicit bool + if r.p.version >= iexportVersionGo1_18 { + implicit = r.bool() + } + constraint := r.typ() + if implicit { + iface, _ := aliases.Unalias(constraint).(*types.Interface) + if iface == nil { + errorf("non-interface constraint marked implicit") + } + iface.MarkImplicit() + } + // The constraint type may not be complete, if we + // are in the middle of a type recursion involving type + // constraints. So, we defer SetConstraint until we have + // completely set up all types in ImportData. + r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint}) + + case varTag: + typ := r.typ() + + r.declare(types.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + if r.p.version >= iexportVersionGo1_18 { + // TODO: add support for using the kind. 
+ _ = constant.Kind(r.int64()) + } + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(x *big.Int, typ *types.Basic) { + signed, maxBytes := intSize(typ) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + x.SetInt64(v) + return + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) + if signed && n&1 != 0 { + x.Neg(x) + } +} + +func (r *importReader) mpfloat(typ *types.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) + } + return constant.Make(&f) +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + if r.p.shallow { + // precise offsets are encoded only in shallow mode + return r.posv2() + } + if r.p.version >= iexportVersionPosCol { + r.posv1() + } else { + r.posv0() + } + + if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 { + return token.NoPos + } + return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) +} + +func (r *importReader) posv0() { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } +} + +func (r *importReader) posv1() { + delta := r.int64() + r.prevColumn += delta >> 1 + if delta&1 != 0 { + delta = r.int64() + r.prevLine += delta >> 1 + if delta&1 != 0 { + r.prevFile = r.string() + } + } +} + +func (r *importReader) posv2() token.Pos { + file := r.uint64() + if file == 0 { + return token.NoPos + } + tf := r.p.fileAt(file - 1) + return tf.Pos(int(r.uint64())) +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := aliases.Unalias(t).(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } 
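
The exportWriter.mpfloat / importReader.mpfloat pair above stores a floating-point constant as an exact integer mantissa plus a power-of-two exponent. A minimal standalone sketch of that decomposition, using only math/big; the decompose helper and the sample value are illustrative additions, not part of the vendored code:

package main

import (
	"fmt"
	"math/big"
)

// decompose splits f into manti × 2**exp, mirroring the steps in
// exportWriter.mpfloat: first f = mant × 2**exp with 0.5 <= mant < 1,
// then mant is shifted left by its own precision so it becomes an
// exact integer, and the exponent is reduced by the same amount.
func decompose(f *big.Float) (*big.Int, int64) {
	var mant big.Float
	exp := int64(f.MantExp(&mant))

	prec := mant.MinPrec() // bits needed to represent mant exactly
	mant.SetMantExp(&mant, int(prec))
	exp -= int64(prec)

	manti, acc := mant.Int(nil)
	if acc != big.Exact {
		panic("mantissa scaling failed") // cannot happen for finite inputs
	}
	return manti, exp
}

func main() {
	m, e := decompose(big.NewFloat(6.25))
	fmt.Println(m, e) // 25 -2, i.e. 6.25 == 25 × 2**-2
}

The importer reverses this: importReader.mpfloat reads the integer mantissa via mpint and, if it is nonzero, applies the stored exponent with SetMantExp.
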
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) (res types.Type) { + k := r.kind() + if debug { + r.p.trace("importing type %d (base: %s)", k, base) + r.p.indent++ + defer func() { + r.p.indent-- + r.p.trace("=> %s", res) + }() + } + switch k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case aliasType, definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil, nil, nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + var field *types.Var + if r.p.shallow { + field, _ = r.objectPathObject().(*types.Var) + } + + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + // Either this is not a shallow import, the field is local, or the + // encoded objectPath failed to produce an object (a bug). + // + // Even in this last, buggy case, fall back on creating a new field. As + // discussed in iexport.go, this is not correct, but mostly works and is + // preferable to failing (for now at least). + if field == nil { + field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + } + + fields[i] = field + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + var method *types.Func + if r.p.shallow { + method, _ = r.objectPathObject().(*types.Func) + } + + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. + var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + msig := r.signature(recv, nil, nil) + + if method == nil { + method = types.NewFunc(mpos, r.currPkg, mname, msig) + } + methods[i] = method + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + + case typeParamType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected type param type") + } + pkg, name := r.qualifiedIdent() + id := ident{pkg, name} + if t, ok := r.p.tparamIndex[id]; ok { + // We're already in the process of importing this typeparam. + return t + } + // Otherwise, import the definition of the typeparam now. + r.p.doDecl(pkg, name) + return r.p.tparamIndex[id] + + case instanceType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + // pos does not matter for instances: they are positioned on the original + // type. + _ = r.pos() + len := r.uint64() + targs := make([]types.Type, len) + for i := range targs { + targs[i] = r.typ() + } + baseType := r.typ() + // The imported instantiated type doesn't include any methods, so + // we must always use the methods of the base (orig) type. 
+ // TODO provide a non-nil *Environment + t, _ := types.Instantiate(nil, baseType, targs, false) + + // Workaround for golang/go#61561. See the doc for instanceList for details. + r.p.instanceList = append(r.p.instanceList, t) + return t + + case unionType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + terms := make([]*types.Term, r.uint64()) + for i := range terms { + terms[i] = types.NewTerm(r.bool(), r.typ()) + } + return types.NewUnion(terms) + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +// objectPathObject is the inverse of exportWriter.objectPath. +// +// In shallow mode, certain fields and methods may need to be looked up in an +// imported package. See the doc for exportWriter.objectPath for a full +// explanation. +func (r *importReader) objectPathObject() types.Object { + objPath := objectpath.Path(r.string()) + if objPath == "" { + return nil + } + pkg := r.pkg() + obj, err := objectpath.Object(pkg, objPath) + if err != nil { + if r.p.reportf != nil { + r.p.reportf("failed to find object for objectPath %q: %v", objPath, err) + } + } + return obj +} + +func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) +} + +func (r *importReader) tparamList() []*types.TypeParam { + n := r.uint64() + if n == 0 { + return nil + } + xs := make([]*types.TypeParam, n) + for i := range xs { + // Note: the standard library importer is tolerant of nil types here, + // though would panic in SetTypeParams. + xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) + } + return xs +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) 
+} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go similarity index 100% rename from MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go similarity index 100% rename from MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go new file mode 100644 index 000000000..0cd3b91b6 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go @@ -0,0 +1,34 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import "go/types" + +const iexportVersion = iexportVersionGenerics + +// additionalPredeclared returns additional predeclared types in go.1.18. +func additionalPredeclared() []types.Type { + return []types.Type{ + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + types.Universe.Lookup("any").Type(), + } +} + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go new file mode 100644 index 000000000..38b624cad --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !goexperiment.unified +// +build !goexperiment.unified + +package gcimporter + +const unifiedIR = false diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go new file mode 100644 index 000000000..b5118d0b3 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.unified +// +build goexperiment.unified + +package gcimporter + +const unifiedIR = true diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go new file mode 100644 index 000000000..2c0770688 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -0,0 +1,728 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Derived from go/internal/gcimporter/ureader.go + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" + "sort" + "strings" + + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/pkgbits" +) + +// A pkgReader holds the shared state for reading a unified IR package +// description. +type pkgReader struct { + pkgbits.PkgDecoder + + fake fakeFileSet + + ctxt *types.Context + imports map[string]*types.Package // previously imported packages, indexed by path + aliases bool // create types.Alias nodes + + // lazily initialized arrays corresponding to the unified IR + // PosBase, Pkg, and Type sections, respectively. + posBases []string // position bases (i.e., file names) + pkgs []*types.Package + typs []types.Type + + // laterFns holds functions that need to be invoked at the end of + // import reading. + laterFns []func() + // laterFors is used in case of 'type A B' to ensure that B is processed before A. + laterFors map[types.Type]int + + // ifaces holds a list of constructed Interfaces, which need to have + // Complete called after importing is done. + ifaces []*types.Interface +} + +// later adds a function to be invoked at the end of import reading. +func (pr *pkgReader) later(fn func()) { + pr.laterFns = append(pr.laterFns, fn) +} + +// See cmd/compile/internal/noder.derivedInfo. +type derivedInfo struct { + idx pkgbits.Index + needed bool +} + +// See cmd/compile/internal/noder.typeInfo. +type typeInfo struct { + idx pkgbits.Index + derived bool +} + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + if !debug { + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) + } + }() + } + + s := string(data) + s = s[:strings.LastIndex(s, "\n$$\n")] + input := pkgbits.NewPkgDecoder(path, s) + pkg = readUnifiedPackage(fset, nil, imports, input) + return +} + +// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing. 
+func (pr *pkgReader) laterFor(t types.Type, fn func()) { + if pr.laterFors == nil { + pr.laterFors = make(map[types.Type]int) + } + pr.laterFors[t] = len(pr.laterFns) + pr.laterFns = append(pr.laterFns, fn) +} + +// readUnifiedPackage reads a package description from the given +// unified IR export data decoder. +func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { + pr := pkgReader{ + PkgDecoder: input, + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + + ctxt: ctxt, + imports: imports, + aliases: aliases.Enabled(), + + posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), + pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), + typs: make([]types.Type, input.NumElems(pkgbits.RelocType)), + } + defer pr.fake.setLines() + + r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) + pkg := r.pkg() + r.Bool() // has init + + for i, n := 0, r.Len(); i < n; i++ { + // As if r.obj(), but avoiding the Scope.Lookup call, + // to avoid eager loading of imports. + r.Sync(pkgbits.SyncObject) + assert(!r.Bool()) + r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + assert(r.Len() == 0) + } + + r.Sync(pkgbits.SyncEOF) + + for _, fn := range pr.laterFns { + fn() + } + + for _, iface := range pr.ifaces { + iface.Complete() + } + + // Imports() of pkg are all of the transitive packages that were loaded. + var imps []*types.Package + for _, imp := range pr.pkgs { + if imp != nil && imp != pkg { + imps = append(imps, imp) + } + } + sort.Sort(byPath(imps)) + pkg.SetImports(imps) + + pkg.MarkComplete() + return pkg +} + +// A reader holds the state for reading a single unified IR element +// within a package. +type reader struct { + pkgbits.Decoder + + p *pkgReader + + dict *readerDict +} + +// A readerDict holds the state for type parameters that parameterize +// the current unified IR element. +type readerDict struct { + // bounds is a slice of typeInfos corresponding to the underlying + // bounds of the element's type parameters. + bounds []typeInfo + + // tparams is a slice of the constructed TypeParams for the element. + tparams []*types.TypeParam + + // devived is a slice of types derived from tparams, which may be + // instantiated while reading the current element. + derived []derivedInfo + derivedTypes []types.Type // lazily instantiated from derived +} + +func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.NewDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.TempDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) retireReader(r *reader) { + pr.RetireDecoder(&r.Decoder) +} + +// @@@ Positions + +func (r *reader) pos() token.Pos { + r.Sync(pkgbits.SyncPos) + if !r.Bool() { + return token.NoPos + } + + // TODO(mdempsky): Delta encoding. 
+ posBase := r.posBase() + line := r.Uint() + col := r.Uint() + return r.p.fake.pos(posBase, int(line), int(col)) +} + +func (r *reader) posBase() string { + return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) +} + +func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { + if b := pr.posBases[idx]; b != "" { + return b + } + + var filename string + { + r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + + // Within types2, position bases have a lot more details (e.g., + // keeping track of where //line directives appeared exactly). + // + // For go/types, we just track the file name. + + filename = r.String() + + if r.Bool() { // file base + // Was: "b = token.NewTrimmedFileBase(filename, true)" + } else { // line base + pos := r.pos() + line := r.Uint() + col := r.Uint() + + // Was: "b = token.NewLineBase(pos, filename, true, line, col)" + _, _, _ = pos, line, col + } + pr.retireReader(r) + } + b := filename + pr.posBases[idx] = b + return b +} + +// @@@ Packages + +func (r *reader) pkg() *types.Package { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { + // TODO(mdempsky): Consider using some non-nil pointer to indicate + // the universe scope, so we don't need to keep re-reading it. + if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +func (r *reader) doPkg() *types.Package { + path := r.String() + switch path { + case "": + path = r.p.PkgPath() + case "builtin": + return nil // universe + case "unsafe": + return types.Unsafe + } + + if pkg := r.p.imports[path]; pkg != nil { + return pkg + } + + name := r.String() + + pkg := types.NewPackage(path, name) + r.p.imports[path] = pkg + + return pkg +} + +// @@@ Types + +func (r *reader) typ() types.Type { + return r.p.typIdx(r.typInfo(), r.dict) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { + idx := info.idx + var where *types.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + var typ types.Type + { + r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ = r.doTyp() + assert(typ != nil) + pr.retireReader(r) + } + // See comment in pkgReader.typIdx explaining how this happens. 
+ if prev := *where; prev != nil { + return prev + } + + *where = typ + return typ +} + +func (r *reader) doTyp() (res types.Type) { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + errorf("unhandled type tag: %v", tag) + panic("unreachable") + + case pkgbits.TypeBasic: + return types.Typ[r.Len()] + + case pkgbits.TypeNamed: + obj, targs := r.obj() + name := obj.(*types.TypeName) + if len(targs) != 0 { + t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) + return t + } + return name.Type() + + case pkgbits.TypeTypeParam: + return r.dict.tparams[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := types.ChanDir(r.Len()) + return types.NewChan(dir, r.typ()) + case pkgbits.TypeMap: + return types.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types.NewPointer(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil, nil, nil) + case pkgbits.TypeSlice: + return types.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) structType() *types.Struct { + fields := make([]*types.Var, r.Len()) + var tags []string + for i := range fields { + pos := r.pos() + pkg, name := r.selector() + ftyp := r.typ() + tag := r.String() + embedded := r.Bool() + + fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) + if tag != "" { + for len(tags) < i { + tags = append(tags, "") + } + tags = append(tags, tag) + } + } + return types.NewStruct(fields, tags) +} + +func (r *reader) unionType() *types.Union { + terms := make([]*types.Term, r.Len()) + for i := range terms { + terms[i] = types.NewTerm(r.Bool(), r.typ()) + } + return types.NewUnion(terms) +} + +func (r *reader) interfaceType() *types.Interface { + methods := make([]*types.Func, r.Len()) + embeddeds := make([]types.Type, r.Len()) + implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() + + for i := range methods { + pos := r.pos() + pkg, name := r.selector() + mtyp := r.signature(nil, nil, nil) + methods[i] = types.NewFunc(pos, pkg, name, mtyp) + } + + for i := range embeddeds { + embeddeds[i] = r.typ() + } + + iface := types.NewInterfaceType(methods, embeddeds) + if implicit { + iface.MarkImplicit() + } + + // We need to call iface.Complete(), but if there are any embedded + // defined types, then we may not have set their underlying + // interface type yet. So we need to defer calling Complete until + // after we've called SetUnderlying everywhere. + // + // TODO(mdempsky): After CL 424876 lands, it should be safe to call + // iface.Complete() immediately. + r.p.ifaces = append(r.p.ifaces, iface) + + return iface +} + +func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + variadic := r.Bool() + + return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) +} + +func (r *reader) params() *types.Tuple { + r.Sync(pkgbits.SyncParams) + + params := make([]*types.Var, r.Len()) + for i := range params { + params[i] = r.param() + } + + return types.NewTuple(params...) 
+} + +func (r *reader) param() *types.Var { + r.Sync(pkgbits.SyncParam) + + pos := r.pos() + pkg, name := r.localIdent() + typ := r.typ() + + return types.NewParam(pos, pkg, name, typ) +} + +// @@@ Objects + +func (r *reader) obj() (types.Object, []types.Type) { + r.Sync(pkgbits.SyncObject) + + assert(!r.Bool()) + + pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + obj := pkgScope(pkg).Lookup(name) + + targs := make([]types.Type, r.Len()) + for i := range targs { + targs[i] = r.typ() + } + + return obj, targs +} + +func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { + + var objPkg *types.Package + var objName string + var tag pkgbits.CodeObj + { + rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) + + objPkg, objName = rname.qualifiedIdent() + assert(objName != "") + + tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + pr.retireReader(rname) + } + + if tag == pkgbits.ObjStub { + assert(objPkg == nil || objPkg == types.Unsafe) + return objPkg, objName + } + + // Ignore local types promoted to global scope (#55110). + if _, suffix := splitVargenSuffix(objName); suffix != "" { + return objPkg, objName + } + + if objPkg.Scope().Lookup(objName) == nil { + dict := pr.objDictIdx(idx) + + r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) + r.dict = dict + + declare := func(obj types.Object) { + objPkg.Scope().Insert(obj) + } + + switch tag { + default: + panic("weird") + + case pkgbits.ObjAlias: + pos := r.pos() + typ := r.typ() + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) + + case pkgbits.ObjConst: + pos := r.pos() + typ := r.typ() + val := r.Value() + declare(types.NewConst(pos, objPkg, objName, typ, val)) + + case pkgbits.ObjFunc: + pos := r.pos() + tparams := r.typeParamNames() + sig := r.signature(nil, nil, tparams) + declare(types.NewFunc(pos, objPkg, objName, sig)) + + case pkgbits.ObjType: + pos := r.pos() + + obj := types.NewTypeName(pos, objPkg, objName, nil) + named := types.NewNamed(obj, nil, nil) + declare(obj) + + named.SetTypeParams(r.typeParamNames()) + + setUnderlying := func(underlying types.Type) { + // If the underlying type is an interface, we need to + // duplicate its methods so we can replace the receiver + // parameter's type (#49906). + if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + methods := make([]*types.Func, iface.NumExplicitMethods()) + for i := range methods { + fn := iface.ExplicitMethod(i) + sig := fn.Type().(*types.Signature) + + recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) + } + + embeds := make([]types.Type, iface.NumEmbeddeds()) + for i := range embeds { + embeds[i] = iface.EmbeddedType(i) + } + + newIface := types.NewInterfaceType(methods, embeds) + r.p.ifaces = append(r.p.ifaces, newIface) + underlying = newIface + } + + named.SetUnderlying(underlying) + } + + // Since go.dev/cl/455279, we can assume rhs.Underlying() will + // always be non-nil. However, to temporarily support users of + // older snapshot releases, we continue to fallback to the old + // behavior for now. + // + // TODO(mdempsky): Remove fallback code and simplify after + // allowing time for snapshot users to upgrade. 
+ rhs := r.typ() + if underlying := rhs.Underlying(); underlying != nil { + setUnderlying(underlying) + } else { + pk := r.p + pk.laterFor(named, func() { + // First be sure that the rhs is initialized, if it needs to be initialized. + delete(pk.laterFors, named) // prevent cycles + if i, ok := pk.laterFors[rhs]; ok { + f := pk.laterFns[i] + pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op + f() // initialize RHS + } + setUnderlying(rhs.Underlying()) + }) + } + + for i, n := 0, r.Len(); i < n; i++ { + named.AddMethod(r.method()) + } + + case pkgbits.ObjVar: + pos := r.pos() + typ := r.typ() + declare(types.NewVar(pos, objPkg, objName, typ)) + } + } + + return objPkg, objName +} + +func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { + + var dict readerDict + + { + r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) + if implicits := r.Len(); implicits != 0 { + errorf("unexpected object with %v implicit type parameter(s)", implicits) + } + + dict.bounds = make([]typeInfo, r.Len()) + for i := range dict.bounds { + dict.bounds[i] = r.typInfo() + } + + dict.derived = make([]derivedInfo, r.Len()) + dict.derivedTypes = make([]types.Type, len(dict.derived)) + for i := range dict.derived { + dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + } + + pr.retireReader(r) + } + // function references follow, but reader doesn't need those + + return &dict +} + +func (r *reader) typeParamNames() []*types.TypeParam { + r.Sync(pkgbits.SyncTypeParamNames) + + // Note: This code assumes it only processes objects without + // implement type parameters. This is currently fine, because + // reader is only used to read in exported declarations, which are + // always package scoped. + + if len(r.dict.bounds) == 0 { + return nil + } + + // Careful: Type parameter lists may have cycles. To allow for this, + // we construct the type parameter list in two passes: first we + // create all the TypeNames and TypeParams, then we construct and + // set the bound type. + + r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds)) + for i := range r.dict.bounds { + pos := r.pos() + pkg, name := r.localIdent() + + tname := types.NewTypeName(pos, pkg, name, nil) + r.dict.tparams[i] = types.NewTypeParam(tname, nil) + } + + typs := make([]types.Type, len(r.dict.bounds)) + for i, bound := range r.dict.bounds { + typs[i] = r.p.typIdx(bound, r.dict) + } + + // TODO(mdempsky): This is subtle, elaborate further. + // + // We have to save tparams outside of the closure, because + // typeParamNames() can be called multiple times with the same + // dictionary instance. + // + // Also, this needs to happen later to make sure SetUnderlying has + // been called. + // + // TODO(mdempsky): Is it safe to have a single "later" slice or do + // we need to have multiple passes? See comments on CL 386002 and + // go.dev/issue/52104. + tparams := r.dict.tparams + r.p.later(func() { + for i, typ := range typs { + tparams[i].SetConstraint(typ) + } + }) + + return r.dict.tparams +} + +func (r *reader) method() *types.Func { + r.Sync(pkgbits.SyncMethod) + pos := r.pos() + pkg, name := r.selector() + + rparams := r.typeParamNames() + sig := r.signature(r.param(), rparams, nil) + + _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go. 
+ return types.NewFunc(pos, pkg, name, sig) +} + +func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) } +func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) } +func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) } + +func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) { + r.Sync(marker) + return r.pkg(), r.String() +} + +// pkgScope returns pkg.Scope(). +// If pkg is nil, it returns types.Universe instead. +// +// TODO(mdempsky): Remove after x/tools can depend on Go 1.19. +func pkgScope(pkg *types.Package) *types.Scope { + if pkg != nil { + return pkg.Scope() + } + return types.Universe +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 67256dc39..af0ee6c61 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -8,18 +8,25 @@ package gocommand import ( "bytes" "context" + "encoding/json" + "errors" "fmt" "io" + "log" "os" + "os/exec" + "path/filepath" + "reflect" "regexp" + "runtime" "strconv" "strings" "sync" "time" - exec "golang.org/x/sys/execabs" - "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" ) // An Runner will run go command invocations and serialize @@ -49,9 +56,22 @@ func (runner *Runner) initialize() { // 1.14: go: updating go.mod: existing contents have changed since last read var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) +// event keys for go command invocations +var ( + verb = keys.NewString("verb", "go command verb") + directory = keys.NewString("directory", "") +) + +func invLabels(inv Invocation) []label.Label { + return []label.Label{verb.Of(inv.Verb), directory.Of(inv.WorkingDir)} +} + // Run is a convenience wrapper around RunRaw. // It returns only stdout and a "friendly" error. func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...) + defer done() + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) return stdout, friendly } @@ -59,13 +79,19 @@ func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, e // RunPiped runs the invocation serially, always waiting for any concurrent // invocations to complete first. func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...) + defer done() + _, err := runner.runPiped(ctx, inv, stdout, stderr) return err } // RunRaw runs the invocation, serializing requests only if they fight over // go.mod changes. +// Postcondition: both error results have same nilness. func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) + defer done() // Make sure the runner is always initialized. runner.initialize() @@ -73,23 +99,24 @@ func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) // If we encounter a load concurrency error, we need to retry serially. 
- if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { - return stdout, stderr, friendlyErr, err + if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) { + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) } - event.Error(ctx, "Load concurrency error, will retry serially", err) - // Run serially by calling runPiped. - stdout.Reset() - stderr.Reset() - friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) return stdout, stderr, friendlyErr, err } +// Postcondition: both error results have same nilness. func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { // Wait for 1 worker to become available. select { case <-ctx.Done(): - return nil, nil, nil, ctx.Err() + return nil, nil, ctx.Err(), ctx.Err() case runner.inFlight <- struct{}{}: defer func() { <-runner.inFlight }() } @@ -99,6 +126,7 @@ func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes return stdout, stderr, friendlyErr, err } +// Postcondition: both error results have same nilness. func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { // Make sure the runner is always initialized. runner.initialize() @@ -107,7 +135,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde // runPiped commands. select { case <-ctx.Done(): - return nil, ctx.Err() + return ctx.Err(), ctx.Err() case runner.serialized <- struct{}{}: defer func() { <-runner.serialized }() } @@ -117,7 +145,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde for i := 0; i < maxInFlight; i++ { select { case <-ctx.Done(): - return nil, ctx.Err() + return ctx.Err(), ctx.Err() case runner.inFlight <- struct{}{}: // Make sure we always "return" any workers we took. defer func() { <-runner.inFlight }() @@ -134,12 +162,17 @@ type Invocation struct { BuildFlags []string // If ModFlag is set, the go command is invoked with -mod=ModFlag. + // TODO(rfindley): remove, in favor of Args. ModFlag string // If ModFile is set, the go command is invoked with -modfile=ModFile. + // TODO(rfindley): remove, in favor of Args. ModFile string - // If Overlay is set, the go command is invoked with -overlay=Overlay. + // Overlay is the name of the JSON overlay file that describes + // unsaved editor buffers; see [WriteOverlays]. + // If set, the go command is invoked with -overlay=Overlay. + // TODO(rfindley): remove, in favor of Args. Overlay string // If CleanEnv is set, the invocation will run only with the environment @@ -150,6 +183,7 @@ type Invocation struct { Logf func(format string, args ...interface{}) } +// Postcondition: both error results have same nilness. func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { rawError = i.run(ctx, stdout, stderr) if rawError != nil { @@ -213,12 +247,27 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd := exec.Command("go", goArgs...) cmd.Stdout = stdout cmd.Stderr = stderr - // On darwin the cwd gets resolved to the real path, which breaks anything that - // expects the working directory to keep the original path, including the + + // cmd.WaitDelay was added only in go1.20 (see #50436). 
+ if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + waitDelay.Set(reflect.ValueOf(30 * time.Second)) + } + + // The cwd gets resolved to the real path. On Darwin, where + // /tmp is a symlink, this breaks anything that expects the + // working directory to keep the original path, including the // go command when dealing with modules. - // The Go stdlib has a special feature where if the cwd and the PWD are the - // same node then it trusts the PWD, so by setting it in the env for the child - // process we fix up all the paths returned by the go command. + // + // os.Getwd has a special feature where if the cwd and the PWD + // are the same node then it trusts the PWD, so by setting it + // in the env for the child process we fix up all the paths + // returned by the go command. if !i.CleanEnv { cmd.Env = os.Environ() } @@ -227,39 +276,185 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) cmd.Dir = i.WorkingDir } + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) return runCmdContext(ctx, cmd) } +// DebugHangingGoCommands may be set by tests to enable additional +// instrumentation (including panics) for debugging hanging Go commands. +// +// See golang/go#54461 for details. +var DebugHangingGoCommands = false + // runCmdContext is like exec.CommandContext except it sends os.Interrupt // before os.Kill. -func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { - if err := cmd.Start(); err != nil { +func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { + // If cmd.Stdout is not an *os.File, the exec package will create a pipe and + // copy it to the Writer in a goroutine until the process has finished and + // either the pipe reaches EOF or command's WaitDelay expires. + // + // However, the output from 'go list' can be quite large, and we don't want to + // keep reading (and allocating buffers) if we've already decided we don't + // care about the output. We don't want to wait for the process to finish, and + // we don't wait to wait for the WaitDelay to expire either. + // + // Instead, if cmd.Stdout requires a copying goroutine we explicitly replace + // it with a pipe (which is an *os.File), which we can close in order to stop + // copying output as soon as we realize we don't care about it. + var stdoutW *os.File + if cmd.Stdout != nil { + if _, ok := cmd.Stdout.(*os.File); !ok { + var stdoutR *os.File + stdoutR, stdoutW, err = os.Pipe() + if err != nil { + return err + } + prevStdout := cmd.Stdout + cmd.Stdout = stdoutW + + stdoutErr := make(chan error, 1) + go func() { + _, err := io.Copy(prevStdout, stdoutR) + if err != nil { + err = fmt.Errorf("copying stdout: %w", err) + } + stdoutErr <- err + }() + defer func() { + // We started a goroutine to copy a stdout pipe. + // Wait for it to finish, or terminate it if need be. 
+ var err2 error + select { + case err2 = <-stdoutErr: + stdoutR.Close() + case <-ctx.Done(): + stdoutR.Close() + // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close + // should cause the Read call in io.Copy to unblock and return + // immediately, but we still need to receive from stdoutErr to confirm + // that it has happened. + <-stdoutErr + err2 = ctx.Err() + } + if err == nil { + err = err2 + } + }() + + // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the + // same writer, and have a type that can be compared with ==, at most + // one goroutine at a time will call Write.” + // + // Since we're starting a goroutine that writes to cmd.Stdout, we must + // also update cmd.Stderr so that it still holds. + func() { + defer func() { recover() }() + if cmd.Stderr == prevStdout { + cmd.Stderr = cmd.Stdout + } + }() + } + } + + startTime := time.Now() + err = cmd.Start() + if stdoutW != nil { + // The child process has inherited the pipe file, + // so close the copy held in this process. + stdoutW.Close() + stdoutW = nil + } + if err != nil { return err } + resChan := make(chan error, 1) go func() { resChan <- cmd.Wait() }() - select { - case err := <-resChan: - return err - case <-ctx.Done(): + // If we're interested in debugging hanging Go commands, stop waiting after a + // minute and panic with interesting information. + debug := DebugHangingGoCommands + if debug { + timer := time.NewTimer(1 * time.Minute) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + HandleHangingGoCommand(startTime, cmd) + case <-ctx.Done(): + } + } else { + select { + case err := <-resChan: + return err + case <-ctx.Done(): + } } + // Cancelled. Interrupt and see if it ends voluntarily. - cmd.Process.Signal(os.Interrupt) - select { - case err := <-resChan: - return err - case <-time.After(time.Second): + if err := cmd.Process.Signal(os.Interrupt); err == nil { + // (We used to wait only 1s but this proved + // fragile on loaded builder machines.) + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + } } + // Didn't shut down in response to interrupt. Kill it hard. - cmd.Process.Kill() + // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT + // on certain platforms, such as unix. + if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { + log.Printf("error killing the Go command: %v", err) + } + return <-resChan } +func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) { + switch runtime.GOOS { + case "linux", "darwin", "freebsd", "netbsd": + fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND + +The gopls test runner has detected a hanging go command. In order to debug +this, the output of ps and lsof/fstat is printed below. 
+ +See golang/go#54461 for more details.`) + + fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") + fmt.Fprintln(os.Stderr, "-------------------------") + psCmd := exec.Command("ps", "axo", "ppid,pid,command") + psCmd.Stdout = os.Stderr + psCmd.Stderr = os.Stderr + if err := psCmd.Run(); err != nil { + panic(fmt.Sprintf("running ps: %v", err)) + } + + listFiles := "lsof" + if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" { + listFiles = "fstat" + } + + fmt.Fprintln(os.Stderr, "\n"+listFiles+":") + fmt.Fprintln(os.Stderr, "-----") + listFilesCmd := exec.Command(listFiles) + listFilesCmd.Stdout = os.Stderr + listFilesCmd.Stderr = os.Stderr + if err := listFilesCmd.Run(); err != nil { + panic(fmt.Sprintf("running %s: %v", listFiles, err)) + } + } + panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)) +} + func cmdDebugStr(cmd *exec.Cmd) string { env := make(map[string]string) for _, kv := range cmd.Env { @@ -281,3 +476,73 @@ func cmdDebugStr(cmd *exec.Cmd) string { } return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) } + +// WriteOverlays writes each value in the overlay (see the Overlay +// field of go/packages.Config) to a temporary file and returns the name +// of a JSON file describing the mapping that is suitable for the "go +// list -overlay" flag. +// +// On success, the caller must call the cleanup function exactly once +// when the files are no longer needed. +func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. + if len(overlay) == 0 { + return "", func() {}, nil + } + + dir, err := os.MkdirTemp("", "gocommand-*") + if err != nil { + return "", nil, err + } + + // The caller must clean up this directory, + // unless this function returns an error. + // (The cleanup operand of each return + // statement below is ignored.) + defer func() { + cleanup = func() { + os.RemoveAll(dir) + } + if err != nil { + cleanup() + cleanup = nil + } + }() + + // Write each map entry to a temporary file. + overlays := make(map[string]string) + for k, v := range overlay { + // Use a unique basename for each file (001-foo.go), + // to avoid creating nested directories. + base := fmt.Sprintf("%d-%s.go", 1+len(overlays), filepath.Base(k)) + filename := filepath.Join(dir, base) + err := os.WriteFile(filename, v, 0666) + if err != nil { + return "", nil, err + } + overlays[k] = filename + } + + // Write the JSON overlay file that maps logical file names to temp files. + // + // OverlayJSON is the format overlay files are expected to be in. + // The Replace map maps from overlaid paths to replacement paths: + // the Go command will forward all reads trying to open + // each overlaid path to its replacement path, or consider the overlaid + // path not to exist if the replacement path is empty. + // + // From golang/go#39958. 
+ type OverlayJSON struct { + Replace map[string]string `json:"replace,omitempty"` + } + b, err := json.Marshal(OverlayJSON{Replace: overlays}) + if err != nil { + return "", nil, err + } + filename = filepath.Join(dir, "overlay.json") + if err := os.WriteFile(filename, b, 0666); err != nil { + return "", nil, err + } + + return filename, nil, nil +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gocommand/vendor.go index 2d3d408c0..e38d1fb48 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gocommand/vendor.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -107,3 +107,57 @@ func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*Modul } return mod, lines[4] == "go1.14", nil } + +// WorkspaceVendorEnabled reports whether workspace vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func WorkspaceVendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, []*ModuleJSON, error) { + inv.Verb = "env" + inv.Args = []string{"GOWORK"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goWork := string(bytes.TrimSpace(stdout.Bytes())) + if fi, err := os.Stat(filepath.Join(filepath.Dir(goWork), "vendor")); err == nil && fi.IsDir() { + mainMods, err := getWorkspaceMainModules(ctx, inv, r) + if err != nil { + return false, nil, err + } + return true, mainMods, nil + } + return false, nil, nil +} + +// getWorkspaceMainModules gets the main modules' information. +// This is the information needed to figure out if vendoring should be enabled. +func getWorkspaceMainModules(ctx context.Context, inv Invocation, r *Runner) ([]*ModuleJSON, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, err + } + + lines := strings.Split(strings.TrimSuffix(stdout.String(), "\n"), "\n") + if len(lines) < 4 { + return nil, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mods := make([]*ModuleJSON, 0, len(lines)/4) + for i := 0; i < len(lines); i += 4 { + mods = append(mods, &ModuleJSON{ + Path: lines[i], + Dir: lines[i+1], + GoMod: lines[i+2], + GoVersion: lines[i+3], + Main: true, + }) + } + return mods, nil +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gocommand/version.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gocommand/version.go index 713043680..446c5846a 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gocommand/version.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -7,29 +7,27 @@ package gocommand import ( "context" "fmt" + "regexp" "strings" ) -// GoVersion checks the go version by running "go list" with modules off. -// It returns the X in Go 1.X. +// GoVersion reports the minor version number of the highest release +// tag built into the go command on the PATH. +// +// Note that this may be higher than the version of the go tool used +// to build this application, and thus the versions of the standard +// go/{scanner,parser,ast,types} packages that are linked into it. 
+// In that case, callers should either downgrade to the version of +// go used to build the application, or report an error that the +// application is too old to use the go command on the PATH. func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.Verb = "list" inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} - inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") - // Unset any unneeded flags, and remove them from BuildFlags, if they're - // present. - inv.ModFile = "" + inv.BuildFlags = nil // This is not a build command. inv.ModFlag = "" - var buildFlags []string - for _, flag := range inv.BuildFlags { - // Flags can be prefixed by one or two dashes. - f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-") - if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") { - continue - } - buildFlags = append(buildFlags, flag) - } - inv.BuildFlags = buildFlags + inv.ModFile = "" + inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") + stdoutBytes, err := r.Run(ctx, inv) if err != nil { return 0, err @@ -38,7 +36,7 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { if len(stdout) < 3 { return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) } - // Split up "[go1.1 go1.15]" + // Split up "[go1.1 go1.15]" and return highest go1.X value. tags := strings.Fields(stdout[1 : len(stdout)-2]) for i := len(tags) - 1; i >= 0; i-- { var version int @@ -49,3 +47,25 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { } return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) } + +// GoVersionOutput returns the complete output of the go version command. +func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) { + inv.Verb = "version" + goVersion, err := r.Run(ctx, inv) + if err != nil { + return "", err + } + return goVersion.String(), nil +} + +// ParseGoVersionOutput extracts the Go version string +// from the output of the "go version" command. +// Given an unrecognized form, it returns an empty string. +func ParseGoVersionOutput(data string) string { + re := regexp.MustCompile(`^go version (go\S+|devel \S+)`) + m := re.FindStringSubmatch(data) + if len(m) != 2 { + return "" // unrecognized version + } + return m[1] +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/goroot/importcfg.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/goroot/importcfg.go new file mode 100644 index 000000000..f1cd28e2e --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/goroot/importcfg.go @@ -0,0 +1,71 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package goroot is a copy of package internal/goroot +// in the main GO repot. It provides a utility to produce +// an importcfg and import path to package file map mapping +// standard library packages to the locations of their export +// data files. +package goroot + +import ( + "bytes" + "fmt" + "os/exec" + "strings" + "sync" +) + +// Importcfg returns an importcfg file to be passed to the +// Go compiler that contains the cached paths for the .a files for the +// standard library. 
+func Importcfg() (string, error) { + var icfg bytes.Buffer + + m, err := PkgfileMap() + if err != nil { + return "", err + } + fmt.Fprintf(&icfg, "# import config") + for importPath, export := range m { + fmt.Fprintf(&icfg, "\npackagefile %s=%s", importPath, export) + } + s := icfg.String() + return s, nil +} + +var ( + stdlibPkgfileMap map[string]string + stdlibPkgfileErr error + once sync.Once +) + +// PkgfileMap returns a map of package paths to the location on disk +// of the .a file for the package. +// The caller must not modify the map. +func PkgfileMap() (map[string]string, error) { + once.Do(func() { + m := make(map[string]string) + output, err := exec.Command("go", "list", "-export", "-e", "-f", "{{.ImportPath}} {{.Export}}", "std", "cmd").Output() + if err != nil { + stdlibPkgfileErr = err + } + for _, line := range strings.Split(string(output), "\n") { + if line == "" { + continue + } + sp := strings.SplitN(line, " ", 2) + if len(sp) != 2 { + err = fmt.Errorf("determining pkgfile map: invalid line in go list output: %q", line) + return + } + importPath, export := sp[0], sp[1] + if export != "" { + m[importPath] = export + } + } + stdlibPkgfileMap = m + }) + return stdlibPkgfileMap, stdlibPkgfileErr +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/lsp/bug/bug.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/lsp/bug/bug.go deleted file mode 100644 index b974e88e1..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/lsp/bug/bug.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bug provides utilities for reporting internal bugs, and being -// notified when they occur. -// -// Philosophically, because gopls runs as a sidecar process that the user does -// not directly control, sometimes it keeps going on broken invariants rather -// than panicking. In those cases, bug reports provide a mechanism to alert -// developers and capture relevant metadata. -package bug - -import ( - "fmt" - "runtime" - "runtime/debug" - "sort" - "sync" -) - -// PanicOnBugs controls whether to panic when bugs are reported. -// -// It may be set to true during testing. -var PanicOnBugs = false - -var ( - mu sync.Mutex - exemplars map[string]Bug - waiters []chan<- Bug -) - -// A Bug represents an unexpected event or broken invariant. They are used for -// capturing metadata that helps us understand the event. -type Bug struct { - File string // file containing the call to bug.Report - Line int // line containing the call to bug.Report - Description string // description of the bug - Data Data // additional metadata - Key string // key identifying the bug (file:line if available) - Stack string // call stack -} - -// Data is additional metadata to record for a bug. -type Data map[string]interface{} - -// Reportf reports a formatted bug message. -func Reportf(format string, args ...interface{}) { - Report(fmt.Sprintf(format, args...), nil) -} - -// Errorf calls fmt.Errorf for the given arguments, and reports the resulting -// error message as a bug. -func Errorf(format string, args ...interface{}) error { - err := fmt.Errorf(format, args...) - Report(err.Error(), nil) - return err -} - -// Report records a new bug encountered on the server. -// It uses reflection to report the position of the immediate caller. 
-func Report(description string, data Data) { - _, file, line, ok := runtime.Caller(1) - - key := "" - if ok { - key = fmt.Sprintf("%s:%d", file, line) - } - - if PanicOnBugs { - panic(fmt.Sprintf("%s: %s", key, description)) - } - - bug := Bug{ - File: file, - Line: line, - Description: description, - Data: data, - Key: key, - Stack: string(debug.Stack()), - } - - mu.Lock() - defer mu.Unlock() - - if exemplars == nil { - exemplars = make(map[string]Bug) - } - - if _, ok := exemplars[key]; !ok { - exemplars[key] = bug // capture one exemplar per key - } - - for _, waiter := range waiters { - waiter <- bug - } - waiters = nil -} - -// Notify returns a channel that will be sent the next bug to occur on the -// server. This channel only ever receives one bug. -func Notify() <-chan Bug { - mu.Lock() - defer mu.Unlock() - - ch := make(chan Bug, 1) // 1-buffered so that bug reporting is non-blocking - waiters = append(waiters, ch) - return ch -} - -// List returns a slice of bug exemplars -- the first bugs to occur at each -// callsite. -func List() []Bug { - mu.Lock() - defer mu.Unlock() - - var bugs []Bug - - for _, bug := range exemplars { - bugs = append(bugs, bug) - } - - sort.Slice(bugs, func(i, j int) bool { - return bugs[i].Key < bugs[j].Key - }) - - return bugs -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index d9950b1f0..44719de17 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -5,10 +5,6 @@ // Package packagesinternal exposes internal-only fields from go/packages. package packagesinternal -import ( - "golang.org/x/tools/internal/gocommand" -) - var GetForTest = func(p interface{}) string { return "" } var GetDepsErrors = func(p interface{}) []*PackageError { return nil } @@ -18,10 +14,6 @@ type PackageError struct { Err string // the error itself } -var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } - -var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} - var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors var ForTest int // must be set as a LoadMode to call GetForTest diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/codes.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/codes.go similarity index 100% rename from MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/codes.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/codes.go diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/decoder.go new file mode 100644 index 000000000..2acd85851 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -0,0 +1,521 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "encoding/binary" + "errors" + "fmt" + "go/constant" + "go/token" + "io" + "math/big" + "os" + "runtime" + "strings" +) + +// A PkgDecoder provides methods for decoding a package's Unified IR +// export data. 
+type PkgDecoder struct { + // version is the file format version. + version uint32 + + // aliases determines whether types.Aliases should be created + aliases bool + + // sync indicates whether the file uses sync markers. + sync bool + + // pkgPath is the package path for the package to be decoded. + // + // TODO(mdempsky): Remove; unneeded since CL 391014. + pkgPath string + + // elemData is the full data payload of the encoded package. + // Elements are densely and contiguously packed together. + // + // The last 8 bytes of elemData are the package fingerprint. + elemData string + + // elemEnds stores the byte-offset end positions of element + // bitstreams within elemData. + // + // For example, element I's bitstream data starts at elemEnds[I-1] + // (or 0, if I==0) and ends at elemEnds[I]. + // + // Note: elemEnds is indexed by absolute indices, not + // section-relative indices. + elemEnds []uint32 + + // elemEndsEnds stores the index-offset end positions of relocation + // sections within elemEnds. + // + // For example, section K's end positions start at elemEndsEnds[K-1] + // (or 0, if K==0) and end at elemEndsEnds[K]. + elemEndsEnds [numRelocs]uint32 + + scratchRelocEnt []RelocEnt +} + +// PkgPath returns the package path for the package +// +// TODO(mdempsky): Remove; unneeded since CL 391014. +func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } + +// SyncMarkers reports whether pr uses sync markers. +func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } + +// NewPkgDecoder returns a PkgDecoder initialized to read the Unified +// IR export data from input. pkgPath is the package path for the +// compilation unit that produced the export data. +// +// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. +func NewPkgDecoder(pkgPath, input string) PkgDecoder { + pr := PkgDecoder{ + pkgPath: pkgPath, + //aliases: aliases.Enabled(), + } + + // TODO(mdempsky): Implement direct indexing of input string to + // avoid copying the position information. + + r := strings.NewReader(input) + + assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + + switch pr.version { + default: + panic(fmt.Errorf("unsupported version: %v", pr.version)) + case 0: + // no flags + case 1: + var flags uint32 + assert(binary.Read(r, binary.LittleEndian, &flags) == nil) + pr.sync = flags&flagSyncMarkers != 0 + } + + assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) + + pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) + assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) + + pos, err := r.Seek(0, io.SeekCurrent) + assert(err == nil) + + pr.elemData = input[pos:] + assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + return pr +} + +// NumElems returns the number of elements in section k. +func (pr *PkgDecoder) NumElems(k RelocKind) int { + count := int(pr.elemEndsEnds[k]) + if k > 0 { + count -= int(pr.elemEndsEnds[k-1]) + } + return count +} + +// TotalElems returns the total number of elements across all sections. +func (pr *PkgDecoder) TotalElems() int { + return len(pr.elemEnds) +} + +// Fingerprint returns the package fingerprint. +func (pr *PkgDecoder) Fingerprint() [8]byte { + var fp [8]byte + copy(fp[:], pr.elemData[len(pr.elemData)-8:]) + return fp +} + +// AbsIdx returns the absolute index for the given (section, index) +// pair. 
+func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { + absIdx := int(idx) + if k > 0 { + absIdx += int(pr.elemEndsEnds[k-1]) + } + if absIdx >= int(pr.elemEndsEnds[k]) { + errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + } + return absIdx +} + +// DataIdx returns the raw element bitstream for the given (section, +// index) pair. +func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { + absIdx := pr.AbsIdx(k, idx) + + var start uint32 + if absIdx > 0 { + start = pr.elemEnds[absIdx-1] + } + end := pr.elemEnds[absIdx] + + return pr.elemData[start:end] +} + +// StringIdx returns the string value for the given string index. +func (pr *PkgDecoder) StringIdx(idx Index) string { + return pr.DataIdx(RelocString, idx) +} + +// NewDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.NewDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +// TempDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +// If possible the Decoder should be RetireDecoder'd when it is no longer +// needed, this will avoid heap allocations. +func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.TempDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +func (pr *PkgDecoder) RetireDecoder(d *Decoder) { + pr.scratchRelocEnt = d.Relocs + d.Relocs = nil +} + +// NewDecoderRaw returns a Decoder for the given (section, index) pair. +// +// Most callers should use NewDecoder instead. +func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { + r := Decoder{ + common: pr, + k: k, + Idx: idx, + } + + // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. + r.Data = *strings.NewReader(pr.DataIdx(k, idx)) + + r.Sync(SyncRelocs) + r.Relocs = make([]RelocEnt, r.Len()) + for i := range r.Relocs { + r.Sync(SyncReloc) + r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} + } + + return r +} + +func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder { + r := Decoder{ + common: pr, + k: k, + Idx: idx, + } + + r.Data.Reset(pr.DataIdx(k, idx)) + r.Sync(SyncRelocs) + l := r.Len() + if cap(pr.scratchRelocEnt) >= l { + r.Relocs = pr.scratchRelocEnt[:l] + pr.scratchRelocEnt = nil + } else { + r.Relocs = make([]RelocEnt, l) + } + for i := range r.Relocs { + r.Sync(SyncReloc) + r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} + } + + return r +} + +// A Decoder provides methods for decoding an individual element's +// bitstream data. +type Decoder struct { + common *PkgDecoder + + Relocs []RelocEnt + Data strings.Reader + + k RelocKind + Idx Index +} + +func (r *Decoder) checkErr(err error) { + if err != nil { + errorf("unexpected decoding error: %w", err) + } +} + +func (r *Decoder) rawUvarint() uint64 { + x, err := readUvarint(&r.Data) + r.checkErr(err) + return x +} + +// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint. +// This avoids the interface conversion and thus has better escape properties, +// which flows up the stack. 
+func readUvarint(r *strings.Reader) (uint64, error) {
+	var x uint64
+	var s uint
+	for i := 0; i < binary.MaxVarintLen64; i++ {
+		b, err := r.ReadByte()
+		if err != nil {
+			if i > 0 && err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			return x, err
+		}
+		if b < 0x80 {
+			if i == binary.MaxVarintLen64-1 && b > 1 {
+				return x, overflow
+			}
+			return x | uint64(b)<<s, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+	return x, overflow
+}
+
+var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
+
+func (r *Decoder) rawVarint() int64 {
+	ux := r.rawUvarint()
+
+	// Zig-zag decode.
+	x := int64(ux >> 1)
+	if ux&1 != 0 {
+		x = ^x
+	}
+	return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
+	e := r.Relocs[idx]
+	assert(e.Kind == k)
+	return e.Idx
+}
+
+// Sync decodes a sync marker from the element bitstream and asserts
+// that it matches the expected marker.
+//
+// If r.common.sync is false, then Sync is a no-op.
+func (r *Decoder) Sync(mWant SyncMarker) {
+	if !r.common.sync {
+		return
+	}
+
+	pos, _ := r.Data.Seek(0, io.SeekCurrent)
+	mHave := SyncMarker(r.rawUvarint())
+	writerPCs := make([]int, r.rawUvarint())
+	for i := range writerPCs {
+		writerPCs[i] = int(r.rawUvarint())
+	}
+
+	if mHave == mWant {
+		return
+	}
+
+	// There's some tension here between printing:
+	//
+	// (1) full file paths that tools can recognize (e.g., so emacs
+	// hyperlinks the "file:line" text for easy navigation), or
+	//
+	// (2) short file paths that are easier for humans to read (e.g., by
+	// omitting redundant or irrelevant details, so it's easier to
+	// focus on the useful bits that remain).
+	//
+	// The current formatting favors the former, as it seems more
+	// helpful in practice. But perhaps the formatting could be improved
+	// to better address both concerns. For example, use relative file
+	// paths if they would be shorter, or rewrite file paths to contain
+	// "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+	// to reliably expand that again.
+
+	fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos)
+
+	fmt.Printf("\nfound %v, written at:\n", mHave)
+	if len(writerPCs) == 0 {
+		fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
+	}
+	for _, pc := range writerPCs {
+		fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc)))
+	}
+
+	fmt.Printf("\nexpected %v, reading at:\n", mWant)
+	var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
+	n := runtime.Callers(2, readerPCs[:])
+	for _, pc := range fmtFrames(readerPCs[:n]...) {
+		fmt.Printf("\t%s\n", pc)
+	}
+
+	// We already printed a stack trace for the reader, so now we can
+	// simply exit. Printing a second one with panic or base.Fatalf
+	// would just be noise.
+	os.Exit(1)
+}
+
+// Bool decodes and returns a bool value from the element bitstream.
+func (r *Decoder) Bool() bool {
+	r.Sync(SyncBool)
+	x, err := r.Data.ReadByte()
+	r.checkErr(err)
+	assert(x < 2)
+	return x != 0
+}
+
+// Int64 decodes and returns an int64 value from the element bitstream.
+func (r *Decoder) Int64() int64 {
+	r.Sync(SyncInt64)
+	return r.rawVarint()
+}
+
+// Uint64 decodes and returns a uint64 value from the element bitstream.
+func (r *Decoder) Uint64() uint64 {
+	r.Sync(SyncUint64)
+	return r.rawUvarint()
+}
+
+// Len decodes and returns a non-negative int value from the element bitstream.
+func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v }
+
+// Int decodes and returns an int value from the element bitstream.
+func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }
+
+// Uint decodes and returns a uint value from the element bitstream.
+func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } + +// Code decodes a Code value from the element bitstream and returns +// its ordinal value. It's the caller's responsibility to convert the +// result to an appropriate Code type. +// +// TODO(mdempsky): Ideally this method would have signature "Code[T +// Code] T" instead, but we don't allow generic methods and the +// compiler can't depend on generics yet anyway. +func (r *Decoder) Code(mark SyncMarker) int { + r.Sync(mark) + return r.Len() +} + +// Reloc decodes a relocation of expected section k from the element +// bitstream and returns an index to the referenced element. +func (r *Decoder) Reloc(k RelocKind) Index { + r.Sync(SyncUseReloc) + return r.rawReloc(k, r.Len()) +} + +// String decodes and returns a string value from the element +// bitstream. +func (r *Decoder) String() string { + r.Sync(SyncString) + return r.common.StringIdx(r.Reloc(RelocString)) +} + +// Strings decodes and returns a variable-length slice of strings from +// the element bitstream. +func (r *Decoder) Strings() []string { + res := make([]string, r.Len()) + for i := range res { + res[i] = r.String() + } + return res +} + +// Value decodes and returns a constant.Value from the element +// bitstream. +func (r *Decoder) Value() constant.Value { + r.Sync(SyncValue) + isComplex := r.Bool() + val := r.scalar() + if isComplex { + val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) + } + return val +} + +func (r *Decoder) scalar() constant.Value { + switch tag := CodeVal(r.Code(SyncVal)); tag { + default: + panic(fmt.Errorf("unexpected scalar tag: %v", tag)) + + case ValBool: + return constant.MakeBool(r.Bool()) + case ValString: + return constant.MakeString(r.String()) + case ValInt64: + return constant.MakeInt64(r.Int64()) + case ValBigInt: + return constant.Make(r.bigInt()) + case ValBigRat: + num := r.bigInt() + denom := r.bigInt() + return constant.Make(new(big.Rat).SetFrac(num, denom)) + case ValBigFloat: + return constant.Make(r.bigFloat()) + } +} + +func (r *Decoder) bigInt() *big.Int { + v := new(big.Int).SetBytes([]byte(r.String())) + if r.Bool() { + v.Neg(v) + } + return v +} + +func (r *Decoder) bigFloat() *big.Float { + v := new(big.Float).SetPrec(512) + assert(v.UnmarshalText([]byte(r.String())) == nil) + return v +} + +// @@@ Helpers + +// TODO(mdempsky): These should probably be removed. I think they're a +// smell that the export data format is not yet quite right. + +// PeekPkgPath returns the package path for the specified package +// index. +func (pr *PkgDecoder) PeekPkgPath(idx Index) string { + var path string + { + r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef) + path = r.String() + pr.RetireDecoder(&r) + } + if path == "" { + path = pr.pkgPath + } + return path +} + +// PeekObj returns the package path, object name, and CodeObj for the +// specified object index. 
+func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { + var ridx Index + var name string + var rcode int + { + r := pr.TempDecoder(RelocName, idx, SyncObject1) + r.Sync(SyncSym) + r.Sync(SyncPkg) + ridx = r.Reloc(RelocPkg) + name = r.String() + rcode = r.Code(SyncCodeObj) + pr.RetireDecoder(&r) + } + + path := pr.PeekPkgPath(ridx) + assert(name != "") + + tag := CodeObj(rcode) + + return path, name, tag +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/doc.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/doc.go similarity index 100% rename from MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/doc.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/doc.go diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/encoder.go similarity index 100% rename from vendor/golang.org/x/tools/internal/pkgbits/encoder.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/encoder.go diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/flags.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/flags.go similarity index 100% rename from MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/flags.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/flags.go diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go1.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go similarity index 100% rename from MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go1.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go17.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go similarity index 100% rename from MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go17.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go diff --git a/vendor/golang.org/x/tools/internal/pkgbits/reloc.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/reloc.go similarity index 100% rename from vendor/golang.org/x/tools/internal/pkgbits/reloc.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/reloc.go diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/support.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/support.go similarity index 100% rename from MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/support.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/support.go diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/sync.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/sync.go similarity index 100% rename from MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/sync.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/sync.go diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/syncmarker_string.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go similarity index 100% rename from 
MobileLibrary/go-mobile/vendor/golang.org/x/tools/go/internal/pkgbits/syncmarker_string.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/proxydir/proxydir.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/proxydir/proxydir.go index 518020406..ffec81c26 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/proxydir/proxydir.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/proxydir/proxydir.go @@ -11,7 +11,6 @@ import ( "archive/zip" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -44,13 +43,13 @@ func WriteModuleVersion(rootDir, module, ver string, files map[string][]byte) (r if !ok { modContents = []byte("module " + module) } - if err := ioutil.WriteFile(filepath.Join(dir, ver+".mod"), modContents, 0644); err != nil { + if err := os.WriteFile(filepath.Join(dir, ver+".mod"), modContents, 0644); err != nil { return err } // info file, just the bare bones. infoContents := []byte(fmt.Sprintf(`{"Version": "%v", "Time":"2017-12-14T13:08:43Z"}`, ver)) - if err := ioutil.WriteFile(filepath.Join(dir, ver+".info"), infoContents, 0644); err != nil { + if err := os.WriteFile(filepath.Join(dir, ver+".info"), infoContents, 0644); err != nil { return err } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/parse.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/parse.go deleted file mode 100644 index c4cec16e9..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/parse.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "path/filepath" - "strconv" - "strings" - "unicode/utf8" -) - -// Parse returns the location represented by the input. -// Only file paths are accepted, not URIs. -// The returned span will be normalized, and thus if printed may produce a -// different string. -func Parse(input string) Span { - return ParseInDir(input, ".") -} - -// ParseInDir is like Parse, but interprets paths relative to wd. 
-func ParseInDir(input, wd string) Span { - uri := func(path string) URI { - if !filepath.IsAbs(path) { - path = filepath.Join(wd, path) - } - return URIFromPath(path) - } - // :0:0#0-0:0#0 - valid := input - var hold, offset int - hadCol := false - suf := rstripSuffix(input) - if suf.sep == "#" { - offset = suf.num - suf = rstripSuffix(suf.remains) - } - if suf.sep == ":" { - valid = suf.remains - hold = suf.num - hadCol = true - suf = rstripSuffix(suf.remains) - } - switch { - case suf.sep == ":": - return New(uri(suf.remains), NewPoint(suf.num, hold, offset), Point{}) - case suf.sep == "-": - // we have a span, fall out of the case to continue - default: - // separator not valid, rewind to either the : or the start - return New(uri(valid), NewPoint(hold, 0, offset), Point{}) - } - // only the span form can get here - // at this point we still don't know what the numbers we have mean - // if have not yet seen a : then we might have either a line or a column depending - // on whether start has a column or not - // we build an end point and will fix it later if needed - end := NewPoint(suf.num, hold, offset) - hold, offset = 0, 0 - suf = rstripSuffix(suf.remains) - if suf.sep == "#" { - offset = suf.num - suf = rstripSuffix(suf.remains) - } - if suf.sep != ":" { - // turns out we don't have a span after all, rewind - return New(uri(valid), end, Point{}) - } - valid = suf.remains - hold = suf.num - suf = rstripSuffix(suf.remains) - if suf.sep != ":" { - // line#offset only - return New(uri(valid), NewPoint(hold, 0, offset), end) - } - // we have a column, so if end only had one number, it is also the column - if !hadCol { - end = NewPoint(suf.num, end.v.Line, end.v.Offset) - } - return New(uri(suf.remains), NewPoint(suf.num, hold, offset), end) -} - -type suffix struct { - remains string - sep string - num int -} - -func rstripSuffix(input string) suffix { - if len(input) == 0 { - return suffix{"", "", -1} - } - remains := input - num := -1 - // first see if we have a number at the end - last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' }) - if last >= 0 && last < len(remains)-1 { - number, err := strconv.ParseInt(remains[last+1:], 10, 64) - if err == nil { - num = int(number) - remains = remains[:last+1] - } - } - // now see if we have a trailing separator - r, w := utf8.DecodeLastRuneInString(remains) - if r != ':' && r != '#' && r == '#' { - return suffix{input, "", -1} - } - remains = remains[:len(remains)-w] - return suffix{remains, string(r), num} -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/span.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/span.go deleted file mode 100644 index 502145bbe..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/span.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package span contains support for representing with positions and ranges in -// text files. -package span - -import ( - "encoding/json" - "fmt" - "go/token" - "path" -) - -// Span represents a source code range in standardized form. -type Span struct { - v span -} - -// Point represents a single point within a file. -// In general this should only be used as part of a Span, as on its own it -// does not carry enough information. 
-type Point struct { - v point -} - -type span struct { - URI URI `json:"uri"` - Start point `json:"start"` - End point `json:"end"` -} - -type point struct { - Line int `json:"line"` - Column int `json:"column"` - Offset int `json:"offset"` -} - -// Invalid is a span that reports false from IsValid -var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}} - -var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}} - -func New(uri URI, start Point, end Point) Span { - s := Span{v: span{URI: uri, Start: start.v, End: end.v}} - s.v.clean() - return s -} - -func NewPoint(line, col, offset int) Point { - p := Point{v: point{Line: line, Column: col, Offset: offset}} - p.v.clean() - return p -} - -func Compare(a, b Span) int { - if r := CompareURI(a.URI(), b.URI()); r != 0 { - return r - } - if r := comparePoint(a.v.Start, b.v.Start); r != 0 { - return r - } - return comparePoint(a.v.End, b.v.End) -} - -func ComparePoint(a, b Point) int { - return comparePoint(a.v, b.v) -} - -func comparePoint(a, b point) int { - if !a.hasPosition() { - if a.Offset < b.Offset { - return -1 - } - if a.Offset > b.Offset { - return 1 - } - return 0 - } - if a.Line < b.Line { - return -1 - } - if a.Line > b.Line { - return 1 - } - if a.Column < b.Column { - return -1 - } - if a.Column > b.Column { - return 1 - } - return 0 -} - -func (s Span) HasPosition() bool { return s.v.Start.hasPosition() } -func (s Span) HasOffset() bool { return s.v.Start.hasOffset() } -func (s Span) IsValid() bool { return s.v.Start.isValid() } -func (s Span) IsPoint() bool { return s.v.Start == s.v.End } -func (s Span) URI() URI { return s.v.URI } -func (s Span) Start() Point { return Point{s.v.Start} } -func (s Span) End() Point { return Point{s.v.End} } -func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) } -func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) } - -func (p Point) HasPosition() bool { return p.v.hasPosition() } -func (p Point) HasOffset() bool { return p.v.hasOffset() } -func (p Point) IsValid() bool { return p.v.isValid() } -func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) } -func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) } -func (p Point) Line() int { - if !p.v.hasPosition() { - panic(fmt.Errorf("position not set in %v", p.v)) - } - return p.v.Line -} -func (p Point) Column() int { - if !p.v.hasPosition() { - panic(fmt.Errorf("position not set in %v", p.v)) - } - return p.v.Column -} -func (p Point) Offset() int { - if !p.v.hasOffset() { - panic(fmt.Errorf("offset not set in %v", p.v)) - } - return p.v.Offset -} - -func (p point) hasPosition() bool { return p.Line > 0 } -func (p point) hasOffset() bool { return p.Offset >= 0 } -func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() } -func (p point) isZero() bool { - return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0) -} - -func (s *span) clean() { - //this presumes the points are already clean - if !s.End.isValid() || (s.End == point{}) { - s.End = s.Start - } -} - -func (p *point) clean() { - if p.Line < 0 { - p.Line = 0 - } - if p.Column <= 0 { - if p.Line > 0 { - p.Column = 1 - } else { - p.Column = 0 - } - } - if p.Offset == 0 && (p.Line > 1 || p.Column > 1) { - p.Offset = -1 - } -} - -// Format implements fmt.Formatter to print the Location in a standard form. -// The format produced is one that can be read back in using Parse. 
-func (s Span) Format(f fmt.State, c rune) { - fullForm := f.Flag('+') - preferOffset := f.Flag('#') - // we should always have a uri, simplify if it is file format - //TODO: make sure the end of the uri is unambiguous - uri := string(s.v.URI) - if c == 'f' { - uri = path.Base(uri) - } else if !fullForm { - uri = s.v.URI.Filename() - } - fmt.Fprint(f, uri) - if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) { - return - } - // see which bits of start to write - printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition()) - printLine := s.HasPosition() && (fullForm || !printOffset) - printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1)) - fmt.Fprint(f, ":") - if printLine { - fmt.Fprintf(f, "%d", s.v.Start.Line) - } - if printColumn { - fmt.Fprintf(f, ":%d", s.v.Start.Column) - } - if printOffset { - fmt.Fprintf(f, "#%d", s.v.Start.Offset) - } - // start is written, do we need end? - if s.IsPoint() { - return - } - // we don't print the line if it did not change - printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line) - fmt.Fprint(f, "-") - if printLine { - fmt.Fprintf(f, "%d", s.v.End.Line) - } - if printColumn { - if printLine { - fmt.Fprint(f, ":") - } - fmt.Fprintf(f, "%d", s.v.End.Column) - } - if printOffset { - fmt.Fprintf(f, "#%d", s.v.End.Offset) - } -} - -func (s Span) WithPosition(tf *token.File) (Span, error) { - if err := s.update(tf, true, false); err != nil { - return Span{}, err - } - return s, nil -} - -func (s Span) WithOffset(tf *token.File) (Span, error) { - if err := s.update(tf, false, true); err != nil { - return Span{}, err - } - return s, nil -} - -func (s Span) WithAll(tf *token.File) (Span, error) { - if err := s.update(tf, true, true); err != nil { - return Span{}, err - } - return s, nil -} - -func (s *Span) update(tf *token.File, withPos, withOffset bool) error { - if !s.IsValid() { - return fmt.Errorf("cannot add information to an invalid span") - } - if withPos && !s.HasPosition() { - if err := s.v.Start.updatePosition(tf); err != nil { - return err - } - if s.v.End.Offset == s.v.Start.Offset { - s.v.End = s.v.Start - } else if err := s.v.End.updatePosition(tf); err != nil { - return err - } - } - if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) { - if err := s.v.Start.updateOffset(tf); err != nil { - return err - } - if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column { - s.v.End.Offset = s.v.Start.Offset - } else if err := s.v.End.updateOffset(tf); err != nil { - return err - } - } - return nil -} - -func (p *point) updatePosition(tf *token.File) error { - line, col, err := ToPosition(tf, p.Offset) - if err != nil { - return err - } - p.Line = line - p.Column = col - return nil -} - -func (p *point) updateOffset(tf *token.File) error { - offset, err := ToOffset(tf, p.Line, p.Column) - if err != nil { - return err - } - p.Offset = offset - return nil -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/token.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/token.go deleted file mode 100644 index c35a512c1..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/token.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package span - -import ( - "fmt" - "go/token" - - "golang.org/x/tools/internal/lsp/bug" -) - -// Range represents a source code range in token.Pos form. -// It also carries the token.File that produced the positions, so that it is -// self contained. -type Range struct { - TokFile *token.File // non-nil - Start, End token.Pos // both IsValid() -} - -// NewRange creates a new Range from a token.File and two valid positions within it. -// -// (If you only have a token.FileSet, use file = fset.File(start). But -// most callers know exactly which token.File they're dealing with and -// should pass it explicitly. Not only does this save a lookup, but it -// brings us a step closer to eliminating the global FileSet.) -func NewRange(file *token.File, start, end token.Pos) Range { - if file == nil { - panic("nil *token.File") - } - if !start.IsValid() || !end.IsValid() { - panic("invalid start/end token.Pos") - } - - // TODO(adonovan): ideally we would make this stronger assertion: - // - // // Assert that file is non-nil and contains start and end. - // _ = file.Offset(start) - // _ = file.Offset(end) - // - // but some callers (e.g. packageCompletionSurrounding, - // posToMappedRange) don't ensure this precondition. - - return Range{ - TokFile: file, - Start: start, - End: end, - } -} - -// NewTokenFile returns a token.File for the given file content. -func NewTokenFile(filename string, content []byte) *token.File { - fset := token.NewFileSet() - f := fset.AddFile(filename, -1, len(content)) - f.SetLinesForContent(content) - return f -} - -// IsPoint returns true if the range represents a single point. -func (r Range) IsPoint() bool { - return r.Start == r.End -} - -// Span converts a Range to a Span that represents the Range. -// It will fill in all the members of the Span, calculating the line and column -// information. -func (r Range) Span() (Span, error) { - return FileSpan(r.TokFile, r.TokFile, r.Start, r.End) -} - -// FileSpan returns a span within the file referenced by start and end, using a -// token.File to translate between offsets and positions. -// -// The start and end position must be contained within posFile, though due to -// line directives they may reference positions in another file. If srcFile is -// provided, it is used to map the line:column positions referenced by start -// and end to offsets in the corresponding file. -func FileSpan(posFile, srcFile *token.File, start, end token.Pos) (Span, error) { - if !start.IsValid() { - return Span{}, fmt.Errorf("start pos is not valid") - } - if posFile == nil { - return Span{}, bug.Errorf("missing file association") // should never get here with a nil file - } - var s Span - var err error - var startFilename string - startFilename, s.v.Start.Line, s.v.Start.Column, err = position(posFile, start) - if err != nil { - return Span{}, err - } - s.v.URI = URIFromPath(startFilename) - if end.IsValid() { - var endFilename string - endFilename, s.v.End.Line, s.v.End.Column, err = position(posFile, end) - if err != nil { - return Span{}, err - } - // In the presence of line directives, a single File can have sections from - // multiple file names. 
- if endFilename != startFilename { - return Span{}, fmt.Errorf("span begins in file %q but ends in %q", startFilename, endFilename) - } - } - s.v.Start.clean() - s.v.End.clean() - s.v.clean() - tf := posFile - if srcFile != nil { - tf = srcFile - } - if startFilename != tf.Name() { - return Span{}, bug.Errorf("must supply Converter for file %q", startFilename) - } - return s.WithOffset(tf) -} - -func position(tf *token.File, pos token.Pos) (string, int, int, error) { - off, err := offset(tf, pos) - if err != nil { - return "", 0, 0, err - } - return positionFromOffset(tf, off) -} - -func positionFromOffset(tf *token.File, offset int) (string, int, int, error) { - if offset > tf.Size() { - return "", 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, tf.Size()) - } - pos := tf.Pos(offset) - p := tf.Position(pos) - // TODO(golang/go#41029): Consider returning line, column instead of line+1, 1 if - // the file's last character is not a newline. - if offset == tf.Size() { - return p.Filename, p.Line + 1, 1, nil - } - return p.Filename, p.Line, p.Column, nil -} - -// offset is a copy of the Offset function in go/token, but with the adjustment -// that it does not panic on invalid positions. -func offset(tf *token.File, pos token.Pos) (int, error) { - if int(pos) < tf.Base() || int(pos) > tf.Base()+tf.Size() { - return 0, fmt.Errorf("invalid pos: %d not in [%d, %d]", pos, tf.Base(), tf.Base()+tf.Size()) - } - return int(pos) - tf.Base(), nil -} - -// Range converts a Span to a Range that represents the Span for the supplied -// File. -func (s Span) Range(tf *token.File) (Range, error) { - s, err := s.WithOffset(tf) - if err != nil { - return Range{}, err - } - // go/token will panic if the offset is larger than the file's size, - // so check here to avoid panicking. - if s.Start().Offset() > tf.Size() { - return Range{}, bug.Errorf("start offset %v is past the end of the file %v", s.Start(), tf.Size()) - } - if s.End().Offset() > tf.Size() { - return Range{}, bug.Errorf("end offset %v is past the end of the file %v", s.End(), tf.Size()) - } - return Range{ - Start: tf.Pos(s.Start().Offset()), - End: tf.Pos(s.End().Offset()), - TokFile: tf, - }, nil -} - -// ToPosition converts a byte offset in the file corresponding to tf into -// 1-based line and utf-8 column indexes. -func ToPosition(tf *token.File, offset int) (int, int, error) { - _, line, col, err := positionFromOffset(tf, offset) - return line, col, err -} - -// ToOffset converts a 1-based line and utf-8 column index into a byte offset -// in the file corresponding to tf. -func ToOffset(tf *token.File, line, col int) (int, error) { - if line < 1 { // token.File.LineStart panics if line < 1 - return -1, fmt.Errorf("invalid line: %d", line) - } - - lineMax := tf.LineCount() + 1 - if line > lineMax { - return -1, fmt.Errorf("line %d is beyond end of file %v", line, lineMax) - } else if line == lineMax { - if col > 1 { - return -1, fmt.Errorf("column is beyond end of file") - } - // at the end of the file, allowing for a trailing eol - return tf.Size(), nil - } - pos := tf.LineStart(line) - if !pos.IsValid() { - // bug.Errorf here because LineStart panics on out-of-bound input, and so - // should never return invalid positions. 
- return -1, bug.Errorf("line is not in file") - } - // we assume that column is in bytes here, and that the first byte of a - // line is at column 1 - pos += token.Pos(col - 1) - return offset(tf, pos) -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/uri.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/uri.go deleted file mode 100644 index 8132665d7..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/uri.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "fmt" - "net/url" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "unicode" -) - -const fileScheme = "file" - -// URI represents the full URI for a file. -type URI string - -func (uri URI) IsFile() bool { - return strings.HasPrefix(string(uri), "file://") -} - -// Filename returns the file path for the given URI. -// It is an error to call this on a URI that is not a valid filename. -func (uri URI) Filename() string { - filename, err := filename(uri) - if err != nil { - panic(err) - } - return filepath.FromSlash(filename) -} - -func filename(uri URI) (string, error) { - if uri == "" { - return "", nil - } - - // This conservative check for the common case - // of a simple non-empty absolute POSIX filename - // avoids the allocation of a net.URL. - if strings.HasPrefix(string(uri), "file:///") { - rest := string(uri)[len("file://"):] // leave one slash - for i := 0; i < len(rest); i++ { - b := rest[i] - // Reject these cases: - if b < ' ' || b == 0x7f || // control character - b == '%' || b == '+' || // URI escape - b == ':' || // Windows drive letter - b == '@' || b == '&' || b == '?' { // authority or query - goto slow - } - } - return rest, nil - } -slow: - - u, err := url.ParseRequestURI(string(uri)) - if err != nil { - return "", err - } - if u.Scheme != fileScheme { - return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri) - } - // If the URI is a Windows URI, we trim the leading "/" and uppercase - // the drive letter, which will never be case sensitive. - if isWindowsDriveURIPath(u.Path) { - u.Path = strings.ToUpper(string(u.Path[1])) + u.Path[2:] - } - - return u.Path, nil -} - -func URIFromURI(s string) URI { - if !strings.HasPrefix(s, "file://") { - return URI(s) - } - - if !strings.HasPrefix(s, "file:///") { - // VS Code sends URLs with only two slashes, which are invalid. golang/go#39789. - s = "file:///" + s[len("file://"):] - } - // Even though the input is a URI, it may not be in canonical form. VS Code - // in particular over-escapes :, @, etc. Unescape and re-encode to canonicalize. - path, err := url.PathUnescape(s[len("file://"):]) - if err != nil { - panic(err) - } - - // File URIs from Windows may have lowercase drive letters. - // Since drive letters are guaranteed to be case insensitive, - // we change them to uppercase to remain consistent. - // For example, file:///c:/x/y/z becomes file:///C:/x/y/z. - if isWindowsDriveURIPath(path) { - path = path[:1] + strings.ToUpper(string(path[1])) + path[2:] - } - u := url.URL{Scheme: fileScheme, Path: path} - return URI(u.String()) -} - -// CompareURI performs a three-valued comparison of two URIs. -// Lexically unequal URIs may compare equal if they are "file:" URIs -// that share the same base name (ignoring case) and denote the same -// file device/inode, according to stat(2). 
-func CompareURI(a, b URI) int { - if equalURI(a, b) { - return 0 - } - if a < b { - return -1 - } - return 1 -} - -func equalURI(a, b URI) bool { - if a == b { - return true - } - // If we have the same URI basename, we may still have the same file URIs. - if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) { - return false - } - fa, err := filename(a) - if err != nil { - return false - } - fb, err := filename(b) - if err != nil { - return false - } - // Stat the files to check if they are equal. - infoa, err := os.Stat(filepath.FromSlash(fa)) - if err != nil { - return false - } - infob, err := os.Stat(filepath.FromSlash(fb)) - if err != nil { - return false - } - return os.SameFile(infoa, infob) -} - -// URIFromPath returns a span URI for the supplied file path. -// It will always have the file scheme. -func URIFromPath(path string) URI { - if path == "" { - return "" - } - // Handle standard library paths that contain the literal "$GOROOT". - // TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT. - const prefix = "$GOROOT" - if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) { - suffix := path[len(prefix):] - path = runtime.GOROOT() + suffix - } - if !isWindowsDrivePath(path) { - if abs, err := filepath.Abs(path); err == nil { - path = abs - } - } - // Check the file path again, in case it became absolute. - if isWindowsDrivePath(path) { - path = "/" + strings.ToUpper(string(path[0])) + path[1:] - } - path = filepath.ToSlash(path) - u := url.URL{ - Scheme: fileScheme, - Path: path, - } - return URI(u.String()) -} - -// isWindowsDrivePath returns true if the file path is of the form used by -// Windows. We check if the path begins with a drive letter, followed by a ":". -// For example: C:/x/y/z. -func isWindowsDrivePath(path string) bool { - if len(path) < 3 { - return false - } - return unicode.IsLetter(rune(path[0])) && path[1] == ':' -} - -// isWindowsDriveURI returns true if the file URI is of the format used by -// Windows URIs. The url.Parse package does not specially handle Windows paths -// (see golang/go#6027), so we check if the URI path has a drive prefix (e.g. "/C:"). -func isWindowsDriveURIPath(uri string) bool { - if len(uri) < 4 { - return false - } - return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/utf16.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/utf16.go deleted file mode 100644 index f4c93a6ea..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/span/utf16.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "fmt" - "unicode/utf8" -) - -// ToUTF16Column calculates the utf16 column expressed by the point given the -// supplied file contents. -// This is used to convert from the native (always in bytes) column -// representation and the utf16 counts used by some editors. 
-func ToUTF16Column(p Point, content []byte) (int, error) { - if !p.HasPosition() { - return -1, fmt.Errorf("ToUTF16Column: point is missing position") - } - if !p.HasOffset() { - return -1, fmt.Errorf("ToUTF16Column: point is missing offset") - } - offset := p.Offset() // 0-based - colZero := p.Column() - 1 // 0-based - if colZero == 0 { - // 0-based column 0, so it must be chr 1 - return 1, nil - } else if colZero < 0 { - return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero) - } - // work out the offset at the start of the line using the column - lineOffset := offset - colZero - if lineOffset < 0 || offset > len(content) { - return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content)) - } - // Use the offset to pick out the line start. - // This cannot panic: offset > len(content) and lineOffset < offset. - start := content[lineOffset:] - - // Now, truncate down to the supplied column. - start = start[:colZero] - - cnt := 0 - for _, r := range string(start) { - cnt++ - if r > 0xffff { - cnt++ - } - } - return cnt + 1, nil // the +1 is for 1-based columns -} - -// FromUTF16Column advances the point by the utf16 character offset given the -// supplied line contents. -// This is used to convert from the utf16 counts used by some editors to the -// native (always in bytes) column representation. -// -// The resulting Point always has an offset. -// -// TODO: it looks like this may incorrectly confer a "position" to the -// resulting Point, when it shouldn't. If p.HasPosition() == false, the -// resulting Point will return p.HasPosition() == true, but have the wrong -// position. -func FromUTF16Column(p Point, chr int, content []byte) (Point, error) { - if !p.HasOffset() { - return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset") - } - // if chr is 1 then no adjustment needed - if chr <= 1 { - return p, nil - } - if p.Offset() >= len(content) { - return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content)) - } - remains := content[p.Offset():] - // scan forward the specified number of characters - for count := 1; count < chr; count++ { - if len(remains) <= 0 { - return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content") - } - r, w := utf8.DecodeRune(remains) - if r == '\n' { - // Per the LSP spec: - // - // > If the character value is greater than the line length it - // > defaults back to the line length. - break - } - remains = remains[w:] - if r >= 0x10000 { - // a two point rune - count++ - // if we finished in a two point rune, do not advance past the first - if count >= chr { - break - } - } - p.v.Column += w - p.v.Offset += w - } - return p, nil -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/stdlib/manifest.go new file mode 100644 index 000000000..fd6892075 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -0,0 +1,17320 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate.go. DO NOT EDIT. 
+ +package stdlib + +var PackageSymbols = map[string][]Symbol{ + "archive/tar": { + {"(*Header).FileInfo", Method, 1}, + {"(*Reader).Next", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Writer).AddFS", Method, 22}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteHeader", Method, 0}, + {"(Format).String", Method, 10}, + {"ErrFieldTooLong", Var, 0}, + {"ErrHeader", Var, 0}, + {"ErrInsecurePath", Var, 20}, + {"ErrWriteAfterClose", Var, 0}, + {"ErrWriteTooLong", Var, 0}, + {"FileInfoHeader", Func, 1}, + {"Format", Type, 10}, + {"FormatGNU", Const, 10}, + {"FormatPAX", Const, 10}, + {"FormatUSTAR", Const, 10}, + {"FormatUnknown", Const, 10}, + {"Header", Type, 0}, + {"Header.AccessTime", Field, 0}, + {"Header.ChangeTime", Field, 0}, + {"Header.Devmajor", Field, 0}, + {"Header.Devminor", Field, 0}, + {"Header.Format", Field, 10}, + {"Header.Gid", Field, 0}, + {"Header.Gname", Field, 0}, + {"Header.Linkname", Field, 0}, + {"Header.ModTime", Field, 0}, + {"Header.Mode", Field, 0}, + {"Header.Name", Field, 0}, + {"Header.PAXRecords", Field, 10}, + {"Header.Size", Field, 0}, + {"Header.Typeflag", Field, 0}, + {"Header.Uid", Field, 0}, + {"Header.Uname", Field, 0}, + {"Header.Xattrs", Field, 3}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Reader", Type, 0}, + {"TypeBlock", Const, 0}, + {"TypeChar", Const, 0}, + {"TypeCont", Const, 0}, + {"TypeDir", Const, 0}, + {"TypeFifo", Const, 0}, + {"TypeGNULongLink", Const, 1}, + {"TypeGNULongName", Const, 1}, + {"TypeGNUSparse", Const, 3}, + {"TypeLink", Const, 0}, + {"TypeReg", Const, 0}, + {"TypeRegA", Const, 0}, + {"TypeSymlink", Const, 0}, + {"TypeXGlobalHeader", Const, 0}, + {"TypeXHeader", Const, 0}, + {"Writer", Type, 0}, + }, + "archive/zip": { + {"(*File).DataOffset", Method, 2}, + {"(*File).FileInfo", Method, 0}, + {"(*File).ModTime", Method, 0}, + {"(*File).Mode", Method, 0}, + {"(*File).Open", Method, 0}, + {"(*File).OpenRaw", Method, 17}, + {"(*File).SetModTime", Method, 0}, + {"(*File).SetMode", Method, 0}, + {"(*FileHeader).FileInfo", Method, 0}, + {"(*FileHeader).ModTime", Method, 0}, + {"(*FileHeader).Mode", Method, 0}, + {"(*FileHeader).SetModTime", Method, 0}, + {"(*FileHeader).SetMode", Method, 0}, + {"(*ReadCloser).Close", Method, 0}, + {"(*ReadCloser).Open", Method, 16}, + {"(*ReadCloser).RegisterDecompressor", Method, 6}, + {"(*Reader).Open", Method, 16}, + {"(*Reader).RegisterDecompressor", Method, 6}, + {"(*Writer).AddFS", Method, 22}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Copy", Method, 17}, + {"(*Writer).Create", Method, 0}, + {"(*Writer).CreateHeader", Method, 0}, + {"(*Writer).CreateRaw", Method, 17}, + {"(*Writer).Flush", Method, 4}, + {"(*Writer).RegisterCompressor", Method, 6}, + {"(*Writer).SetComment", Method, 10}, + {"(*Writer).SetOffset", Method, 5}, + {"Compressor", Type, 2}, + {"Decompressor", Type, 2}, + {"Deflate", Const, 0}, + {"ErrAlgorithm", Var, 0}, + {"ErrChecksum", Var, 0}, + {"ErrFormat", Var, 0}, + {"ErrInsecurePath", Var, 20}, + {"File", Type, 0}, + {"File.FileHeader", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.CRC32", Field, 0}, + {"FileHeader.Comment", Field, 0}, + {"FileHeader.CompressedSize", Field, 0}, + {"FileHeader.CompressedSize64", Field, 1}, + {"FileHeader.CreatorVersion", Field, 0}, + {"FileHeader.ExternalAttrs", Field, 0}, + {"FileHeader.Extra", Field, 0}, + {"FileHeader.Flags", Field, 0}, + {"FileHeader.Method", Field, 0}, + {"FileHeader.Modified", Field, 10}, + {"FileHeader.ModifiedDate", 
Field, 0}, + {"FileHeader.ModifiedTime", Field, 0}, + {"FileHeader.Name", Field, 0}, + {"FileHeader.NonUTF8", Field, 10}, + {"FileHeader.ReaderVersion", Field, 0}, + {"FileHeader.UncompressedSize", Field, 0}, + {"FileHeader.UncompressedSize64", Field, 1}, + {"FileInfoHeader", Func, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"OpenReader", Func, 0}, + {"ReadCloser", Type, 0}, + {"ReadCloser.Reader", Field, 0}, + {"Reader", Type, 0}, + {"Reader.Comment", Field, 0}, + {"Reader.File", Field, 0}, + {"RegisterCompressor", Func, 2}, + {"RegisterDecompressor", Func, 2}, + {"Store", Const, 0}, + {"Writer", Type, 0}, + }, + "bufio": { + {"(*Reader).Buffered", Method, 0}, + {"(*Reader).Discard", Method, 5}, + {"(*Reader).Peek", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadBytes", Method, 0}, + {"(*Reader).ReadLine", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).ReadSlice", Method, 0}, + {"(*Reader).ReadString", Method, 0}, + {"(*Reader).Reset", Method, 2}, + {"(*Reader).Size", Method, 10}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"(*Scanner).Buffer", Method, 6}, + {"(*Scanner).Bytes", Method, 1}, + {"(*Scanner).Err", Method, 1}, + {"(*Scanner).Scan", Method, 1}, + {"(*Scanner).Split", Method, 1}, + {"(*Scanner).Text", Method, 1}, + {"(*Writer).Available", Method, 0}, + {"(*Writer).AvailableBuffer", Method, 18}, + {"(*Writer).Buffered", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).ReadFrom", Method, 1}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Size", Method, 10}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteByte", Method, 0}, + {"(*Writer).WriteRune", Method, 0}, + {"(*Writer).WriteString", Method, 0}, + {"(ReadWriter).Available", Method, 0}, + {"(ReadWriter).AvailableBuffer", Method, 18}, + {"(ReadWriter).Discard", Method, 5}, + {"(ReadWriter).Flush", Method, 0}, + {"(ReadWriter).Peek", Method, 0}, + {"(ReadWriter).Read", Method, 0}, + {"(ReadWriter).ReadByte", Method, 0}, + {"(ReadWriter).ReadBytes", Method, 0}, + {"(ReadWriter).ReadFrom", Method, 1}, + {"(ReadWriter).ReadLine", Method, 0}, + {"(ReadWriter).ReadRune", Method, 0}, + {"(ReadWriter).ReadSlice", Method, 0}, + {"(ReadWriter).ReadString", Method, 0}, + {"(ReadWriter).UnreadByte", Method, 0}, + {"(ReadWriter).UnreadRune", Method, 0}, + {"(ReadWriter).Write", Method, 0}, + {"(ReadWriter).WriteByte", Method, 0}, + {"(ReadWriter).WriteRune", Method, 0}, + {"(ReadWriter).WriteString", Method, 0}, + {"(ReadWriter).WriteTo", Method, 1}, + {"ErrAdvanceTooFar", Var, 1}, + {"ErrBadReadCount", Var, 15}, + {"ErrBufferFull", Var, 0}, + {"ErrFinalToken", Var, 6}, + {"ErrInvalidUnreadByte", Var, 0}, + {"ErrInvalidUnreadRune", Var, 0}, + {"ErrNegativeAdvance", Var, 1}, + {"ErrNegativeCount", Var, 0}, + {"ErrTooLong", Var, 1}, + {"MaxScanTokenSize", Const, 1}, + {"NewReadWriter", Func, 0}, + {"NewReader", Func, 0}, + {"NewReaderSize", Func, 0}, + {"NewScanner", Func, 1}, + {"NewWriter", Func, 0}, + {"NewWriterSize", Func, 0}, + {"ReadWriter", Type, 0}, + {"ReadWriter.Reader", Field, 0}, + {"ReadWriter.Writer", Field, 0}, + {"Reader", Type, 0}, + {"ScanBytes", Func, 1}, + {"ScanLines", Func, 1}, + {"ScanRunes", Func, 1}, + {"ScanWords", Func, 1}, + {"Scanner", Type, 1}, + {"SplitFunc", Type, 1}, + {"Writer", Type, 0}, + }, + "bytes": { + {"(*Buffer).Available", Method, 21}, + {"(*Buffer).AvailableBuffer", Method, 21}, + {"(*Buffer).Bytes", Method, 0}, + 
{"(*Buffer).Cap", Method, 5}, + {"(*Buffer).Grow", Method, 1}, + {"(*Buffer).Len", Method, 0}, + {"(*Buffer).Next", Method, 0}, + {"(*Buffer).Read", Method, 0}, + {"(*Buffer).ReadByte", Method, 0}, + {"(*Buffer).ReadBytes", Method, 0}, + {"(*Buffer).ReadFrom", Method, 0}, + {"(*Buffer).ReadRune", Method, 0}, + {"(*Buffer).ReadString", Method, 0}, + {"(*Buffer).Reset", Method, 0}, + {"(*Buffer).String", Method, 0}, + {"(*Buffer).Truncate", Method, 0}, + {"(*Buffer).UnreadByte", Method, 0}, + {"(*Buffer).UnreadRune", Method, 0}, + {"(*Buffer).Write", Method, 0}, + {"(*Buffer).WriteByte", Method, 0}, + {"(*Buffer).WriteRune", Method, 0}, + {"(*Buffer).WriteString", Method, 0}, + {"(*Buffer).WriteTo", Method, 0}, + {"(*Reader).Len", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadAt", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).Reset", Method, 7}, + {"(*Reader).Seek", Method, 0}, + {"(*Reader).Size", Method, 5}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"Buffer", Type, 0}, + {"Clone", Func, 20}, + {"Compare", Func, 0}, + {"Contains", Func, 0}, + {"ContainsAny", Func, 7}, + {"ContainsFunc", Func, 21}, + {"ContainsRune", Func, 7}, + {"Count", Func, 0}, + {"Cut", Func, 18}, + {"CutPrefix", Func, 20}, + {"CutSuffix", Func, 20}, + {"Equal", Func, 0}, + {"EqualFold", Func, 0}, + {"ErrTooLarge", Var, 0}, + {"Fields", Func, 0}, + {"FieldsFunc", Func, 0}, + {"HasPrefix", Func, 0}, + {"HasSuffix", Func, 0}, + {"Index", Func, 0}, + {"IndexAny", Func, 0}, + {"IndexByte", Func, 0}, + {"IndexFunc", Func, 0}, + {"IndexRune", Func, 0}, + {"Join", Func, 0}, + {"LastIndex", Func, 0}, + {"LastIndexAny", Func, 0}, + {"LastIndexByte", Func, 5}, + {"LastIndexFunc", Func, 0}, + {"Map", Func, 0}, + {"MinRead", Const, 0}, + {"NewBuffer", Func, 0}, + {"NewBufferString", Func, 0}, + {"NewReader", Func, 0}, + {"Reader", Type, 0}, + {"Repeat", Func, 0}, + {"Replace", Func, 0}, + {"ReplaceAll", Func, 12}, + {"Runes", Func, 0}, + {"Split", Func, 0}, + {"SplitAfter", Func, 0}, + {"SplitAfterN", Func, 0}, + {"SplitN", Func, 0}, + {"Title", Func, 0}, + {"ToLower", Func, 0}, + {"ToLowerSpecial", Func, 0}, + {"ToTitle", Func, 0}, + {"ToTitleSpecial", Func, 0}, + {"ToUpper", Func, 0}, + {"ToUpperSpecial", Func, 0}, + {"ToValidUTF8", Func, 13}, + {"Trim", Func, 0}, + {"TrimFunc", Func, 0}, + {"TrimLeft", Func, 0}, + {"TrimLeftFunc", Func, 0}, + {"TrimPrefix", Func, 1}, + {"TrimRight", Func, 0}, + {"TrimRightFunc", Func, 0}, + {"TrimSpace", Func, 0}, + {"TrimSuffix", Func, 1}, + }, + "cmp": { + {"Compare", Func, 21}, + {"Less", Func, 21}, + {"Or", Func, 22}, + {"Ordered", Type, 21}, + }, + "compress/bzip2": { + {"(StructuralError).Error", Method, 0}, + {"NewReader", Func, 0}, + {"StructuralError", Type, 0}, + }, + "compress/flate": { + {"(*ReadError).Error", Method, 0}, + {"(*WriteError).Error", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"(CorruptInputError).Error", Method, 0}, + {"(InternalError).Error", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"CorruptInputError", Type, 0}, + {"DefaultCompression", Const, 0}, + {"HuffmanOnly", Const, 7}, + {"InternalError", Type, 0}, + {"NewReader", Func, 0}, + {"NewReaderDict", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterDict", Func, 0}, + {"NoCompression", Const, 0}, + {"ReadError", Type, 0}, + 
{"ReadError.Err", Field, 0}, + {"ReadError.Offset", Field, 0}, + {"Reader", Type, 0}, + {"Resetter", Type, 4}, + {"WriteError", Type, 0}, + {"WriteError.Err", Field, 0}, + {"WriteError.Offset", Field, 0}, + {"Writer", Type, 0}, + }, + "compress/gzip": { + {"(*Reader).Close", Method, 0}, + {"(*Reader).Multistream", Method, 4}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).Reset", Method, 3}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 1}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"DefaultCompression", Const, 0}, + {"ErrChecksum", Var, 0}, + {"ErrHeader", Var, 0}, + {"Header", Type, 0}, + {"Header.Comment", Field, 0}, + {"Header.Extra", Field, 0}, + {"Header.ModTime", Field, 0}, + {"Header.Name", Field, 0}, + {"Header.OS", Field, 0}, + {"HuffmanOnly", Const, 8}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterLevel", Func, 0}, + {"NoCompression", Const, 0}, + {"Reader", Type, 0}, + {"Reader.Header", Field, 0}, + {"Writer", Type, 0}, + {"Writer.Header", Field, 0}, + }, + "compress/lzw": { + {"(*Reader).Close", Method, 17}, + {"(*Reader).Read", Method, 17}, + {"(*Reader).Reset", Method, 17}, + {"(*Writer).Close", Method, 17}, + {"(*Writer).Reset", Method, 17}, + {"(*Writer).Write", Method, 17}, + {"LSB", Const, 0}, + {"MSB", Const, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Order", Type, 0}, + {"Reader", Type, 17}, + {"Writer", Type, 17}, + }, + "compress/zlib": { + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"DefaultCompression", Const, 0}, + {"ErrChecksum", Var, 0}, + {"ErrDictionary", Var, 0}, + {"ErrHeader", Var, 0}, + {"HuffmanOnly", Const, 8}, + {"NewReader", Func, 0}, + {"NewReaderDict", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterLevel", Func, 0}, + {"NewWriterLevelDict", Func, 0}, + {"NoCompression", Const, 0}, + {"Resetter", Type, 4}, + {"Writer", Type, 0}, + }, + "container/heap": { + {"Fix", Func, 2}, + {"Init", Func, 0}, + {"Interface", Type, 0}, + {"Pop", Func, 0}, + {"Push", Func, 0}, + {"Remove", Func, 0}, + }, + "container/list": { + {"(*Element).Next", Method, 0}, + {"(*Element).Prev", Method, 0}, + {"(*List).Back", Method, 0}, + {"(*List).Front", Method, 0}, + {"(*List).Init", Method, 0}, + {"(*List).InsertAfter", Method, 0}, + {"(*List).InsertBefore", Method, 0}, + {"(*List).Len", Method, 0}, + {"(*List).MoveAfter", Method, 2}, + {"(*List).MoveBefore", Method, 2}, + {"(*List).MoveToBack", Method, 0}, + {"(*List).MoveToFront", Method, 0}, + {"(*List).PushBack", Method, 0}, + {"(*List).PushBackList", Method, 0}, + {"(*List).PushFront", Method, 0}, + {"(*List).PushFrontList", Method, 0}, + {"(*List).Remove", Method, 0}, + {"Element", Type, 0}, + {"Element.Value", Field, 0}, + {"List", Type, 0}, + {"New", Func, 0}, + }, + "container/ring": { + {"(*Ring).Do", Method, 0}, + {"(*Ring).Len", Method, 0}, + {"(*Ring).Link", Method, 0}, + {"(*Ring).Move", Method, 0}, + {"(*Ring).Next", Method, 0}, + {"(*Ring).Prev", Method, 0}, + {"(*Ring).Unlink", Method, 0}, + {"New", Func, 0}, + {"Ring", Type, 0}, + {"Ring.Value", Field, 0}, + }, + "context": { + {"AfterFunc", Func, 21}, + {"Background", Func, 7}, + {"CancelCauseFunc", Type, 20}, + {"CancelFunc", Type, 7}, + {"Canceled", Var, 7}, + {"Cause", Func, 20}, + {"Context", Type, 7}, + {"DeadlineExceeded", Var, 7}, + {"TODO", Func, 7}, + 
{"WithCancel", Func, 7}, + {"WithCancelCause", Func, 20}, + {"WithDeadline", Func, 7}, + {"WithDeadlineCause", Func, 21}, + {"WithTimeout", Func, 7}, + {"WithTimeoutCause", Func, 21}, + {"WithValue", Func, 7}, + {"WithoutCancel", Func, 21}, + }, + "crypto": { + {"(Hash).Available", Method, 0}, + {"(Hash).HashFunc", Method, 4}, + {"(Hash).New", Method, 0}, + {"(Hash).Size", Method, 0}, + {"(Hash).String", Method, 15}, + {"BLAKE2b_256", Const, 9}, + {"BLAKE2b_384", Const, 9}, + {"BLAKE2b_512", Const, 9}, + {"BLAKE2s_256", Const, 9}, + {"Decrypter", Type, 5}, + {"DecrypterOpts", Type, 5}, + {"Hash", Type, 0}, + {"MD4", Const, 0}, + {"MD5", Const, 0}, + {"MD5SHA1", Const, 0}, + {"PrivateKey", Type, 0}, + {"PublicKey", Type, 2}, + {"RIPEMD160", Const, 0}, + {"RegisterHash", Func, 0}, + {"SHA1", Const, 0}, + {"SHA224", Const, 0}, + {"SHA256", Const, 0}, + {"SHA384", Const, 0}, + {"SHA3_224", Const, 4}, + {"SHA3_256", Const, 4}, + {"SHA3_384", Const, 4}, + {"SHA3_512", Const, 4}, + {"SHA512", Const, 0}, + {"SHA512_224", Const, 5}, + {"SHA512_256", Const, 5}, + {"Signer", Type, 4}, + {"SignerOpts", Type, 4}, + }, + "crypto/aes": { + {"(KeySizeError).Error", Method, 0}, + {"BlockSize", Const, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + }, + "crypto/cipher": { + {"(StreamReader).Read", Method, 0}, + {"(StreamWriter).Close", Method, 0}, + {"(StreamWriter).Write", Method, 0}, + {"AEAD", Type, 2}, + {"Block", Type, 0}, + {"BlockMode", Type, 0}, + {"NewCBCDecrypter", Func, 0}, + {"NewCBCEncrypter", Func, 0}, + {"NewCFBDecrypter", Func, 0}, + {"NewCFBEncrypter", Func, 0}, + {"NewCTR", Func, 0}, + {"NewGCM", Func, 2}, + {"NewGCMWithNonceSize", Func, 5}, + {"NewGCMWithTagSize", Func, 11}, + {"NewOFB", Func, 0}, + {"Stream", Type, 0}, + {"StreamReader", Type, 0}, + {"StreamReader.R", Field, 0}, + {"StreamReader.S", Field, 0}, + {"StreamWriter", Type, 0}, + {"StreamWriter.Err", Field, 0}, + {"StreamWriter.S", Field, 0}, + {"StreamWriter.W", Field, 0}, + }, + "crypto/des": { + {"(KeySizeError).Error", Method, 0}, + {"BlockSize", Const, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + {"NewTripleDESCipher", Func, 0}, + }, + "crypto/dsa": { + {"ErrInvalidPublicKey", Var, 0}, + {"GenerateKey", Func, 0}, + {"GenerateParameters", Func, 0}, + {"L1024N160", Const, 0}, + {"L2048N224", Const, 0}, + {"L2048N256", Const, 0}, + {"L3072N256", Const, 0}, + {"ParameterSizes", Type, 0}, + {"Parameters", Type, 0}, + {"Parameters.G", Field, 0}, + {"Parameters.P", Field, 0}, + {"Parameters.Q", Field, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PrivateKey.X", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.Parameters", Field, 0}, + {"PublicKey.Y", Field, 0}, + {"Sign", Func, 0}, + {"Verify", Func, 0}, + }, + "crypto/ecdh": { + {"(*PrivateKey).Bytes", Method, 20}, + {"(*PrivateKey).Curve", Method, 20}, + {"(*PrivateKey).ECDH", Method, 20}, + {"(*PrivateKey).Equal", Method, 20}, + {"(*PrivateKey).Public", Method, 20}, + {"(*PrivateKey).PublicKey", Method, 20}, + {"(*PublicKey).Bytes", Method, 20}, + {"(*PublicKey).Curve", Method, 20}, + {"(*PublicKey).Equal", Method, 20}, + {"Curve", Type, 20}, + {"P256", Func, 20}, + {"P384", Func, 20}, + {"P521", Func, 20}, + {"PrivateKey", Type, 20}, + {"PublicKey", Type, 20}, + {"X25519", Func, 20}, + }, + "crypto/ecdsa": { + {"(*PrivateKey).ECDH", Method, 20}, + {"(*PrivateKey).Equal", Method, 15}, + {"(*PrivateKey).Public", Method, 4}, + {"(*PrivateKey).Sign", Method, 4}, + {"(*PublicKey).ECDH", Method, 20}, + 
{"(*PublicKey).Equal", Method, 15}, + {"(PrivateKey).Add", Method, 0}, + {"(PrivateKey).Double", Method, 0}, + {"(PrivateKey).IsOnCurve", Method, 0}, + {"(PrivateKey).Params", Method, 0}, + {"(PrivateKey).ScalarBaseMult", Method, 0}, + {"(PrivateKey).ScalarMult", Method, 0}, + {"(PublicKey).Add", Method, 0}, + {"(PublicKey).Double", Method, 0}, + {"(PublicKey).IsOnCurve", Method, 0}, + {"(PublicKey).Params", Method, 0}, + {"(PublicKey).ScalarBaseMult", Method, 0}, + {"(PublicKey).ScalarMult", Method, 0}, + {"GenerateKey", Func, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.D", Field, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.Curve", Field, 0}, + {"PublicKey.X", Field, 0}, + {"PublicKey.Y", Field, 0}, + {"Sign", Func, 0}, + {"SignASN1", Func, 15}, + {"Verify", Func, 0}, + {"VerifyASN1", Func, 15}, + }, + "crypto/ed25519": { + {"(*Options).HashFunc", Method, 20}, + {"(PrivateKey).Equal", Method, 15}, + {"(PrivateKey).Public", Method, 13}, + {"(PrivateKey).Seed", Method, 13}, + {"(PrivateKey).Sign", Method, 13}, + {"(PublicKey).Equal", Method, 15}, + {"GenerateKey", Func, 13}, + {"NewKeyFromSeed", Func, 13}, + {"Options", Type, 20}, + {"Options.Context", Field, 20}, + {"Options.Hash", Field, 20}, + {"PrivateKey", Type, 13}, + {"PrivateKeySize", Const, 13}, + {"PublicKey", Type, 13}, + {"PublicKeySize", Const, 13}, + {"SeedSize", Const, 13}, + {"Sign", Func, 13}, + {"SignatureSize", Const, 13}, + {"Verify", Func, 13}, + {"VerifyWithOptions", Func, 20}, + }, + "crypto/elliptic": { + {"(*CurveParams).Add", Method, 0}, + {"(*CurveParams).Double", Method, 0}, + {"(*CurveParams).IsOnCurve", Method, 0}, + {"(*CurveParams).Params", Method, 0}, + {"(*CurveParams).ScalarBaseMult", Method, 0}, + {"(*CurveParams).ScalarMult", Method, 0}, + {"Curve", Type, 0}, + {"CurveParams", Type, 0}, + {"CurveParams.B", Field, 0}, + {"CurveParams.BitSize", Field, 0}, + {"CurveParams.Gx", Field, 0}, + {"CurveParams.Gy", Field, 0}, + {"CurveParams.N", Field, 0}, + {"CurveParams.Name", Field, 5}, + {"CurveParams.P", Field, 0}, + {"GenerateKey", Func, 0}, + {"Marshal", Func, 0}, + {"MarshalCompressed", Func, 15}, + {"P224", Func, 0}, + {"P256", Func, 0}, + {"P384", Func, 0}, + {"P521", Func, 0}, + {"Unmarshal", Func, 0}, + {"UnmarshalCompressed", Func, 15}, + }, + "crypto/hmac": { + {"Equal", Func, 1}, + {"New", Func, 0}, + }, + "crypto/md5": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Sum", Func, 2}, + }, + "crypto/rand": { + {"Int", Func, 0}, + {"Prime", Func, 0}, + {"Read", Func, 0}, + {"Reader", Var, 0}, + }, + "crypto/rc4": { + {"(*Cipher).Reset", Method, 0}, + {"(*Cipher).XORKeyStream", Method, 0}, + {"(KeySizeError).Error", Method, 0}, + {"Cipher", Type, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + }, + "crypto/rsa": { + {"(*PSSOptions).HashFunc", Method, 4}, + {"(*PrivateKey).Decrypt", Method, 5}, + {"(*PrivateKey).Equal", Method, 15}, + {"(*PrivateKey).Precompute", Method, 0}, + {"(*PrivateKey).Public", Method, 4}, + {"(*PrivateKey).Sign", Method, 4}, + {"(*PrivateKey).Size", Method, 11}, + {"(*PrivateKey).Validate", Method, 0}, + {"(*PublicKey).Equal", Method, 15}, + {"(*PublicKey).Size", Method, 11}, + {"CRTValue", Type, 0}, + {"CRTValue.Coeff", Field, 0}, + {"CRTValue.Exp", Field, 0}, + {"CRTValue.R", Field, 0}, + {"DecryptOAEP", Func, 0}, + {"DecryptPKCS1v15", Func, 0}, + {"DecryptPKCS1v15SessionKey", Func, 0}, + {"EncryptOAEP", Func, 0}, + {"EncryptPKCS1v15", Func, 0}, + {"ErrDecryption", Var, 0}, + {"ErrMessageTooLong", 
Var, 0}, + {"ErrVerification", Var, 0}, + {"GenerateKey", Func, 0}, + {"GenerateMultiPrimeKey", Func, 0}, + {"OAEPOptions", Type, 5}, + {"OAEPOptions.Hash", Field, 5}, + {"OAEPOptions.Label", Field, 5}, + {"OAEPOptions.MGFHash", Field, 20}, + {"PKCS1v15DecryptOptions", Type, 5}, + {"PKCS1v15DecryptOptions.SessionKeyLen", Field, 5}, + {"PSSOptions", Type, 2}, + {"PSSOptions.Hash", Field, 4}, + {"PSSOptions.SaltLength", Field, 2}, + {"PSSSaltLengthAuto", Const, 2}, + {"PSSSaltLengthEqualsHash", Const, 2}, + {"PrecomputedValues", Type, 0}, + {"PrecomputedValues.CRTValues", Field, 0}, + {"PrecomputedValues.Dp", Field, 0}, + {"PrecomputedValues.Dq", Field, 0}, + {"PrecomputedValues.Qinv", Field, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.D", Field, 0}, + {"PrivateKey.Precomputed", Field, 0}, + {"PrivateKey.Primes", Field, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.E", Field, 0}, + {"PublicKey.N", Field, 0}, + {"SignPKCS1v15", Func, 0}, + {"SignPSS", Func, 2}, + {"VerifyPKCS1v15", Func, 0}, + {"VerifyPSS", Func, 2}, + }, + "crypto/sha1": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Sum", Func, 2}, + }, + "crypto/sha256": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"New224", Func, 0}, + {"Size", Const, 0}, + {"Size224", Const, 0}, + {"Sum224", Func, 2}, + {"Sum256", Func, 2}, + }, + "crypto/sha512": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"New384", Func, 0}, + {"New512_224", Func, 5}, + {"New512_256", Func, 5}, + {"Size", Const, 0}, + {"Size224", Const, 5}, + {"Size256", Const, 5}, + {"Size384", Const, 0}, + {"Sum384", Func, 2}, + {"Sum512", Func, 2}, + {"Sum512_224", Func, 5}, + {"Sum512_256", Func, 5}, + }, + "crypto/subtle": { + {"ConstantTimeByteEq", Func, 0}, + {"ConstantTimeCompare", Func, 0}, + {"ConstantTimeCopy", Func, 0}, + {"ConstantTimeEq", Func, 0}, + {"ConstantTimeLessOrEq", Func, 2}, + {"ConstantTimeSelect", Func, 0}, + {"XORBytes", Func, 20}, + }, + "crypto/tls": { + {"(*CertificateRequestInfo).Context", Method, 17}, + {"(*CertificateRequestInfo).SupportsCertificate", Method, 14}, + {"(*CertificateVerificationError).Error", Method, 20}, + {"(*CertificateVerificationError).Unwrap", Method, 20}, + {"(*ClientHelloInfo).Context", Method, 17}, + {"(*ClientHelloInfo).SupportsCertificate", Method, 14}, + {"(*ClientSessionState).ResumptionState", Method, 21}, + {"(*Config).BuildNameToCertificate", Method, 0}, + {"(*Config).Clone", Method, 8}, + {"(*Config).DecryptTicket", Method, 21}, + {"(*Config).EncryptTicket", Method, 21}, + {"(*Config).SetSessionTicketKeys", Method, 5}, + {"(*Conn).Close", Method, 0}, + {"(*Conn).CloseWrite", Method, 8}, + {"(*Conn).ConnectionState", Method, 0}, + {"(*Conn).Handshake", Method, 0}, + {"(*Conn).HandshakeContext", Method, 17}, + {"(*Conn).LocalAddr", Method, 0}, + {"(*Conn).NetConn", Method, 18}, + {"(*Conn).OCSPResponse", Method, 0}, + {"(*Conn).Read", Method, 0}, + {"(*Conn).RemoteAddr", Method, 0}, + {"(*Conn).SetDeadline", Method, 0}, + {"(*Conn).SetReadDeadline", Method, 0}, + {"(*Conn).SetWriteDeadline", Method, 0}, + {"(*Conn).VerifyHostname", Method, 0}, + {"(*Conn).Write", Method, 0}, + {"(*ConnectionState).ExportKeyingMaterial", Method, 11}, + {"(*Dialer).Dial", Method, 15}, + {"(*Dialer).DialContext", Method, 15}, + {"(*QUICConn).Close", Method, 21}, + {"(*QUICConn).ConnectionState", Method, 21}, + {"(*QUICConn).HandleData", Method, 21}, + {"(*QUICConn).NextEvent", Method, 21}, + {"(*QUICConn).SendSessionTicket", Method, 21}, + 
{"(*QUICConn).SetTransportParameters", Method, 21}, + {"(*QUICConn).Start", Method, 21}, + {"(*SessionState).Bytes", Method, 21}, + {"(AlertError).Error", Method, 21}, + {"(ClientAuthType).String", Method, 15}, + {"(CurveID).String", Method, 15}, + {"(QUICEncryptionLevel).String", Method, 21}, + {"(RecordHeaderError).Error", Method, 6}, + {"(SignatureScheme).String", Method, 15}, + {"AlertError", Type, 21}, + {"Certificate", Type, 0}, + {"Certificate.Certificate", Field, 0}, + {"Certificate.Leaf", Field, 0}, + {"Certificate.OCSPStaple", Field, 0}, + {"Certificate.PrivateKey", Field, 0}, + {"Certificate.SignedCertificateTimestamps", Field, 5}, + {"Certificate.SupportedSignatureAlgorithms", Field, 14}, + {"CertificateRequestInfo", Type, 8}, + {"CertificateRequestInfo.AcceptableCAs", Field, 8}, + {"CertificateRequestInfo.SignatureSchemes", Field, 8}, + {"CertificateRequestInfo.Version", Field, 14}, + {"CertificateVerificationError", Type, 20}, + {"CertificateVerificationError.Err", Field, 20}, + {"CertificateVerificationError.UnverifiedCertificates", Field, 20}, + {"CipherSuite", Type, 14}, + {"CipherSuite.ID", Field, 14}, + {"CipherSuite.Insecure", Field, 14}, + {"CipherSuite.Name", Field, 14}, + {"CipherSuite.SupportedVersions", Field, 14}, + {"CipherSuiteName", Func, 14}, + {"CipherSuites", Func, 14}, + {"Client", Func, 0}, + {"ClientAuthType", Type, 0}, + {"ClientHelloInfo", Type, 4}, + {"ClientHelloInfo.CipherSuites", Field, 4}, + {"ClientHelloInfo.Conn", Field, 8}, + {"ClientHelloInfo.ServerName", Field, 4}, + {"ClientHelloInfo.SignatureSchemes", Field, 8}, + {"ClientHelloInfo.SupportedCurves", Field, 4}, + {"ClientHelloInfo.SupportedPoints", Field, 4}, + {"ClientHelloInfo.SupportedProtos", Field, 8}, + {"ClientHelloInfo.SupportedVersions", Field, 8}, + {"ClientSessionCache", Type, 3}, + {"ClientSessionState", Type, 3}, + {"Config", Type, 0}, + {"Config.Certificates", Field, 0}, + {"Config.CipherSuites", Field, 0}, + {"Config.ClientAuth", Field, 0}, + {"Config.ClientCAs", Field, 0}, + {"Config.ClientSessionCache", Field, 3}, + {"Config.CurvePreferences", Field, 3}, + {"Config.DynamicRecordSizingDisabled", Field, 7}, + {"Config.GetCertificate", Field, 4}, + {"Config.GetClientCertificate", Field, 8}, + {"Config.GetConfigForClient", Field, 8}, + {"Config.InsecureSkipVerify", Field, 0}, + {"Config.KeyLogWriter", Field, 8}, + {"Config.MaxVersion", Field, 2}, + {"Config.MinVersion", Field, 2}, + {"Config.NameToCertificate", Field, 0}, + {"Config.NextProtos", Field, 0}, + {"Config.PreferServerCipherSuites", Field, 1}, + {"Config.Rand", Field, 0}, + {"Config.Renegotiation", Field, 7}, + {"Config.RootCAs", Field, 0}, + {"Config.ServerName", Field, 0}, + {"Config.SessionTicketKey", Field, 1}, + {"Config.SessionTicketsDisabled", Field, 1}, + {"Config.Time", Field, 0}, + {"Config.UnwrapSession", Field, 21}, + {"Config.VerifyConnection", Field, 15}, + {"Config.VerifyPeerCertificate", Field, 8}, + {"Config.WrapSession", Field, 21}, + {"Conn", Type, 0}, + {"ConnectionState", Type, 0}, + {"ConnectionState.CipherSuite", Field, 0}, + {"ConnectionState.DidResume", Field, 1}, + {"ConnectionState.HandshakeComplete", Field, 0}, + {"ConnectionState.NegotiatedProtocol", Field, 0}, + {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0}, + {"ConnectionState.OCSPResponse", Field, 5}, + {"ConnectionState.PeerCertificates", Field, 0}, + {"ConnectionState.ServerName", Field, 0}, + {"ConnectionState.SignedCertificateTimestamps", Field, 5}, + {"ConnectionState.TLSUnique", Field, 4}, + 
{"ConnectionState.VerifiedChains", Field, 0}, + {"ConnectionState.Version", Field, 3}, + {"CurveID", Type, 3}, + {"CurveP256", Const, 3}, + {"CurveP384", Const, 3}, + {"CurveP521", Const, 3}, + {"Dial", Func, 0}, + {"DialWithDialer", Func, 3}, + {"Dialer", Type, 15}, + {"Dialer.Config", Field, 15}, + {"Dialer.NetDialer", Field, 15}, + {"ECDSAWithP256AndSHA256", Const, 8}, + {"ECDSAWithP384AndSHA384", Const, 8}, + {"ECDSAWithP521AndSHA512", Const, 8}, + {"ECDSAWithSHA1", Const, 10}, + {"Ed25519", Const, 13}, + {"InsecureCipherSuites", Func, 14}, + {"Listen", Func, 0}, + {"LoadX509KeyPair", Func, 0}, + {"NewLRUClientSessionCache", Func, 3}, + {"NewListener", Func, 0}, + {"NewResumptionState", Func, 21}, + {"NoClientCert", Const, 0}, + {"PKCS1WithSHA1", Const, 8}, + {"PKCS1WithSHA256", Const, 8}, + {"PKCS1WithSHA384", Const, 8}, + {"PKCS1WithSHA512", Const, 8}, + {"PSSWithSHA256", Const, 8}, + {"PSSWithSHA384", Const, 8}, + {"PSSWithSHA512", Const, 8}, + {"ParseSessionState", Func, 21}, + {"QUICClient", Func, 21}, + {"QUICConfig", Type, 21}, + {"QUICConfig.TLSConfig", Field, 21}, + {"QUICConn", Type, 21}, + {"QUICEncryptionLevel", Type, 21}, + {"QUICEncryptionLevelApplication", Const, 21}, + {"QUICEncryptionLevelEarly", Const, 21}, + {"QUICEncryptionLevelHandshake", Const, 21}, + {"QUICEncryptionLevelInitial", Const, 21}, + {"QUICEvent", Type, 21}, + {"QUICEvent.Data", Field, 21}, + {"QUICEvent.Kind", Field, 21}, + {"QUICEvent.Level", Field, 21}, + {"QUICEvent.Suite", Field, 21}, + {"QUICEventKind", Type, 21}, + {"QUICHandshakeDone", Const, 21}, + {"QUICNoEvent", Const, 21}, + {"QUICRejectedEarlyData", Const, 21}, + {"QUICServer", Func, 21}, + {"QUICSessionTicketOptions", Type, 21}, + {"QUICSessionTicketOptions.EarlyData", Field, 21}, + {"QUICSetReadSecret", Const, 21}, + {"QUICSetWriteSecret", Const, 21}, + {"QUICTransportParameters", Const, 21}, + {"QUICTransportParametersRequired", Const, 21}, + {"QUICWriteData", Const, 21}, + {"RecordHeaderError", Type, 6}, + {"RecordHeaderError.Conn", Field, 12}, + {"RecordHeaderError.Msg", Field, 6}, + {"RecordHeaderError.RecordHeader", Field, 6}, + {"RenegotiateFreelyAsClient", Const, 7}, + {"RenegotiateNever", Const, 7}, + {"RenegotiateOnceAsClient", Const, 7}, + {"RenegotiationSupport", Type, 7}, + {"RequestClientCert", Const, 0}, + {"RequireAndVerifyClientCert", Const, 0}, + {"RequireAnyClientCert", Const, 0}, + {"Server", Func, 0}, + {"SessionState", Type, 21}, + {"SessionState.EarlyData", Field, 21}, + {"SessionState.Extra", Field, 21}, + {"SignatureScheme", Type, 8}, + {"TLS_AES_128_GCM_SHA256", Const, 12}, + {"TLS_AES_256_GCM_SHA384", Const, 12}, + {"TLS_CHACHA20_POLY1305_SHA256", Const, 12}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", Const, 5}, + {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", Const, 8}, + {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14}, + {"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", Const, 2}, + {"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0}, + {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", Const, 0}, + {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", Const, 2}, + {"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", Const, 1}, + {"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", Const, 5}, + {"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", Const, 8}, + 
{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14}, + {"TLS_ECDHE_RSA_WITH_RC4_128_SHA", Const, 0}, + {"TLS_FALLBACK_SCSV", Const, 4}, + {"TLS_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0}, + {"TLS_RSA_WITH_AES_128_CBC_SHA", Const, 0}, + {"TLS_RSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_RSA_WITH_AES_128_GCM_SHA256", Const, 6}, + {"TLS_RSA_WITH_AES_256_CBC_SHA", Const, 1}, + {"TLS_RSA_WITH_AES_256_GCM_SHA384", Const, 6}, + {"TLS_RSA_WITH_RC4_128_SHA", Const, 0}, + {"VerifyClientCertIfGiven", Const, 0}, + {"VersionName", Func, 21}, + {"VersionSSL30", Const, 2}, + {"VersionTLS10", Const, 2}, + {"VersionTLS11", Const, 2}, + {"VersionTLS12", Const, 2}, + {"VersionTLS13", Const, 12}, + {"X25519", Const, 8}, + {"X509KeyPair", Func, 0}, + }, + "crypto/x509": { + {"(*CertPool).AddCert", Method, 0}, + {"(*CertPool).AddCertWithConstraint", Method, 22}, + {"(*CertPool).AppendCertsFromPEM", Method, 0}, + {"(*CertPool).Clone", Method, 19}, + {"(*CertPool).Equal", Method, 19}, + {"(*CertPool).Subjects", Method, 0}, + {"(*Certificate).CheckCRLSignature", Method, 0}, + {"(*Certificate).CheckSignature", Method, 0}, + {"(*Certificate).CheckSignatureFrom", Method, 0}, + {"(*Certificate).CreateCRL", Method, 0}, + {"(*Certificate).Equal", Method, 0}, + {"(*Certificate).Verify", Method, 0}, + {"(*Certificate).VerifyHostname", Method, 0}, + {"(*CertificateRequest).CheckSignature", Method, 5}, + {"(*RevocationList).CheckSignatureFrom", Method, 19}, + {"(CertificateInvalidError).Error", Method, 0}, + {"(ConstraintViolationError).Error", Method, 0}, + {"(HostnameError).Error", Method, 0}, + {"(InsecureAlgorithmError).Error", Method, 6}, + {"(OID).Equal", Method, 22}, + {"(OID).EqualASN1OID", Method, 22}, + {"(OID).String", Method, 22}, + {"(PublicKeyAlgorithm).String", Method, 10}, + {"(SignatureAlgorithm).String", Method, 6}, + {"(SystemRootsError).Error", Method, 1}, + {"(SystemRootsError).Unwrap", Method, 16}, + {"(UnhandledCriticalExtension).Error", Method, 0}, + {"(UnknownAuthorityError).Error", Method, 0}, + {"CANotAuthorizedForExtKeyUsage", Const, 10}, + {"CANotAuthorizedForThisName", Const, 0}, + {"CertPool", Type, 0}, + {"Certificate", Type, 0}, + {"Certificate.AuthorityKeyId", Field, 0}, + {"Certificate.BasicConstraintsValid", Field, 0}, + {"Certificate.CRLDistributionPoints", Field, 2}, + {"Certificate.DNSNames", Field, 0}, + {"Certificate.EmailAddresses", Field, 0}, + {"Certificate.ExcludedDNSDomains", Field, 9}, + {"Certificate.ExcludedEmailAddresses", Field, 10}, + {"Certificate.ExcludedIPRanges", Field, 10}, + {"Certificate.ExcludedURIDomains", Field, 10}, + {"Certificate.ExtKeyUsage", Field, 0}, + {"Certificate.Extensions", Field, 2}, + {"Certificate.ExtraExtensions", Field, 2}, + {"Certificate.IPAddresses", Field, 1}, + {"Certificate.IsCA", Field, 0}, + {"Certificate.Issuer", Field, 0}, + {"Certificate.IssuingCertificateURL", Field, 2}, + {"Certificate.KeyUsage", Field, 0}, + {"Certificate.MaxPathLen", Field, 0}, + {"Certificate.MaxPathLenZero", Field, 4}, + {"Certificate.NotAfter", Field, 0}, + {"Certificate.NotBefore", Field, 0}, + {"Certificate.OCSPServer", Field, 2}, + {"Certificate.PermittedDNSDomains", Field, 0}, + {"Certificate.PermittedDNSDomainsCritical", Field, 0}, + {"Certificate.PermittedEmailAddresses", Field, 10}, + {"Certificate.PermittedIPRanges", Field, 10}, + {"Certificate.PermittedURIDomains", Field, 10}, + {"Certificate.Policies", Field, 22}, + {"Certificate.PolicyIdentifiers", Field, 0}, + {"Certificate.PublicKey", Field, 0}, + {"Certificate.PublicKeyAlgorithm", Field, 0}, 
+ {"Certificate.Raw", Field, 0}, + {"Certificate.RawIssuer", Field, 0}, + {"Certificate.RawSubject", Field, 0}, + {"Certificate.RawSubjectPublicKeyInfo", Field, 0}, + {"Certificate.RawTBSCertificate", Field, 0}, + {"Certificate.SerialNumber", Field, 0}, + {"Certificate.Signature", Field, 0}, + {"Certificate.SignatureAlgorithm", Field, 0}, + {"Certificate.Subject", Field, 0}, + {"Certificate.SubjectKeyId", Field, 0}, + {"Certificate.URIs", Field, 10}, + {"Certificate.UnhandledCriticalExtensions", Field, 5}, + {"Certificate.UnknownExtKeyUsage", Field, 0}, + {"Certificate.Version", Field, 0}, + {"CertificateInvalidError", Type, 0}, + {"CertificateInvalidError.Cert", Field, 0}, + {"CertificateInvalidError.Detail", Field, 10}, + {"CertificateInvalidError.Reason", Field, 0}, + {"CertificateRequest", Type, 3}, + {"CertificateRequest.Attributes", Field, 3}, + {"CertificateRequest.DNSNames", Field, 3}, + {"CertificateRequest.EmailAddresses", Field, 3}, + {"CertificateRequest.Extensions", Field, 3}, + {"CertificateRequest.ExtraExtensions", Field, 3}, + {"CertificateRequest.IPAddresses", Field, 3}, + {"CertificateRequest.PublicKey", Field, 3}, + {"CertificateRequest.PublicKeyAlgorithm", Field, 3}, + {"CertificateRequest.Raw", Field, 3}, + {"CertificateRequest.RawSubject", Field, 3}, + {"CertificateRequest.RawSubjectPublicKeyInfo", Field, 3}, + {"CertificateRequest.RawTBSCertificateRequest", Field, 3}, + {"CertificateRequest.Signature", Field, 3}, + {"CertificateRequest.SignatureAlgorithm", Field, 3}, + {"CertificateRequest.Subject", Field, 3}, + {"CertificateRequest.URIs", Field, 10}, + {"CertificateRequest.Version", Field, 3}, + {"ConstraintViolationError", Type, 0}, + {"CreateCertificate", Func, 0}, + {"CreateCertificateRequest", Func, 3}, + {"CreateRevocationList", Func, 15}, + {"DSA", Const, 0}, + {"DSAWithSHA1", Const, 0}, + {"DSAWithSHA256", Const, 0}, + {"DecryptPEMBlock", Func, 1}, + {"ECDSA", Const, 1}, + {"ECDSAWithSHA1", Const, 1}, + {"ECDSAWithSHA256", Const, 1}, + {"ECDSAWithSHA384", Const, 1}, + {"ECDSAWithSHA512", Const, 1}, + {"Ed25519", Const, 13}, + {"EncryptPEMBlock", Func, 1}, + {"ErrUnsupportedAlgorithm", Var, 0}, + {"Expired", Const, 0}, + {"ExtKeyUsage", Type, 0}, + {"ExtKeyUsageAny", Const, 0}, + {"ExtKeyUsageClientAuth", Const, 0}, + {"ExtKeyUsageCodeSigning", Const, 0}, + {"ExtKeyUsageEmailProtection", Const, 0}, + {"ExtKeyUsageIPSECEndSystem", Const, 1}, + {"ExtKeyUsageIPSECTunnel", Const, 1}, + {"ExtKeyUsageIPSECUser", Const, 1}, + {"ExtKeyUsageMicrosoftCommercialCodeSigning", Const, 10}, + {"ExtKeyUsageMicrosoftKernelCodeSigning", Const, 10}, + {"ExtKeyUsageMicrosoftServerGatedCrypto", Const, 1}, + {"ExtKeyUsageNetscapeServerGatedCrypto", Const, 1}, + {"ExtKeyUsageOCSPSigning", Const, 0}, + {"ExtKeyUsageServerAuth", Const, 0}, + {"ExtKeyUsageTimeStamping", Const, 0}, + {"HostnameError", Type, 0}, + {"HostnameError.Certificate", Field, 0}, + {"HostnameError.Host", Field, 0}, + {"IncompatibleUsage", Const, 1}, + {"IncorrectPasswordError", Var, 1}, + {"InsecureAlgorithmError", Type, 6}, + {"InvalidReason", Type, 0}, + {"IsEncryptedPEMBlock", Func, 1}, + {"KeyUsage", Type, 0}, + {"KeyUsageCRLSign", Const, 0}, + {"KeyUsageCertSign", Const, 0}, + {"KeyUsageContentCommitment", Const, 0}, + {"KeyUsageDataEncipherment", Const, 0}, + {"KeyUsageDecipherOnly", Const, 0}, + {"KeyUsageDigitalSignature", Const, 0}, + {"KeyUsageEncipherOnly", Const, 0}, + {"KeyUsageKeyAgreement", Const, 0}, + {"KeyUsageKeyEncipherment", Const, 0}, + {"MD2WithRSA", Const, 0}, + {"MD5WithRSA", Const, 0}, + 
{"MarshalECPrivateKey", Func, 2}, + {"MarshalPKCS1PrivateKey", Func, 0}, + {"MarshalPKCS1PublicKey", Func, 10}, + {"MarshalPKCS8PrivateKey", Func, 10}, + {"MarshalPKIXPublicKey", Func, 0}, + {"NameConstraintsWithoutSANs", Const, 10}, + {"NameMismatch", Const, 8}, + {"NewCertPool", Func, 0}, + {"NotAuthorizedToSign", Const, 0}, + {"OID", Type, 22}, + {"OIDFromInts", Func, 22}, + {"PEMCipher", Type, 1}, + {"PEMCipher3DES", Const, 1}, + {"PEMCipherAES128", Const, 1}, + {"PEMCipherAES192", Const, 1}, + {"PEMCipherAES256", Const, 1}, + {"PEMCipherDES", Const, 1}, + {"ParseCRL", Func, 0}, + {"ParseCertificate", Func, 0}, + {"ParseCertificateRequest", Func, 3}, + {"ParseCertificates", Func, 0}, + {"ParseDERCRL", Func, 0}, + {"ParseECPrivateKey", Func, 1}, + {"ParsePKCS1PrivateKey", Func, 0}, + {"ParsePKCS1PublicKey", Func, 10}, + {"ParsePKCS8PrivateKey", Func, 0}, + {"ParsePKIXPublicKey", Func, 0}, + {"ParseRevocationList", Func, 19}, + {"PublicKeyAlgorithm", Type, 0}, + {"PureEd25519", Const, 13}, + {"RSA", Const, 0}, + {"RevocationList", Type, 15}, + {"RevocationList.AuthorityKeyId", Field, 19}, + {"RevocationList.Extensions", Field, 19}, + {"RevocationList.ExtraExtensions", Field, 15}, + {"RevocationList.Issuer", Field, 19}, + {"RevocationList.NextUpdate", Field, 15}, + {"RevocationList.Number", Field, 15}, + {"RevocationList.Raw", Field, 19}, + {"RevocationList.RawIssuer", Field, 19}, + {"RevocationList.RawTBSRevocationList", Field, 19}, + {"RevocationList.RevokedCertificateEntries", Field, 21}, + {"RevocationList.RevokedCertificates", Field, 15}, + {"RevocationList.Signature", Field, 19}, + {"RevocationList.SignatureAlgorithm", Field, 15}, + {"RevocationList.ThisUpdate", Field, 15}, + {"RevocationListEntry", Type, 21}, + {"RevocationListEntry.Extensions", Field, 21}, + {"RevocationListEntry.ExtraExtensions", Field, 21}, + {"RevocationListEntry.Raw", Field, 21}, + {"RevocationListEntry.ReasonCode", Field, 21}, + {"RevocationListEntry.RevocationTime", Field, 21}, + {"RevocationListEntry.SerialNumber", Field, 21}, + {"SHA1WithRSA", Const, 0}, + {"SHA256WithRSA", Const, 0}, + {"SHA256WithRSAPSS", Const, 8}, + {"SHA384WithRSA", Const, 0}, + {"SHA384WithRSAPSS", Const, 8}, + {"SHA512WithRSA", Const, 0}, + {"SHA512WithRSAPSS", Const, 8}, + {"SetFallbackRoots", Func, 20}, + {"SignatureAlgorithm", Type, 0}, + {"SystemCertPool", Func, 7}, + {"SystemRootsError", Type, 1}, + {"SystemRootsError.Err", Field, 7}, + {"TooManyConstraints", Const, 10}, + {"TooManyIntermediates", Const, 0}, + {"UnconstrainedName", Const, 10}, + {"UnhandledCriticalExtension", Type, 0}, + {"UnknownAuthorityError", Type, 0}, + {"UnknownAuthorityError.Cert", Field, 8}, + {"UnknownPublicKeyAlgorithm", Const, 0}, + {"UnknownSignatureAlgorithm", Const, 0}, + {"VerifyOptions", Type, 0}, + {"VerifyOptions.CurrentTime", Field, 0}, + {"VerifyOptions.DNSName", Field, 0}, + {"VerifyOptions.Intermediates", Field, 0}, + {"VerifyOptions.KeyUsages", Field, 1}, + {"VerifyOptions.MaxConstraintComparisions", Field, 10}, + {"VerifyOptions.Roots", Field, 0}, + }, + "crypto/x509/pkix": { + {"(*CertificateList).HasExpired", Method, 0}, + {"(*Name).FillFromRDNSequence", Method, 0}, + {"(Name).String", Method, 10}, + {"(Name).ToRDNSequence", Method, 0}, + {"(RDNSequence).String", Method, 10}, + {"AlgorithmIdentifier", Type, 0}, + {"AlgorithmIdentifier.Algorithm", Field, 0}, + {"AlgorithmIdentifier.Parameters", Field, 0}, + {"AttributeTypeAndValue", Type, 0}, + {"AttributeTypeAndValue.Type", Field, 0}, + {"AttributeTypeAndValue.Value", Field, 0}, + 
{"AttributeTypeAndValueSET", Type, 3}, + {"AttributeTypeAndValueSET.Type", Field, 3}, + {"AttributeTypeAndValueSET.Value", Field, 3}, + {"CertificateList", Type, 0}, + {"CertificateList.SignatureAlgorithm", Field, 0}, + {"CertificateList.SignatureValue", Field, 0}, + {"CertificateList.TBSCertList", Field, 0}, + {"Extension", Type, 0}, + {"Extension.Critical", Field, 0}, + {"Extension.Id", Field, 0}, + {"Extension.Value", Field, 0}, + {"Name", Type, 0}, + {"Name.CommonName", Field, 0}, + {"Name.Country", Field, 0}, + {"Name.ExtraNames", Field, 5}, + {"Name.Locality", Field, 0}, + {"Name.Names", Field, 0}, + {"Name.Organization", Field, 0}, + {"Name.OrganizationalUnit", Field, 0}, + {"Name.PostalCode", Field, 0}, + {"Name.Province", Field, 0}, + {"Name.SerialNumber", Field, 0}, + {"Name.StreetAddress", Field, 0}, + {"RDNSequence", Type, 0}, + {"RelativeDistinguishedNameSET", Type, 0}, + {"RevokedCertificate", Type, 0}, + {"RevokedCertificate.Extensions", Field, 0}, + {"RevokedCertificate.RevocationTime", Field, 0}, + {"RevokedCertificate.SerialNumber", Field, 0}, + {"TBSCertificateList", Type, 0}, + {"TBSCertificateList.Extensions", Field, 0}, + {"TBSCertificateList.Issuer", Field, 0}, + {"TBSCertificateList.NextUpdate", Field, 0}, + {"TBSCertificateList.Raw", Field, 0}, + {"TBSCertificateList.RevokedCertificates", Field, 0}, + {"TBSCertificateList.Signature", Field, 0}, + {"TBSCertificateList.ThisUpdate", Field, 0}, + {"TBSCertificateList.Version", Field, 0}, + }, + "database/sql": { + {"(*ColumnType).DatabaseTypeName", Method, 8}, + {"(*ColumnType).DecimalSize", Method, 8}, + {"(*ColumnType).Length", Method, 8}, + {"(*ColumnType).Name", Method, 8}, + {"(*ColumnType).Nullable", Method, 8}, + {"(*ColumnType).ScanType", Method, 8}, + {"(*Conn).BeginTx", Method, 9}, + {"(*Conn).Close", Method, 9}, + {"(*Conn).ExecContext", Method, 9}, + {"(*Conn).PingContext", Method, 9}, + {"(*Conn).PrepareContext", Method, 9}, + {"(*Conn).QueryContext", Method, 9}, + {"(*Conn).QueryRowContext", Method, 9}, + {"(*Conn).Raw", Method, 13}, + {"(*DB).Begin", Method, 0}, + {"(*DB).BeginTx", Method, 8}, + {"(*DB).Close", Method, 0}, + {"(*DB).Conn", Method, 9}, + {"(*DB).Driver", Method, 0}, + {"(*DB).Exec", Method, 0}, + {"(*DB).ExecContext", Method, 8}, + {"(*DB).Ping", Method, 1}, + {"(*DB).PingContext", Method, 8}, + {"(*DB).Prepare", Method, 0}, + {"(*DB).PrepareContext", Method, 8}, + {"(*DB).Query", Method, 0}, + {"(*DB).QueryContext", Method, 8}, + {"(*DB).QueryRow", Method, 0}, + {"(*DB).QueryRowContext", Method, 8}, + {"(*DB).SetConnMaxIdleTime", Method, 15}, + {"(*DB).SetConnMaxLifetime", Method, 6}, + {"(*DB).SetMaxIdleConns", Method, 1}, + {"(*DB).SetMaxOpenConns", Method, 2}, + {"(*DB).Stats", Method, 5}, + {"(*Null).Scan", Method, 22}, + {"(*NullBool).Scan", Method, 0}, + {"(*NullByte).Scan", Method, 17}, + {"(*NullFloat64).Scan", Method, 0}, + {"(*NullInt16).Scan", Method, 17}, + {"(*NullInt32).Scan", Method, 13}, + {"(*NullInt64).Scan", Method, 0}, + {"(*NullString).Scan", Method, 0}, + {"(*NullTime).Scan", Method, 13}, + {"(*Row).Err", Method, 15}, + {"(*Row).Scan", Method, 0}, + {"(*Rows).Close", Method, 0}, + {"(*Rows).ColumnTypes", Method, 8}, + {"(*Rows).Columns", Method, 0}, + {"(*Rows).Err", Method, 0}, + {"(*Rows).Next", Method, 0}, + {"(*Rows).NextResultSet", Method, 8}, + {"(*Rows).Scan", Method, 0}, + {"(*Stmt).Close", Method, 0}, + {"(*Stmt).Exec", Method, 0}, + {"(*Stmt).ExecContext", Method, 8}, + {"(*Stmt).Query", Method, 0}, + {"(*Stmt).QueryContext", Method, 8}, + 
{"(*Stmt).QueryRow", Method, 0}, + {"(*Stmt).QueryRowContext", Method, 8}, + {"(*Tx).Commit", Method, 0}, + {"(*Tx).Exec", Method, 0}, + {"(*Tx).ExecContext", Method, 8}, + {"(*Tx).Prepare", Method, 0}, + {"(*Tx).PrepareContext", Method, 8}, + {"(*Tx).Query", Method, 0}, + {"(*Tx).QueryContext", Method, 8}, + {"(*Tx).QueryRow", Method, 0}, + {"(*Tx).QueryRowContext", Method, 8}, + {"(*Tx).Rollback", Method, 0}, + {"(*Tx).Stmt", Method, 0}, + {"(*Tx).StmtContext", Method, 8}, + {"(IsolationLevel).String", Method, 11}, + {"(Null).Value", Method, 22}, + {"(NullBool).Value", Method, 0}, + {"(NullByte).Value", Method, 17}, + {"(NullFloat64).Value", Method, 0}, + {"(NullInt16).Value", Method, 17}, + {"(NullInt32).Value", Method, 13}, + {"(NullInt64).Value", Method, 0}, + {"(NullString).Value", Method, 0}, + {"(NullTime).Value", Method, 13}, + {"ColumnType", Type, 8}, + {"Conn", Type, 9}, + {"DB", Type, 0}, + {"DBStats", Type, 5}, + {"DBStats.Idle", Field, 11}, + {"DBStats.InUse", Field, 11}, + {"DBStats.MaxIdleClosed", Field, 11}, + {"DBStats.MaxIdleTimeClosed", Field, 15}, + {"DBStats.MaxLifetimeClosed", Field, 11}, + {"DBStats.MaxOpenConnections", Field, 11}, + {"DBStats.OpenConnections", Field, 5}, + {"DBStats.WaitCount", Field, 11}, + {"DBStats.WaitDuration", Field, 11}, + {"Drivers", Func, 4}, + {"ErrConnDone", Var, 9}, + {"ErrNoRows", Var, 0}, + {"ErrTxDone", Var, 0}, + {"IsolationLevel", Type, 8}, + {"LevelDefault", Const, 8}, + {"LevelLinearizable", Const, 8}, + {"LevelReadCommitted", Const, 8}, + {"LevelReadUncommitted", Const, 8}, + {"LevelRepeatableRead", Const, 8}, + {"LevelSerializable", Const, 8}, + {"LevelSnapshot", Const, 8}, + {"LevelWriteCommitted", Const, 8}, + {"Named", Func, 8}, + {"NamedArg", Type, 8}, + {"NamedArg.Name", Field, 8}, + {"NamedArg.Value", Field, 8}, + {"Null", Type, 22}, + {"Null.V", Field, 22}, + {"Null.Valid", Field, 22}, + {"NullBool", Type, 0}, + {"NullBool.Bool", Field, 0}, + {"NullBool.Valid", Field, 0}, + {"NullByte", Type, 17}, + {"NullByte.Byte", Field, 17}, + {"NullByte.Valid", Field, 17}, + {"NullFloat64", Type, 0}, + {"NullFloat64.Float64", Field, 0}, + {"NullFloat64.Valid", Field, 0}, + {"NullInt16", Type, 17}, + {"NullInt16.Int16", Field, 17}, + {"NullInt16.Valid", Field, 17}, + {"NullInt32", Type, 13}, + {"NullInt32.Int32", Field, 13}, + {"NullInt32.Valid", Field, 13}, + {"NullInt64", Type, 0}, + {"NullInt64.Int64", Field, 0}, + {"NullInt64.Valid", Field, 0}, + {"NullString", Type, 0}, + {"NullString.String", Field, 0}, + {"NullString.Valid", Field, 0}, + {"NullTime", Type, 13}, + {"NullTime.Time", Field, 13}, + {"NullTime.Valid", Field, 13}, + {"Open", Func, 0}, + {"OpenDB", Func, 10}, + {"Out", Type, 9}, + {"Out.Dest", Field, 9}, + {"Out.In", Field, 9}, + {"RawBytes", Type, 0}, + {"Register", Func, 0}, + {"Result", Type, 0}, + {"Row", Type, 0}, + {"Rows", Type, 0}, + {"Scanner", Type, 0}, + {"Stmt", Type, 0}, + {"Tx", Type, 0}, + {"TxOptions", Type, 8}, + {"TxOptions.Isolation", Field, 8}, + {"TxOptions.ReadOnly", Field, 8}, + }, + "database/sql/driver": { + {"(NotNull).ConvertValue", Method, 0}, + {"(Null).ConvertValue", Method, 0}, + {"(RowsAffected).LastInsertId", Method, 0}, + {"(RowsAffected).RowsAffected", Method, 0}, + {"Bool", Var, 0}, + {"ColumnConverter", Type, 0}, + {"Conn", Type, 0}, + {"ConnBeginTx", Type, 8}, + {"ConnPrepareContext", Type, 8}, + {"Connector", Type, 10}, + {"DefaultParameterConverter", Var, 0}, + {"Driver", Type, 0}, + {"DriverContext", Type, 10}, + {"ErrBadConn", Var, 0}, + {"ErrRemoveArgument", Var, 9}, + 
{"ErrSkip", Var, 0}, + {"Execer", Type, 0}, + {"ExecerContext", Type, 8}, + {"Int32", Var, 0}, + {"IsScanValue", Func, 0}, + {"IsValue", Func, 0}, + {"IsolationLevel", Type, 8}, + {"NamedValue", Type, 8}, + {"NamedValue.Name", Field, 8}, + {"NamedValue.Ordinal", Field, 8}, + {"NamedValue.Value", Field, 8}, + {"NamedValueChecker", Type, 9}, + {"NotNull", Type, 0}, + {"NotNull.Converter", Field, 0}, + {"Null", Type, 0}, + {"Null.Converter", Field, 0}, + {"Pinger", Type, 8}, + {"Queryer", Type, 1}, + {"QueryerContext", Type, 8}, + {"Result", Type, 0}, + {"ResultNoRows", Var, 0}, + {"Rows", Type, 0}, + {"RowsAffected", Type, 0}, + {"RowsColumnTypeDatabaseTypeName", Type, 8}, + {"RowsColumnTypeLength", Type, 8}, + {"RowsColumnTypeNullable", Type, 8}, + {"RowsColumnTypePrecisionScale", Type, 8}, + {"RowsColumnTypeScanType", Type, 8}, + {"RowsNextResultSet", Type, 8}, + {"SessionResetter", Type, 10}, + {"Stmt", Type, 0}, + {"StmtExecContext", Type, 8}, + {"StmtQueryContext", Type, 8}, + {"String", Var, 0}, + {"Tx", Type, 0}, + {"TxOptions", Type, 8}, + {"TxOptions.Isolation", Field, 8}, + {"TxOptions.ReadOnly", Field, 8}, + {"Validator", Type, 15}, + {"Value", Type, 0}, + {"ValueConverter", Type, 0}, + {"Valuer", Type, 0}, + }, + "debug/buildinfo": { + {"BuildInfo", Type, 18}, + {"Read", Func, 18}, + {"ReadFile", Func, 18}, + }, + "debug/dwarf": { + {"(*AddrType).Basic", Method, 0}, + {"(*AddrType).Common", Method, 0}, + {"(*AddrType).Size", Method, 0}, + {"(*AddrType).String", Method, 0}, + {"(*ArrayType).Common", Method, 0}, + {"(*ArrayType).Size", Method, 0}, + {"(*ArrayType).String", Method, 0}, + {"(*BasicType).Basic", Method, 0}, + {"(*BasicType).Common", Method, 0}, + {"(*BasicType).Size", Method, 0}, + {"(*BasicType).String", Method, 0}, + {"(*BoolType).Basic", Method, 0}, + {"(*BoolType).Common", Method, 0}, + {"(*BoolType).Size", Method, 0}, + {"(*BoolType).String", Method, 0}, + {"(*CharType).Basic", Method, 0}, + {"(*CharType).Common", Method, 0}, + {"(*CharType).Size", Method, 0}, + {"(*CharType).String", Method, 0}, + {"(*CommonType).Common", Method, 0}, + {"(*CommonType).Size", Method, 0}, + {"(*ComplexType).Basic", Method, 0}, + {"(*ComplexType).Common", Method, 0}, + {"(*ComplexType).Size", Method, 0}, + {"(*ComplexType).String", Method, 0}, + {"(*Data).AddSection", Method, 14}, + {"(*Data).AddTypes", Method, 3}, + {"(*Data).LineReader", Method, 5}, + {"(*Data).Ranges", Method, 7}, + {"(*Data).Reader", Method, 0}, + {"(*Data).Type", Method, 0}, + {"(*DotDotDotType).Common", Method, 0}, + {"(*DotDotDotType).Size", Method, 0}, + {"(*DotDotDotType).String", Method, 0}, + {"(*Entry).AttrField", Method, 5}, + {"(*Entry).Val", Method, 0}, + {"(*EnumType).Common", Method, 0}, + {"(*EnumType).Size", Method, 0}, + {"(*EnumType).String", Method, 0}, + {"(*FloatType).Basic", Method, 0}, + {"(*FloatType).Common", Method, 0}, + {"(*FloatType).Size", Method, 0}, + {"(*FloatType).String", Method, 0}, + {"(*FuncType).Common", Method, 0}, + {"(*FuncType).Size", Method, 0}, + {"(*FuncType).String", Method, 0}, + {"(*IntType).Basic", Method, 0}, + {"(*IntType).Common", Method, 0}, + {"(*IntType).Size", Method, 0}, + {"(*IntType).String", Method, 0}, + {"(*LineReader).Files", Method, 14}, + {"(*LineReader).Next", Method, 5}, + {"(*LineReader).Reset", Method, 5}, + {"(*LineReader).Seek", Method, 5}, + {"(*LineReader).SeekPC", Method, 5}, + {"(*LineReader).Tell", Method, 5}, + {"(*PtrType).Common", Method, 0}, + {"(*PtrType).Size", Method, 0}, + {"(*PtrType).String", Method, 0}, + 
{"(*QualType).Common", Method, 0}, + {"(*QualType).Size", Method, 0}, + {"(*QualType).String", Method, 0}, + {"(*Reader).AddressSize", Method, 5}, + {"(*Reader).ByteOrder", Method, 14}, + {"(*Reader).Next", Method, 0}, + {"(*Reader).Seek", Method, 0}, + {"(*Reader).SeekPC", Method, 7}, + {"(*Reader).SkipChildren", Method, 0}, + {"(*StructType).Common", Method, 0}, + {"(*StructType).Defn", Method, 0}, + {"(*StructType).Size", Method, 0}, + {"(*StructType).String", Method, 0}, + {"(*TypedefType).Common", Method, 0}, + {"(*TypedefType).Size", Method, 0}, + {"(*TypedefType).String", Method, 0}, + {"(*UcharType).Basic", Method, 0}, + {"(*UcharType).Common", Method, 0}, + {"(*UcharType).Size", Method, 0}, + {"(*UcharType).String", Method, 0}, + {"(*UintType).Basic", Method, 0}, + {"(*UintType).Common", Method, 0}, + {"(*UintType).Size", Method, 0}, + {"(*UintType).String", Method, 0}, + {"(*UnspecifiedType).Basic", Method, 4}, + {"(*UnspecifiedType).Common", Method, 4}, + {"(*UnspecifiedType).Size", Method, 4}, + {"(*UnspecifiedType).String", Method, 4}, + {"(*UnsupportedType).Common", Method, 13}, + {"(*UnsupportedType).Size", Method, 13}, + {"(*UnsupportedType).String", Method, 13}, + {"(*VoidType).Common", Method, 0}, + {"(*VoidType).Size", Method, 0}, + {"(*VoidType).String", Method, 0}, + {"(Attr).GoString", Method, 0}, + {"(Attr).String", Method, 0}, + {"(Class).GoString", Method, 5}, + {"(Class).String", Method, 5}, + {"(DecodeError).Error", Method, 0}, + {"(Tag).GoString", Method, 0}, + {"(Tag).String", Method, 0}, + {"AddrType", Type, 0}, + {"AddrType.BasicType", Field, 0}, + {"ArrayType", Type, 0}, + {"ArrayType.CommonType", Field, 0}, + {"ArrayType.Count", Field, 0}, + {"ArrayType.StrideBitSize", Field, 0}, + {"ArrayType.Type", Field, 0}, + {"Attr", Type, 0}, + {"AttrAbstractOrigin", Const, 0}, + {"AttrAccessibility", Const, 0}, + {"AttrAddrBase", Const, 14}, + {"AttrAddrClass", Const, 0}, + {"AttrAlignment", Const, 14}, + {"AttrAllocated", Const, 0}, + {"AttrArtificial", Const, 0}, + {"AttrAssociated", Const, 0}, + {"AttrBaseTypes", Const, 0}, + {"AttrBinaryScale", Const, 14}, + {"AttrBitOffset", Const, 0}, + {"AttrBitSize", Const, 0}, + {"AttrByteSize", Const, 0}, + {"AttrCallAllCalls", Const, 14}, + {"AttrCallAllSourceCalls", Const, 14}, + {"AttrCallAllTailCalls", Const, 14}, + {"AttrCallColumn", Const, 0}, + {"AttrCallDataLocation", Const, 14}, + {"AttrCallDataValue", Const, 14}, + {"AttrCallFile", Const, 0}, + {"AttrCallLine", Const, 0}, + {"AttrCallOrigin", Const, 14}, + {"AttrCallPC", Const, 14}, + {"AttrCallParameter", Const, 14}, + {"AttrCallReturnPC", Const, 14}, + {"AttrCallTailCall", Const, 14}, + {"AttrCallTarget", Const, 14}, + {"AttrCallTargetClobbered", Const, 14}, + {"AttrCallValue", Const, 14}, + {"AttrCalling", Const, 0}, + {"AttrCommonRef", Const, 0}, + {"AttrCompDir", Const, 0}, + {"AttrConstExpr", Const, 14}, + {"AttrConstValue", Const, 0}, + {"AttrContainingType", Const, 0}, + {"AttrCount", Const, 0}, + {"AttrDataBitOffset", Const, 14}, + {"AttrDataLocation", Const, 0}, + {"AttrDataMemberLoc", Const, 0}, + {"AttrDecimalScale", Const, 14}, + {"AttrDecimalSign", Const, 14}, + {"AttrDeclColumn", Const, 0}, + {"AttrDeclFile", Const, 0}, + {"AttrDeclLine", Const, 0}, + {"AttrDeclaration", Const, 0}, + {"AttrDefaultValue", Const, 0}, + {"AttrDefaulted", Const, 14}, + {"AttrDeleted", Const, 14}, + {"AttrDescription", Const, 0}, + {"AttrDigitCount", Const, 14}, + {"AttrDiscr", Const, 0}, + {"AttrDiscrList", Const, 0}, + {"AttrDiscrValue", Const, 0}, + {"AttrDwoName", 
Const, 14}, + {"AttrElemental", Const, 14}, + {"AttrEncoding", Const, 0}, + {"AttrEndianity", Const, 14}, + {"AttrEntrypc", Const, 0}, + {"AttrEnumClass", Const, 14}, + {"AttrExplicit", Const, 14}, + {"AttrExportSymbols", Const, 14}, + {"AttrExtension", Const, 0}, + {"AttrExternal", Const, 0}, + {"AttrFrameBase", Const, 0}, + {"AttrFriend", Const, 0}, + {"AttrHighpc", Const, 0}, + {"AttrIdentifierCase", Const, 0}, + {"AttrImport", Const, 0}, + {"AttrInline", Const, 0}, + {"AttrIsOptional", Const, 0}, + {"AttrLanguage", Const, 0}, + {"AttrLinkageName", Const, 14}, + {"AttrLocation", Const, 0}, + {"AttrLoclistsBase", Const, 14}, + {"AttrLowerBound", Const, 0}, + {"AttrLowpc", Const, 0}, + {"AttrMacroInfo", Const, 0}, + {"AttrMacros", Const, 14}, + {"AttrMainSubprogram", Const, 14}, + {"AttrMutable", Const, 14}, + {"AttrName", Const, 0}, + {"AttrNamelistItem", Const, 0}, + {"AttrNoreturn", Const, 14}, + {"AttrObjectPointer", Const, 14}, + {"AttrOrdering", Const, 0}, + {"AttrPictureString", Const, 14}, + {"AttrPriority", Const, 0}, + {"AttrProducer", Const, 0}, + {"AttrPrototyped", Const, 0}, + {"AttrPure", Const, 14}, + {"AttrRanges", Const, 0}, + {"AttrRank", Const, 14}, + {"AttrRecursive", Const, 14}, + {"AttrReference", Const, 14}, + {"AttrReturnAddr", Const, 0}, + {"AttrRnglistsBase", Const, 14}, + {"AttrRvalueReference", Const, 14}, + {"AttrSegment", Const, 0}, + {"AttrSibling", Const, 0}, + {"AttrSignature", Const, 14}, + {"AttrSmall", Const, 14}, + {"AttrSpecification", Const, 0}, + {"AttrStartScope", Const, 0}, + {"AttrStaticLink", Const, 0}, + {"AttrStmtList", Const, 0}, + {"AttrStrOffsetsBase", Const, 14}, + {"AttrStride", Const, 0}, + {"AttrStrideSize", Const, 0}, + {"AttrStringLength", Const, 0}, + {"AttrStringLengthBitSize", Const, 14}, + {"AttrStringLengthByteSize", Const, 14}, + {"AttrThreadsScaled", Const, 14}, + {"AttrTrampoline", Const, 0}, + {"AttrType", Const, 0}, + {"AttrUpperBound", Const, 0}, + {"AttrUseLocation", Const, 0}, + {"AttrUseUTF8", Const, 0}, + {"AttrVarParam", Const, 0}, + {"AttrVirtuality", Const, 0}, + {"AttrVisibility", Const, 0}, + {"AttrVtableElemLoc", Const, 0}, + {"BasicType", Type, 0}, + {"BasicType.BitOffset", Field, 0}, + {"BasicType.BitSize", Field, 0}, + {"BasicType.CommonType", Field, 0}, + {"BasicType.DataBitOffset", Field, 18}, + {"BoolType", Type, 0}, + {"BoolType.BasicType", Field, 0}, + {"CharType", Type, 0}, + {"CharType.BasicType", Field, 0}, + {"Class", Type, 5}, + {"ClassAddrPtr", Const, 14}, + {"ClassAddress", Const, 5}, + {"ClassBlock", Const, 5}, + {"ClassConstant", Const, 5}, + {"ClassExprLoc", Const, 5}, + {"ClassFlag", Const, 5}, + {"ClassLinePtr", Const, 5}, + {"ClassLocList", Const, 14}, + {"ClassLocListPtr", Const, 5}, + {"ClassMacPtr", Const, 5}, + {"ClassRangeListPtr", Const, 5}, + {"ClassReference", Const, 5}, + {"ClassReferenceAlt", Const, 5}, + {"ClassReferenceSig", Const, 5}, + {"ClassRngList", Const, 14}, + {"ClassRngListsPtr", Const, 14}, + {"ClassStrOffsetsPtr", Const, 14}, + {"ClassString", Const, 5}, + {"ClassStringAlt", Const, 5}, + {"ClassUnknown", Const, 6}, + {"CommonType", Type, 0}, + {"CommonType.ByteSize", Field, 0}, + {"CommonType.Name", Field, 0}, + {"ComplexType", Type, 0}, + {"ComplexType.BasicType", Field, 0}, + {"Data", Type, 0}, + {"DecodeError", Type, 0}, + {"DecodeError.Err", Field, 0}, + {"DecodeError.Name", Field, 0}, + {"DecodeError.Offset", Field, 0}, + {"DotDotDotType", Type, 0}, + {"DotDotDotType.CommonType", Field, 0}, + {"Entry", Type, 0}, + {"Entry.Children", Field, 0}, + {"Entry.Field", 
Field, 0}, + {"Entry.Offset", Field, 0}, + {"Entry.Tag", Field, 0}, + {"EnumType", Type, 0}, + {"EnumType.CommonType", Field, 0}, + {"EnumType.EnumName", Field, 0}, + {"EnumType.Val", Field, 0}, + {"EnumValue", Type, 0}, + {"EnumValue.Name", Field, 0}, + {"EnumValue.Val", Field, 0}, + {"ErrUnknownPC", Var, 5}, + {"Field", Type, 0}, + {"Field.Attr", Field, 0}, + {"Field.Class", Field, 5}, + {"Field.Val", Field, 0}, + {"FloatType", Type, 0}, + {"FloatType.BasicType", Field, 0}, + {"FuncType", Type, 0}, + {"FuncType.CommonType", Field, 0}, + {"FuncType.ParamType", Field, 0}, + {"FuncType.ReturnType", Field, 0}, + {"IntType", Type, 0}, + {"IntType.BasicType", Field, 0}, + {"LineEntry", Type, 5}, + {"LineEntry.Address", Field, 5}, + {"LineEntry.BasicBlock", Field, 5}, + {"LineEntry.Column", Field, 5}, + {"LineEntry.Discriminator", Field, 5}, + {"LineEntry.EndSequence", Field, 5}, + {"LineEntry.EpilogueBegin", Field, 5}, + {"LineEntry.File", Field, 5}, + {"LineEntry.ISA", Field, 5}, + {"LineEntry.IsStmt", Field, 5}, + {"LineEntry.Line", Field, 5}, + {"LineEntry.OpIndex", Field, 5}, + {"LineEntry.PrologueEnd", Field, 5}, + {"LineFile", Type, 5}, + {"LineFile.Length", Field, 5}, + {"LineFile.Mtime", Field, 5}, + {"LineFile.Name", Field, 5}, + {"LineReader", Type, 5}, + {"LineReaderPos", Type, 5}, + {"New", Func, 0}, + {"Offset", Type, 0}, + {"PtrType", Type, 0}, + {"PtrType.CommonType", Field, 0}, + {"PtrType.Type", Field, 0}, + {"QualType", Type, 0}, + {"QualType.CommonType", Field, 0}, + {"QualType.Qual", Field, 0}, + {"QualType.Type", Field, 0}, + {"Reader", Type, 0}, + {"StructField", Type, 0}, + {"StructField.BitOffset", Field, 0}, + {"StructField.BitSize", Field, 0}, + {"StructField.ByteOffset", Field, 0}, + {"StructField.ByteSize", Field, 0}, + {"StructField.DataBitOffset", Field, 18}, + {"StructField.Name", Field, 0}, + {"StructField.Type", Field, 0}, + {"StructType", Type, 0}, + {"StructType.CommonType", Field, 0}, + {"StructType.Field", Field, 0}, + {"StructType.Incomplete", Field, 0}, + {"StructType.Kind", Field, 0}, + {"StructType.StructName", Field, 0}, + {"Tag", Type, 0}, + {"TagAccessDeclaration", Const, 0}, + {"TagArrayType", Const, 0}, + {"TagAtomicType", Const, 14}, + {"TagBaseType", Const, 0}, + {"TagCallSite", Const, 14}, + {"TagCallSiteParameter", Const, 14}, + {"TagCatchDwarfBlock", Const, 0}, + {"TagClassType", Const, 0}, + {"TagCoarrayType", Const, 14}, + {"TagCommonDwarfBlock", Const, 0}, + {"TagCommonInclusion", Const, 0}, + {"TagCompileUnit", Const, 0}, + {"TagCondition", Const, 3}, + {"TagConstType", Const, 0}, + {"TagConstant", Const, 0}, + {"TagDwarfProcedure", Const, 0}, + {"TagDynamicType", Const, 14}, + {"TagEntryPoint", Const, 0}, + {"TagEnumerationType", Const, 0}, + {"TagEnumerator", Const, 0}, + {"TagFileType", Const, 0}, + {"TagFormalParameter", Const, 0}, + {"TagFriend", Const, 0}, + {"TagGenericSubrange", Const, 14}, + {"TagImmutableType", Const, 14}, + {"TagImportedDeclaration", Const, 0}, + {"TagImportedModule", Const, 0}, + {"TagImportedUnit", Const, 0}, + {"TagInheritance", Const, 0}, + {"TagInlinedSubroutine", Const, 0}, + {"TagInterfaceType", Const, 0}, + {"TagLabel", Const, 0}, + {"TagLexDwarfBlock", Const, 0}, + {"TagMember", Const, 0}, + {"TagModule", Const, 0}, + {"TagMutableType", Const, 0}, + {"TagNamelist", Const, 0}, + {"TagNamelistItem", Const, 0}, + {"TagNamespace", Const, 0}, + {"TagPackedType", Const, 0}, + {"TagPartialUnit", Const, 0}, + {"TagPointerType", Const, 0}, + {"TagPtrToMemberType", Const, 0}, + {"TagReferenceType", Const, 0}, + 
{"TagRestrictType", Const, 0}, + {"TagRvalueReferenceType", Const, 3}, + {"TagSetType", Const, 0}, + {"TagSharedType", Const, 3}, + {"TagSkeletonUnit", Const, 14}, + {"TagStringType", Const, 0}, + {"TagStructType", Const, 0}, + {"TagSubprogram", Const, 0}, + {"TagSubrangeType", Const, 0}, + {"TagSubroutineType", Const, 0}, + {"TagTemplateAlias", Const, 3}, + {"TagTemplateTypeParameter", Const, 0}, + {"TagTemplateValueParameter", Const, 0}, + {"TagThrownType", Const, 0}, + {"TagTryDwarfBlock", Const, 0}, + {"TagTypeUnit", Const, 3}, + {"TagTypedef", Const, 0}, + {"TagUnionType", Const, 0}, + {"TagUnspecifiedParameters", Const, 0}, + {"TagUnspecifiedType", Const, 0}, + {"TagVariable", Const, 0}, + {"TagVariant", Const, 0}, + {"TagVariantPart", Const, 0}, + {"TagVolatileType", Const, 0}, + {"TagWithStmt", Const, 0}, + {"Type", Type, 0}, + {"TypedefType", Type, 0}, + {"TypedefType.CommonType", Field, 0}, + {"TypedefType.Type", Field, 0}, + {"UcharType", Type, 0}, + {"UcharType.BasicType", Field, 0}, + {"UintType", Type, 0}, + {"UintType.BasicType", Field, 0}, + {"UnspecifiedType", Type, 4}, + {"UnspecifiedType.BasicType", Field, 4}, + {"UnsupportedType", Type, 13}, + {"UnsupportedType.CommonType", Field, 13}, + {"UnsupportedType.Tag", Field, 13}, + {"VoidType", Type, 0}, + {"VoidType.CommonType", Field, 0}, + }, + "debug/elf": { + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).DynString", Method, 1}, + {"(*File).DynValue", Method, 21}, + {"(*File).DynamicSymbols", Method, 4}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*File).SectionByType", Method, 0}, + {"(*File).Symbols", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Prog).Open", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(Class).GoString", Method, 0}, + {"(Class).String", Method, 0}, + {"(CompressionType).GoString", Method, 6}, + {"(CompressionType).String", Method, 6}, + {"(Data).GoString", Method, 0}, + {"(Data).String", Method, 0}, + {"(DynFlag).GoString", Method, 0}, + {"(DynFlag).String", Method, 0}, + {"(DynFlag1).GoString", Method, 21}, + {"(DynFlag1).String", Method, 21}, + {"(DynTag).GoString", Method, 0}, + {"(DynTag).String", Method, 0}, + {"(Machine).GoString", Method, 0}, + {"(Machine).String", Method, 0}, + {"(NType).GoString", Method, 0}, + {"(NType).String", Method, 0}, + {"(OSABI).GoString", Method, 0}, + {"(OSABI).String", Method, 0}, + {"(Prog).ReadAt", Method, 0}, + {"(ProgFlag).GoString", Method, 0}, + {"(ProgFlag).String", Method, 0}, + {"(ProgType).GoString", Method, 0}, + {"(ProgType).String", Method, 0}, + {"(R_386).GoString", Method, 0}, + {"(R_386).String", Method, 0}, + {"(R_390).GoString", Method, 7}, + {"(R_390).String", Method, 7}, + {"(R_AARCH64).GoString", Method, 4}, + {"(R_AARCH64).String", Method, 4}, + {"(R_ALPHA).GoString", Method, 0}, + {"(R_ALPHA).String", Method, 0}, + {"(R_ARM).GoString", Method, 0}, + {"(R_ARM).String", Method, 0}, + {"(R_LARCH).GoString", Method, 19}, + {"(R_LARCH).String", Method, 19}, + {"(R_MIPS).GoString", Method, 6}, + {"(R_MIPS).String", Method, 6}, + {"(R_PPC).GoString", Method, 0}, + {"(R_PPC).String", Method, 0}, + {"(R_PPC64).GoString", Method, 5}, + {"(R_PPC64).String", Method, 5}, + {"(R_RISCV).GoString", Method, 11}, + {"(R_RISCV).String", Method, 11}, + {"(R_SPARC).GoString", Method, 0}, + {"(R_SPARC).String", Method, 0}, + {"(R_X86_64).GoString", Method, 0}, + {"(R_X86_64).String", Method, 0}, + 
{"(Section).ReadAt", Method, 0}, + {"(SectionFlag).GoString", Method, 0}, + {"(SectionFlag).String", Method, 0}, + {"(SectionIndex).GoString", Method, 0}, + {"(SectionIndex).String", Method, 0}, + {"(SectionType).GoString", Method, 0}, + {"(SectionType).String", Method, 0}, + {"(SymBind).GoString", Method, 0}, + {"(SymBind).String", Method, 0}, + {"(SymType).GoString", Method, 0}, + {"(SymType).String", Method, 0}, + {"(SymVis).GoString", Method, 0}, + {"(SymVis).String", Method, 0}, + {"(Type).GoString", Method, 0}, + {"(Type).String", Method, 0}, + {"(Version).GoString", Method, 0}, + {"(Version).String", Method, 0}, + {"ARM_MAGIC_TRAMP_NUMBER", Const, 0}, + {"COMPRESS_HIOS", Const, 6}, + {"COMPRESS_HIPROC", Const, 6}, + {"COMPRESS_LOOS", Const, 6}, + {"COMPRESS_LOPROC", Const, 6}, + {"COMPRESS_ZLIB", Const, 6}, + {"COMPRESS_ZSTD", Const, 21}, + {"Chdr32", Type, 6}, + {"Chdr32.Addralign", Field, 6}, + {"Chdr32.Size", Field, 6}, + {"Chdr32.Type", Field, 6}, + {"Chdr64", Type, 6}, + {"Chdr64.Addralign", Field, 6}, + {"Chdr64.Size", Field, 6}, + {"Chdr64.Type", Field, 6}, + {"Class", Type, 0}, + {"CompressionType", Type, 6}, + {"DF_1_CONFALT", Const, 21}, + {"DF_1_DIRECT", Const, 21}, + {"DF_1_DISPRELDNE", Const, 21}, + {"DF_1_DISPRELPND", Const, 21}, + {"DF_1_EDITED", Const, 21}, + {"DF_1_ENDFILTEE", Const, 21}, + {"DF_1_GLOBAL", Const, 21}, + {"DF_1_GLOBAUDIT", Const, 21}, + {"DF_1_GROUP", Const, 21}, + {"DF_1_IGNMULDEF", Const, 21}, + {"DF_1_INITFIRST", Const, 21}, + {"DF_1_INTERPOSE", Const, 21}, + {"DF_1_KMOD", Const, 21}, + {"DF_1_LOADFLTR", Const, 21}, + {"DF_1_NOCOMMON", Const, 21}, + {"DF_1_NODEFLIB", Const, 21}, + {"DF_1_NODELETE", Const, 21}, + {"DF_1_NODIRECT", Const, 21}, + {"DF_1_NODUMP", Const, 21}, + {"DF_1_NOHDR", Const, 21}, + {"DF_1_NOKSYMS", Const, 21}, + {"DF_1_NOOPEN", Const, 21}, + {"DF_1_NORELOC", Const, 21}, + {"DF_1_NOW", Const, 21}, + {"DF_1_ORIGIN", Const, 21}, + {"DF_1_PIE", Const, 21}, + {"DF_1_SINGLETON", Const, 21}, + {"DF_1_STUB", Const, 21}, + {"DF_1_SYMINTPOSE", Const, 21}, + {"DF_1_TRANS", Const, 21}, + {"DF_1_WEAKFILTER", Const, 21}, + {"DF_BIND_NOW", Const, 0}, + {"DF_ORIGIN", Const, 0}, + {"DF_STATIC_TLS", Const, 0}, + {"DF_SYMBOLIC", Const, 0}, + {"DF_TEXTREL", Const, 0}, + {"DT_ADDRRNGHI", Const, 16}, + {"DT_ADDRRNGLO", Const, 16}, + {"DT_AUDIT", Const, 16}, + {"DT_AUXILIARY", Const, 16}, + {"DT_BIND_NOW", Const, 0}, + {"DT_CHECKSUM", Const, 16}, + {"DT_CONFIG", Const, 16}, + {"DT_DEBUG", Const, 0}, + {"DT_DEPAUDIT", Const, 16}, + {"DT_ENCODING", Const, 0}, + {"DT_FEATURE", Const, 16}, + {"DT_FILTER", Const, 16}, + {"DT_FINI", Const, 0}, + {"DT_FINI_ARRAY", Const, 0}, + {"DT_FINI_ARRAYSZ", Const, 0}, + {"DT_FLAGS", Const, 0}, + {"DT_FLAGS_1", Const, 16}, + {"DT_GNU_CONFLICT", Const, 16}, + {"DT_GNU_CONFLICTSZ", Const, 16}, + {"DT_GNU_HASH", Const, 16}, + {"DT_GNU_LIBLIST", Const, 16}, + {"DT_GNU_LIBLISTSZ", Const, 16}, + {"DT_GNU_PRELINKED", Const, 16}, + {"DT_HASH", Const, 0}, + {"DT_HIOS", Const, 0}, + {"DT_HIPROC", Const, 0}, + {"DT_INIT", Const, 0}, + {"DT_INIT_ARRAY", Const, 0}, + {"DT_INIT_ARRAYSZ", Const, 0}, + {"DT_JMPREL", Const, 0}, + {"DT_LOOS", Const, 0}, + {"DT_LOPROC", Const, 0}, + {"DT_MIPS_AUX_DYNAMIC", Const, 16}, + {"DT_MIPS_BASE_ADDRESS", Const, 16}, + {"DT_MIPS_COMPACT_SIZE", Const, 16}, + {"DT_MIPS_CONFLICT", Const, 16}, + {"DT_MIPS_CONFLICTNO", Const, 16}, + {"DT_MIPS_CXX_FLAGS", Const, 16}, + {"DT_MIPS_DELTA_CLASS", Const, 16}, + {"DT_MIPS_DELTA_CLASSSYM", Const, 16}, + {"DT_MIPS_DELTA_CLASSSYM_NO", Const, 16}, + 
{"DT_MIPS_DELTA_CLASS_NO", Const, 16}, + {"DT_MIPS_DELTA_INSTANCE", Const, 16}, + {"DT_MIPS_DELTA_INSTANCE_NO", Const, 16}, + {"DT_MIPS_DELTA_RELOC", Const, 16}, + {"DT_MIPS_DELTA_RELOC_NO", Const, 16}, + {"DT_MIPS_DELTA_SYM", Const, 16}, + {"DT_MIPS_DELTA_SYM_NO", Const, 16}, + {"DT_MIPS_DYNSTR_ALIGN", Const, 16}, + {"DT_MIPS_FLAGS", Const, 16}, + {"DT_MIPS_GOTSYM", Const, 16}, + {"DT_MIPS_GP_VALUE", Const, 16}, + {"DT_MIPS_HIDDEN_GOTIDX", Const, 16}, + {"DT_MIPS_HIPAGENO", Const, 16}, + {"DT_MIPS_ICHECKSUM", Const, 16}, + {"DT_MIPS_INTERFACE", Const, 16}, + {"DT_MIPS_INTERFACE_SIZE", Const, 16}, + {"DT_MIPS_IVERSION", Const, 16}, + {"DT_MIPS_LIBLIST", Const, 16}, + {"DT_MIPS_LIBLISTNO", Const, 16}, + {"DT_MIPS_LOCALPAGE_GOTIDX", Const, 16}, + {"DT_MIPS_LOCAL_GOTIDX", Const, 16}, + {"DT_MIPS_LOCAL_GOTNO", Const, 16}, + {"DT_MIPS_MSYM", Const, 16}, + {"DT_MIPS_OPTIONS", Const, 16}, + {"DT_MIPS_PERF_SUFFIX", Const, 16}, + {"DT_MIPS_PIXIE_INIT", Const, 16}, + {"DT_MIPS_PLTGOT", Const, 16}, + {"DT_MIPS_PROTECTED_GOTIDX", Const, 16}, + {"DT_MIPS_RLD_MAP", Const, 16}, + {"DT_MIPS_RLD_MAP_REL", Const, 16}, + {"DT_MIPS_RLD_TEXT_RESOLVE_ADDR", Const, 16}, + {"DT_MIPS_RLD_VERSION", Const, 16}, + {"DT_MIPS_RWPLT", Const, 16}, + {"DT_MIPS_SYMBOL_LIB", Const, 16}, + {"DT_MIPS_SYMTABNO", Const, 16}, + {"DT_MIPS_TIME_STAMP", Const, 16}, + {"DT_MIPS_UNREFEXTNO", Const, 16}, + {"DT_MOVEENT", Const, 16}, + {"DT_MOVESZ", Const, 16}, + {"DT_MOVETAB", Const, 16}, + {"DT_NEEDED", Const, 0}, + {"DT_NULL", Const, 0}, + {"DT_PLTGOT", Const, 0}, + {"DT_PLTPAD", Const, 16}, + {"DT_PLTPADSZ", Const, 16}, + {"DT_PLTREL", Const, 0}, + {"DT_PLTRELSZ", Const, 0}, + {"DT_POSFLAG_1", Const, 16}, + {"DT_PPC64_GLINK", Const, 16}, + {"DT_PPC64_OPD", Const, 16}, + {"DT_PPC64_OPDSZ", Const, 16}, + {"DT_PPC64_OPT", Const, 16}, + {"DT_PPC_GOT", Const, 16}, + {"DT_PPC_OPT", Const, 16}, + {"DT_PREINIT_ARRAY", Const, 0}, + {"DT_PREINIT_ARRAYSZ", Const, 0}, + {"DT_REL", Const, 0}, + {"DT_RELA", Const, 0}, + {"DT_RELACOUNT", Const, 16}, + {"DT_RELAENT", Const, 0}, + {"DT_RELASZ", Const, 0}, + {"DT_RELCOUNT", Const, 16}, + {"DT_RELENT", Const, 0}, + {"DT_RELSZ", Const, 0}, + {"DT_RPATH", Const, 0}, + {"DT_RUNPATH", Const, 0}, + {"DT_SONAME", Const, 0}, + {"DT_SPARC_REGISTER", Const, 16}, + {"DT_STRSZ", Const, 0}, + {"DT_STRTAB", Const, 0}, + {"DT_SYMBOLIC", Const, 0}, + {"DT_SYMENT", Const, 0}, + {"DT_SYMINENT", Const, 16}, + {"DT_SYMINFO", Const, 16}, + {"DT_SYMINSZ", Const, 16}, + {"DT_SYMTAB", Const, 0}, + {"DT_SYMTAB_SHNDX", Const, 16}, + {"DT_TEXTREL", Const, 0}, + {"DT_TLSDESC_GOT", Const, 16}, + {"DT_TLSDESC_PLT", Const, 16}, + {"DT_USED", Const, 16}, + {"DT_VALRNGHI", Const, 16}, + {"DT_VALRNGLO", Const, 16}, + {"DT_VERDEF", Const, 16}, + {"DT_VERDEFNUM", Const, 16}, + {"DT_VERNEED", Const, 0}, + {"DT_VERNEEDNUM", Const, 0}, + {"DT_VERSYM", Const, 0}, + {"Data", Type, 0}, + {"Dyn32", Type, 0}, + {"Dyn32.Tag", Field, 0}, + {"Dyn32.Val", Field, 0}, + {"Dyn64", Type, 0}, + {"Dyn64.Tag", Field, 0}, + {"Dyn64.Val", Field, 0}, + {"DynFlag", Type, 0}, + {"DynFlag1", Type, 21}, + {"DynTag", Type, 0}, + {"EI_ABIVERSION", Const, 0}, + {"EI_CLASS", Const, 0}, + {"EI_DATA", Const, 0}, + {"EI_NIDENT", Const, 0}, + {"EI_OSABI", Const, 0}, + {"EI_PAD", Const, 0}, + {"EI_VERSION", Const, 0}, + {"ELFCLASS32", Const, 0}, + {"ELFCLASS64", Const, 0}, + {"ELFCLASSNONE", Const, 0}, + {"ELFDATA2LSB", Const, 0}, + {"ELFDATA2MSB", Const, 0}, + {"ELFDATANONE", Const, 0}, + {"ELFMAG", Const, 0}, + {"ELFOSABI_86OPEN", Const, 0}, + {"ELFOSABI_AIX", Const, 
0}, + {"ELFOSABI_ARM", Const, 0}, + {"ELFOSABI_AROS", Const, 11}, + {"ELFOSABI_CLOUDABI", Const, 11}, + {"ELFOSABI_FENIXOS", Const, 11}, + {"ELFOSABI_FREEBSD", Const, 0}, + {"ELFOSABI_HPUX", Const, 0}, + {"ELFOSABI_HURD", Const, 0}, + {"ELFOSABI_IRIX", Const, 0}, + {"ELFOSABI_LINUX", Const, 0}, + {"ELFOSABI_MODESTO", Const, 0}, + {"ELFOSABI_NETBSD", Const, 0}, + {"ELFOSABI_NONE", Const, 0}, + {"ELFOSABI_NSK", Const, 0}, + {"ELFOSABI_OPENBSD", Const, 0}, + {"ELFOSABI_OPENVMS", Const, 0}, + {"ELFOSABI_SOLARIS", Const, 0}, + {"ELFOSABI_STANDALONE", Const, 0}, + {"ELFOSABI_TRU64", Const, 0}, + {"EM_386", Const, 0}, + {"EM_486", Const, 0}, + {"EM_56800EX", Const, 11}, + {"EM_68HC05", Const, 11}, + {"EM_68HC08", Const, 11}, + {"EM_68HC11", Const, 11}, + {"EM_68HC12", Const, 0}, + {"EM_68HC16", Const, 11}, + {"EM_68K", Const, 0}, + {"EM_78KOR", Const, 11}, + {"EM_8051", Const, 11}, + {"EM_860", Const, 0}, + {"EM_88K", Const, 0}, + {"EM_960", Const, 0}, + {"EM_AARCH64", Const, 4}, + {"EM_ALPHA", Const, 0}, + {"EM_ALPHA_STD", Const, 0}, + {"EM_ALTERA_NIOS2", Const, 11}, + {"EM_AMDGPU", Const, 11}, + {"EM_ARC", Const, 0}, + {"EM_ARCA", Const, 11}, + {"EM_ARC_COMPACT", Const, 11}, + {"EM_ARC_COMPACT2", Const, 11}, + {"EM_ARM", Const, 0}, + {"EM_AVR", Const, 11}, + {"EM_AVR32", Const, 11}, + {"EM_BA1", Const, 11}, + {"EM_BA2", Const, 11}, + {"EM_BLACKFIN", Const, 11}, + {"EM_BPF", Const, 11}, + {"EM_C166", Const, 11}, + {"EM_CDP", Const, 11}, + {"EM_CE", Const, 11}, + {"EM_CLOUDSHIELD", Const, 11}, + {"EM_COGE", Const, 11}, + {"EM_COLDFIRE", Const, 0}, + {"EM_COOL", Const, 11}, + {"EM_COREA_1ST", Const, 11}, + {"EM_COREA_2ND", Const, 11}, + {"EM_CR", Const, 11}, + {"EM_CR16", Const, 11}, + {"EM_CRAYNV2", Const, 11}, + {"EM_CRIS", Const, 11}, + {"EM_CRX", Const, 11}, + {"EM_CSR_KALIMBA", Const, 11}, + {"EM_CUDA", Const, 11}, + {"EM_CYPRESS_M8C", Const, 11}, + {"EM_D10V", Const, 11}, + {"EM_D30V", Const, 11}, + {"EM_DSP24", Const, 11}, + {"EM_DSPIC30F", Const, 11}, + {"EM_DXP", Const, 11}, + {"EM_ECOG1", Const, 11}, + {"EM_ECOG16", Const, 11}, + {"EM_ECOG1X", Const, 11}, + {"EM_ECOG2", Const, 11}, + {"EM_ETPU", Const, 11}, + {"EM_EXCESS", Const, 11}, + {"EM_F2MC16", Const, 11}, + {"EM_FIREPATH", Const, 11}, + {"EM_FR20", Const, 0}, + {"EM_FR30", Const, 11}, + {"EM_FT32", Const, 11}, + {"EM_FX66", Const, 11}, + {"EM_H8S", Const, 0}, + {"EM_H8_300", Const, 0}, + {"EM_H8_300H", Const, 0}, + {"EM_H8_500", Const, 0}, + {"EM_HUANY", Const, 11}, + {"EM_IA_64", Const, 0}, + {"EM_INTEL205", Const, 11}, + {"EM_INTEL206", Const, 11}, + {"EM_INTEL207", Const, 11}, + {"EM_INTEL208", Const, 11}, + {"EM_INTEL209", Const, 11}, + {"EM_IP2K", Const, 11}, + {"EM_JAVELIN", Const, 11}, + {"EM_K10M", Const, 11}, + {"EM_KM32", Const, 11}, + {"EM_KMX16", Const, 11}, + {"EM_KMX32", Const, 11}, + {"EM_KMX8", Const, 11}, + {"EM_KVARC", Const, 11}, + {"EM_L10M", Const, 11}, + {"EM_LANAI", Const, 11}, + {"EM_LATTICEMICO32", Const, 11}, + {"EM_LOONGARCH", Const, 19}, + {"EM_M16C", Const, 11}, + {"EM_M32", Const, 0}, + {"EM_M32C", Const, 11}, + {"EM_M32R", Const, 11}, + {"EM_MANIK", Const, 11}, + {"EM_MAX", Const, 11}, + {"EM_MAXQ30", Const, 11}, + {"EM_MCHP_PIC", Const, 11}, + {"EM_MCST_ELBRUS", Const, 11}, + {"EM_ME16", Const, 0}, + {"EM_METAG", Const, 11}, + {"EM_MICROBLAZE", Const, 11}, + {"EM_MIPS", Const, 0}, + {"EM_MIPS_RS3_LE", Const, 0}, + {"EM_MIPS_RS4_BE", Const, 0}, + {"EM_MIPS_X", Const, 0}, + {"EM_MMA", Const, 0}, + {"EM_MMDSP_PLUS", Const, 11}, + {"EM_MMIX", Const, 11}, + {"EM_MN10200", Const, 11}, + {"EM_MN10300", 
Const, 11}, + {"EM_MOXIE", Const, 11}, + {"EM_MSP430", Const, 11}, + {"EM_NCPU", Const, 0}, + {"EM_NDR1", Const, 0}, + {"EM_NDS32", Const, 11}, + {"EM_NONE", Const, 0}, + {"EM_NORC", Const, 11}, + {"EM_NS32K", Const, 11}, + {"EM_OPEN8", Const, 11}, + {"EM_OPENRISC", Const, 11}, + {"EM_PARISC", Const, 0}, + {"EM_PCP", Const, 0}, + {"EM_PDP10", Const, 11}, + {"EM_PDP11", Const, 11}, + {"EM_PDSP", Const, 11}, + {"EM_PJ", Const, 11}, + {"EM_PPC", Const, 0}, + {"EM_PPC64", Const, 0}, + {"EM_PRISM", Const, 11}, + {"EM_QDSP6", Const, 11}, + {"EM_R32C", Const, 11}, + {"EM_RCE", Const, 0}, + {"EM_RH32", Const, 0}, + {"EM_RISCV", Const, 11}, + {"EM_RL78", Const, 11}, + {"EM_RS08", Const, 11}, + {"EM_RX", Const, 11}, + {"EM_S370", Const, 0}, + {"EM_S390", Const, 0}, + {"EM_SCORE7", Const, 11}, + {"EM_SEP", Const, 11}, + {"EM_SE_C17", Const, 11}, + {"EM_SE_C33", Const, 11}, + {"EM_SH", Const, 0}, + {"EM_SHARC", Const, 11}, + {"EM_SLE9X", Const, 11}, + {"EM_SNP1K", Const, 11}, + {"EM_SPARC", Const, 0}, + {"EM_SPARC32PLUS", Const, 0}, + {"EM_SPARCV9", Const, 0}, + {"EM_ST100", Const, 0}, + {"EM_ST19", Const, 11}, + {"EM_ST200", Const, 11}, + {"EM_ST7", Const, 11}, + {"EM_ST9PLUS", Const, 11}, + {"EM_STARCORE", Const, 0}, + {"EM_STM8", Const, 11}, + {"EM_STXP7X", Const, 11}, + {"EM_SVX", Const, 11}, + {"EM_TILE64", Const, 11}, + {"EM_TILEGX", Const, 11}, + {"EM_TILEPRO", Const, 11}, + {"EM_TINYJ", Const, 0}, + {"EM_TI_ARP32", Const, 11}, + {"EM_TI_C2000", Const, 11}, + {"EM_TI_C5500", Const, 11}, + {"EM_TI_C6000", Const, 11}, + {"EM_TI_PRU", Const, 11}, + {"EM_TMM_GPP", Const, 11}, + {"EM_TPC", Const, 11}, + {"EM_TRICORE", Const, 0}, + {"EM_TRIMEDIA", Const, 11}, + {"EM_TSK3000", Const, 11}, + {"EM_UNICORE", Const, 11}, + {"EM_V800", Const, 0}, + {"EM_V850", Const, 11}, + {"EM_VAX", Const, 11}, + {"EM_VIDEOCORE", Const, 11}, + {"EM_VIDEOCORE3", Const, 11}, + {"EM_VIDEOCORE5", Const, 11}, + {"EM_VISIUM", Const, 11}, + {"EM_VPP500", Const, 0}, + {"EM_X86_64", Const, 0}, + {"EM_XCORE", Const, 11}, + {"EM_XGATE", Const, 11}, + {"EM_XIMO16", Const, 11}, + {"EM_XTENSA", Const, 11}, + {"EM_Z80", Const, 11}, + {"EM_ZSP", Const, 11}, + {"ET_CORE", Const, 0}, + {"ET_DYN", Const, 0}, + {"ET_EXEC", Const, 0}, + {"ET_HIOS", Const, 0}, + {"ET_HIPROC", Const, 0}, + {"ET_LOOS", Const, 0}, + {"ET_LOPROC", Const, 0}, + {"ET_NONE", Const, 0}, + {"ET_REL", Const, 0}, + {"EV_CURRENT", Const, 0}, + {"EV_NONE", Const, 0}, + {"ErrNoSymbols", Var, 4}, + {"File", Type, 0}, + {"File.FileHeader", Field, 0}, + {"File.Progs", Field, 0}, + {"File.Sections", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.ABIVersion", Field, 0}, + {"FileHeader.ByteOrder", Field, 0}, + {"FileHeader.Class", Field, 0}, + {"FileHeader.Data", Field, 0}, + {"FileHeader.Entry", Field, 1}, + {"FileHeader.Machine", Field, 0}, + {"FileHeader.OSABI", Field, 0}, + {"FileHeader.Type", Field, 0}, + {"FileHeader.Version", Field, 0}, + {"FormatError", Type, 0}, + {"Header32", Type, 0}, + {"Header32.Ehsize", Field, 0}, + {"Header32.Entry", Field, 0}, + {"Header32.Flags", Field, 0}, + {"Header32.Ident", Field, 0}, + {"Header32.Machine", Field, 0}, + {"Header32.Phentsize", Field, 0}, + {"Header32.Phnum", Field, 0}, + {"Header32.Phoff", Field, 0}, + {"Header32.Shentsize", Field, 0}, + {"Header32.Shnum", Field, 0}, + {"Header32.Shoff", Field, 0}, + {"Header32.Shstrndx", Field, 0}, + {"Header32.Type", Field, 0}, + {"Header32.Version", Field, 0}, + {"Header64", Type, 0}, + {"Header64.Ehsize", Field, 0}, + {"Header64.Entry", Field, 0}, + {"Header64.Flags", Field, 0}, + 
{"Header64.Ident", Field, 0}, + {"Header64.Machine", Field, 0}, + {"Header64.Phentsize", Field, 0}, + {"Header64.Phnum", Field, 0}, + {"Header64.Phoff", Field, 0}, + {"Header64.Shentsize", Field, 0}, + {"Header64.Shnum", Field, 0}, + {"Header64.Shoff", Field, 0}, + {"Header64.Shstrndx", Field, 0}, + {"Header64.Type", Field, 0}, + {"Header64.Version", Field, 0}, + {"ImportedSymbol", Type, 0}, + {"ImportedSymbol.Library", Field, 0}, + {"ImportedSymbol.Name", Field, 0}, + {"ImportedSymbol.Version", Field, 0}, + {"Machine", Type, 0}, + {"NT_FPREGSET", Const, 0}, + {"NT_PRPSINFO", Const, 0}, + {"NT_PRSTATUS", Const, 0}, + {"NType", Type, 0}, + {"NewFile", Func, 0}, + {"OSABI", Type, 0}, + {"Open", Func, 0}, + {"PF_MASKOS", Const, 0}, + {"PF_MASKPROC", Const, 0}, + {"PF_R", Const, 0}, + {"PF_W", Const, 0}, + {"PF_X", Const, 0}, + {"PT_AARCH64_ARCHEXT", Const, 16}, + {"PT_AARCH64_UNWIND", Const, 16}, + {"PT_ARM_ARCHEXT", Const, 16}, + {"PT_ARM_EXIDX", Const, 16}, + {"PT_DYNAMIC", Const, 0}, + {"PT_GNU_EH_FRAME", Const, 16}, + {"PT_GNU_MBIND_HI", Const, 16}, + {"PT_GNU_MBIND_LO", Const, 16}, + {"PT_GNU_PROPERTY", Const, 16}, + {"PT_GNU_RELRO", Const, 16}, + {"PT_GNU_STACK", Const, 16}, + {"PT_HIOS", Const, 0}, + {"PT_HIPROC", Const, 0}, + {"PT_INTERP", Const, 0}, + {"PT_LOAD", Const, 0}, + {"PT_LOOS", Const, 0}, + {"PT_LOPROC", Const, 0}, + {"PT_MIPS_ABIFLAGS", Const, 16}, + {"PT_MIPS_OPTIONS", Const, 16}, + {"PT_MIPS_REGINFO", Const, 16}, + {"PT_MIPS_RTPROC", Const, 16}, + {"PT_NOTE", Const, 0}, + {"PT_NULL", Const, 0}, + {"PT_OPENBSD_BOOTDATA", Const, 16}, + {"PT_OPENBSD_RANDOMIZE", Const, 16}, + {"PT_OPENBSD_WXNEEDED", Const, 16}, + {"PT_PAX_FLAGS", Const, 16}, + {"PT_PHDR", Const, 0}, + {"PT_S390_PGSTE", Const, 16}, + {"PT_SHLIB", Const, 0}, + {"PT_SUNWSTACK", Const, 16}, + {"PT_SUNW_EH_FRAME", Const, 16}, + {"PT_TLS", Const, 0}, + {"Prog", Type, 0}, + {"Prog.ProgHeader", Field, 0}, + {"Prog.ReaderAt", Field, 0}, + {"Prog32", Type, 0}, + {"Prog32.Align", Field, 0}, + {"Prog32.Filesz", Field, 0}, + {"Prog32.Flags", Field, 0}, + {"Prog32.Memsz", Field, 0}, + {"Prog32.Off", Field, 0}, + {"Prog32.Paddr", Field, 0}, + {"Prog32.Type", Field, 0}, + {"Prog32.Vaddr", Field, 0}, + {"Prog64", Type, 0}, + {"Prog64.Align", Field, 0}, + {"Prog64.Filesz", Field, 0}, + {"Prog64.Flags", Field, 0}, + {"Prog64.Memsz", Field, 0}, + {"Prog64.Off", Field, 0}, + {"Prog64.Paddr", Field, 0}, + {"Prog64.Type", Field, 0}, + {"Prog64.Vaddr", Field, 0}, + {"ProgFlag", Type, 0}, + {"ProgHeader", Type, 0}, + {"ProgHeader.Align", Field, 0}, + {"ProgHeader.Filesz", Field, 0}, + {"ProgHeader.Flags", Field, 0}, + {"ProgHeader.Memsz", Field, 0}, + {"ProgHeader.Off", Field, 0}, + {"ProgHeader.Paddr", Field, 0}, + {"ProgHeader.Type", Field, 0}, + {"ProgHeader.Vaddr", Field, 0}, + {"ProgType", Type, 0}, + {"R_386", Type, 0}, + {"R_386_16", Const, 10}, + {"R_386_32", Const, 0}, + {"R_386_32PLT", Const, 10}, + {"R_386_8", Const, 10}, + {"R_386_COPY", Const, 0}, + {"R_386_GLOB_DAT", Const, 0}, + {"R_386_GOT32", Const, 0}, + {"R_386_GOT32X", Const, 10}, + {"R_386_GOTOFF", Const, 0}, + {"R_386_GOTPC", Const, 0}, + {"R_386_IRELATIVE", Const, 10}, + {"R_386_JMP_SLOT", Const, 0}, + {"R_386_NONE", Const, 0}, + {"R_386_PC16", Const, 10}, + {"R_386_PC32", Const, 0}, + {"R_386_PC8", Const, 10}, + {"R_386_PLT32", Const, 0}, + {"R_386_RELATIVE", Const, 0}, + {"R_386_SIZE32", Const, 10}, + {"R_386_TLS_DESC", Const, 10}, + {"R_386_TLS_DESC_CALL", Const, 10}, + {"R_386_TLS_DTPMOD32", Const, 0}, + {"R_386_TLS_DTPOFF32", Const, 0}, + {"R_386_TLS_GD", 
Const, 0}, + {"R_386_TLS_GD_32", Const, 0}, + {"R_386_TLS_GD_CALL", Const, 0}, + {"R_386_TLS_GD_POP", Const, 0}, + {"R_386_TLS_GD_PUSH", Const, 0}, + {"R_386_TLS_GOTDESC", Const, 10}, + {"R_386_TLS_GOTIE", Const, 0}, + {"R_386_TLS_IE", Const, 0}, + {"R_386_TLS_IE_32", Const, 0}, + {"R_386_TLS_LDM", Const, 0}, + {"R_386_TLS_LDM_32", Const, 0}, + {"R_386_TLS_LDM_CALL", Const, 0}, + {"R_386_TLS_LDM_POP", Const, 0}, + {"R_386_TLS_LDM_PUSH", Const, 0}, + {"R_386_TLS_LDO_32", Const, 0}, + {"R_386_TLS_LE", Const, 0}, + {"R_386_TLS_LE_32", Const, 0}, + {"R_386_TLS_TPOFF", Const, 0}, + {"R_386_TLS_TPOFF32", Const, 0}, + {"R_390", Type, 7}, + {"R_390_12", Const, 7}, + {"R_390_16", Const, 7}, + {"R_390_20", Const, 7}, + {"R_390_32", Const, 7}, + {"R_390_64", Const, 7}, + {"R_390_8", Const, 7}, + {"R_390_COPY", Const, 7}, + {"R_390_GLOB_DAT", Const, 7}, + {"R_390_GOT12", Const, 7}, + {"R_390_GOT16", Const, 7}, + {"R_390_GOT20", Const, 7}, + {"R_390_GOT32", Const, 7}, + {"R_390_GOT64", Const, 7}, + {"R_390_GOTENT", Const, 7}, + {"R_390_GOTOFF", Const, 7}, + {"R_390_GOTOFF16", Const, 7}, + {"R_390_GOTOFF64", Const, 7}, + {"R_390_GOTPC", Const, 7}, + {"R_390_GOTPCDBL", Const, 7}, + {"R_390_GOTPLT12", Const, 7}, + {"R_390_GOTPLT16", Const, 7}, + {"R_390_GOTPLT20", Const, 7}, + {"R_390_GOTPLT32", Const, 7}, + {"R_390_GOTPLT64", Const, 7}, + {"R_390_GOTPLTENT", Const, 7}, + {"R_390_GOTPLTOFF16", Const, 7}, + {"R_390_GOTPLTOFF32", Const, 7}, + {"R_390_GOTPLTOFF64", Const, 7}, + {"R_390_JMP_SLOT", Const, 7}, + {"R_390_NONE", Const, 7}, + {"R_390_PC16", Const, 7}, + {"R_390_PC16DBL", Const, 7}, + {"R_390_PC32", Const, 7}, + {"R_390_PC32DBL", Const, 7}, + {"R_390_PC64", Const, 7}, + {"R_390_PLT16DBL", Const, 7}, + {"R_390_PLT32", Const, 7}, + {"R_390_PLT32DBL", Const, 7}, + {"R_390_PLT64", Const, 7}, + {"R_390_RELATIVE", Const, 7}, + {"R_390_TLS_DTPMOD", Const, 7}, + {"R_390_TLS_DTPOFF", Const, 7}, + {"R_390_TLS_GD32", Const, 7}, + {"R_390_TLS_GD64", Const, 7}, + {"R_390_TLS_GDCALL", Const, 7}, + {"R_390_TLS_GOTIE12", Const, 7}, + {"R_390_TLS_GOTIE20", Const, 7}, + {"R_390_TLS_GOTIE32", Const, 7}, + {"R_390_TLS_GOTIE64", Const, 7}, + {"R_390_TLS_IE32", Const, 7}, + {"R_390_TLS_IE64", Const, 7}, + {"R_390_TLS_IEENT", Const, 7}, + {"R_390_TLS_LDCALL", Const, 7}, + {"R_390_TLS_LDM32", Const, 7}, + {"R_390_TLS_LDM64", Const, 7}, + {"R_390_TLS_LDO32", Const, 7}, + {"R_390_TLS_LDO64", Const, 7}, + {"R_390_TLS_LE32", Const, 7}, + {"R_390_TLS_LE64", Const, 7}, + {"R_390_TLS_LOAD", Const, 7}, + {"R_390_TLS_TPOFF", Const, 7}, + {"R_AARCH64", Type, 4}, + {"R_AARCH64_ABS16", Const, 4}, + {"R_AARCH64_ABS32", Const, 4}, + {"R_AARCH64_ABS64", Const, 4}, + {"R_AARCH64_ADD_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_ADR_GOT_PAGE", Const, 4}, + {"R_AARCH64_ADR_PREL_LO21", Const, 4}, + {"R_AARCH64_ADR_PREL_PG_HI21", Const, 4}, + {"R_AARCH64_ADR_PREL_PG_HI21_NC", Const, 4}, + {"R_AARCH64_CALL26", Const, 4}, + {"R_AARCH64_CONDBR19", Const, 4}, + {"R_AARCH64_COPY", Const, 4}, + {"R_AARCH64_GLOB_DAT", Const, 4}, + {"R_AARCH64_GOT_LD_PREL19", Const, 4}, + {"R_AARCH64_IRELATIVE", Const, 4}, + {"R_AARCH64_JUMP26", Const, 4}, + {"R_AARCH64_JUMP_SLOT", Const, 4}, + {"R_AARCH64_LD64_GOTOFF_LO15", Const, 10}, + {"R_AARCH64_LD64_GOTPAGE_LO15", Const, 10}, + {"R_AARCH64_LD64_GOT_LO12_NC", Const, 4}, + {"R_AARCH64_LDST128_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST16_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST32_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST64_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST8_ABS_LO12_NC", Const, 4}, + 
{"R_AARCH64_LD_PREL_LO19", Const, 4}, + {"R_AARCH64_MOVW_SABS_G0", Const, 4}, + {"R_AARCH64_MOVW_SABS_G1", Const, 4}, + {"R_AARCH64_MOVW_SABS_G2", Const, 4}, + {"R_AARCH64_MOVW_UABS_G0", Const, 4}, + {"R_AARCH64_MOVW_UABS_G0_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G1", Const, 4}, + {"R_AARCH64_MOVW_UABS_G1_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G2", Const, 4}, + {"R_AARCH64_MOVW_UABS_G2_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G3", Const, 4}, + {"R_AARCH64_NONE", Const, 4}, + {"R_AARCH64_NULL", Const, 4}, + {"R_AARCH64_P32_ABS16", Const, 4}, + {"R_AARCH64_P32_ABS32", Const, 4}, + {"R_AARCH64_P32_ADD_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_ADR_GOT_PAGE", Const, 4}, + {"R_AARCH64_P32_ADR_PREL_LO21", Const, 4}, + {"R_AARCH64_P32_ADR_PREL_PG_HI21", Const, 4}, + {"R_AARCH64_P32_CALL26", Const, 4}, + {"R_AARCH64_P32_CONDBR19", Const, 4}, + {"R_AARCH64_P32_COPY", Const, 4}, + {"R_AARCH64_P32_GLOB_DAT", Const, 4}, + {"R_AARCH64_P32_GOT_LD_PREL19", Const, 4}, + {"R_AARCH64_P32_IRELATIVE", Const, 4}, + {"R_AARCH64_P32_JUMP26", Const, 4}, + {"R_AARCH64_P32_JUMP_SLOT", Const, 4}, + {"R_AARCH64_P32_LD32_GOT_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST128_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST16_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST32_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST64_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST8_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LD_PREL_LO19", Const, 4}, + {"R_AARCH64_P32_MOVW_SABS_G0", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G0", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G0_NC", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G1", Const, 4}, + {"R_AARCH64_P32_PREL16", Const, 4}, + {"R_AARCH64_P32_PREL32", Const, 4}, + {"R_AARCH64_P32_RELATIVE", Const, 4}, + {"R_AARCH64_P32_TLSDESC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADR_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADR_PREL21", Const, 4}, + {"R_AARCH64_P32_TLSDESC_CALL", Const, 4}, + {"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_LD_PREL19", Const, 4}, + {"R_AARCH64_P32_TLSGD_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSGD_ADR_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", Const, 4}, + {"R_AARCH64_P32_TLS_DTPMOD", Const, 4}, + {"R_AARCH64_P32_TLS_DTPREL", Const, 4}, + {"R_AARCH64_P32_TLS_TPREL", Const, 4}, + {"R_AARCH64_P32_TSTBR14", Const, 4}, + {"R_AARCH64_PREL16", Const, 4}, + {"R_AARCH64_PREL32", Const, 4}, + {"R_AARCH64_PREL64", Const, 4}, + {"R_AARCH64_RELATIVE", Const, 4}, + {"R_AARCH64_TLSDESC", Const, 4}, + {"R_AARCH64_TLSDESC_ADD", Const, 4}, + {"R_AARCH64_TLSDESC_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_TLSDESC_ADR_PAGE21", Const, 4}, + {"R_AARCH64_TLSDESC_ADR_PREL21", Const, 4}, + {"R_AARCH64_TLSDESC_CALL", Const, 4}, + {"R_AARCH64_TLSDESC_LD64_LO12_NC", Const, 4}, + {"R_AARCH64_TLSDESC_LDR", Const, 4}, + {"R_AARCH64_TLSDESC_LD_PREL19", Const, 4}, + {"R_AARCH64_TLSDESC_OFF_G0_NC", Const, 4}, + {"R_AARCH64_TLSDESC_OFF_G1", Const, 4}, + {"R_AARCH64_TLSGD_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_TLSGD_ADR_PAGE21", Const, 4}, + {"R_AARCH64_TLSGD_ADR_PREL21", Const, 10}, + 
{"R_AARCH64_TLSGD_MOVW_G0_NC", Const, 10}, + {"R_AARCH64_TLSGD_MOVW_G1", Const, 10}, + {"R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4}, + {"R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", Const, 4}, + {"R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", Const, 4}, + {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", Const, 4}, + {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", Const, 4}, + {"R_AARCH64_TLSLD_ADR_PAGE21", Const, 10}, + {"R_AARCH64_TLSLD_ADR_PREL21", Const, 10}, + {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12", Const, 10}, + {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", Const, 10}, + {"R_AARCH64_TLSLE_ADD_TPREL_HI12", Const, 4}, + {"R_AARCH64_TLSLE_ADD_TPREL_LO12", Const, 4}, + {"R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", Const, 4}, + {"R_AARCH64_TLSLE_LDST128_TPREL_LO12", Const, 10}, + {"R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", Const, 10}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G0", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G1", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G2", Const, 4}, + {"R_AARCH64_TLS_DTPMOD64", Const, 4}, + {"R_AARCH64_TLS_DTPREL64", Const, 4}, + {"R_AARCH64_TLS_TPREL64", Const, 4}, + {"R_AARCH64_TSTBR14", Const, 4}, + {"R_ALPHA", Type, 0}, + {"R_ALPHA_BRADDR", Const, 0}, + {"R_ALPHA_COPY", Const, 0}, + {"R_ALPHA_GLOB_DAT", Const, 0}, + {"R_ALPHA_GPDISP", Const, 0}, + {"R_ALPHA_GPREL32", Const, 0}, + {"R_ALPHA_GPRELHIGH", Const, 0}, + {"R_ALPHA_GPRELLOW", Const, 0}, + {"R_ALPHA_GPVALUE", Const, 0}, + {"R_ALPHA_HINT", Const, 0}, + {"R_ALPHA_IMMED_BR_HI32", Const, 0}, + {"R_ALPHA_IMMED_GP_16", Const, 0}, + {"R_ALPHA_IMMED_GP_HI32", Const, 0}, + {"R_ALPHA_IMMED_LO32", Const, 0}, + {"R_ALPHA_IMMED_SCN_HI32", Const, 0}, + {"R_ALPHA_JMP_SLOT", Const, 0}, + {"R_ALPHA_LITERAL", Const, 0}, + {"R_ALPHA_LITUSE", Const, 0}, + {"R_ALPHA_NONE", Const, 0}, + {"R_ALPHA_OP_PRSHIFT", Const, 0}, + {"R_ALPHA_OP_PSUB", Const, 0}, + {"R_ALPHA_OP_PUSH", Const, 0}, + {"R_ALPHA_OP_STORE", Const, 0}, + {"R_ALPHA_REFLONG", Const, 0}, + {"R_ALPHA_REFQUAD", Const, 0}, + {"R_ALPHA_RELATIVE", Const, 0}, + {"R_ALPHA_SREL16", Const, 0}, + {"R_ALPHA_SREL32", Const, 0}, + {"R_ALPHA_SREL64", Const, 0}, + {"R_ARM", Type, 0}, + {"R_ARM_ABS12", Const, 0}, + {"R_ARM_ABS16", Const, 0}, + {"R_ARM_ABS32", Const, 0}, + {"R_ARM_ABS32_NOI", Const, 10}, + {"R_ARM_ABS8", Const, 0}, + {"R_ARM_ALU_PCREL_15_8", Const, 10}, + {"R_ARM_ALU_PCREL_23_15", Const, 10}, + {"R_ARM_ALU_PCREL_7_0", Const, 10}, + {"R_ARM_ALU_PC_G0", Const, 10}, + {"R_ARM_ALU_PC_G0_NC", Const, 10}, + {"R_ARM_ALU_PC_G1", Const, 10}, + {"R_ARM_ALU_PC_G1_NC", Const, 10}, + {"R_ARM_ALU_PC_G2", Const, 10}, + {"R_ARM_ALU_SBREL_19_12_NC", Const, 10}, + {"R_ARM_ALU_SBREL_27_20_CK", Const, 10}, + {"R_ARM_ALU_SB_G0", Const, 10}, + {"R_ARM_ALU_SB_G0_NC", Const, 10}, + {"R_ARM_ALU_SB_G1", Const, 10}, + {"R_ARM_ALU_SB_G1_NC", Const, 10}, + {"R_ARM_ALU_SB_G2", Const, 10}, + {"R_ARM_AMP_VCALL9", Const, 0}, + {"R_ARM_BASE_ABS", Const, 10}, + {"R_ARM_CALL", Const, 10}, + {"R_ARM_COPY", Const, 0}, + {"R_ARM_GLOB_DAT", Const, 0}, + {"R_ARM_GNU_VTENTRY", Const, 0}, + {"R_ARM_GNU_VTINHERIT", Const, 0}, + {"R_ARM_GOT32", Const, 0}, + {"R_ARM_GOTOFF", Const, 0}, + {"R_ARM_GOTOFF12", Const, 10}, + {"R_ARM_GOTPC", Const, 0}, + {"R_ARM_GOTRELAX", Const, 10}, + {"R_ARM_GOT_ABS", Const, 10}, + {"R_ARM_GOT_BREL12", Const, 10}, + {"R_ARM_GOT_PREL", Const, 10}, + {"R_ARM_IRELATIVE", Const, 10}, + {"R_ARM_JUMP24", Const, 10}, + {"R_ARM_JUMP_SLOT", Const, 0}, + {"R_ARM_LDC_PC_G0", Const, 10}, + {"R_ARM_LDC_PC_G1", Const, 10}, + 
{"R_ARM_LDC_PC_G2", Const, 10}, + {"R_ARM_LDC_SB_G0", Const, 10}, + {"R_ARM_LDC_SB_G1", Const, 10}, + {"R_ARM_LDC_SB_G2", Const, 10}, + {"R_ARM_LDRS_PC_G0", Const, 10}, + {"R_ARM_LDRS_PC_G1", Const, 10}, + {"R_ARM_LDRS_PC_G2", Const, 10}, + {"R_ARM_LDRS_SB_G0", Const, 10}, + {"R_ARM_LDRS_SB_G1", Const, 10}, + {"R_ARM_LDRS_SB_G2", Const, 10}, + {"R_ARM_LDR_PC_G1", Const, 10}, + {"R_ARM_LDR_PC_G2", Const, 10}, + {"R_ARM_LDR_SBREL_11_10_NC", Const, 10}, + {"R_ARM_LDR_SB_G0", Const, 10}, + {"R_ARM_LDR_SB_G1", Const, 10}, + {"R_ARM_LDR_SB_G2", Const, 10}, + {"R_ARM_ME_TOO", Const, 10}, + {"R_ARM_MOVT_ABS", Const, 10}, + {"R_ARM_MOVT_BREL", Const, 10}, + {"R_ARM_MOVT_PREL", Const, 10}, + {"R_ARM_MOVW_ABS_NC", Const, 10}, + {"R_ARM_MOVW_BREL", Const, 10}, + {"R_ARM_MOVW_BREL_NC", Const, 10}, + {"R_ARM_MOVW_PREL_NC", Const, 10}, + {"R_ARM_NONE", Const, 0}, + {"R_ARM_PC13", Const, 0}, + {"R_ARM_PC24", Const, 0}, + {"R_ARM_PLT32", Const, 0}, + {"R_ARM_PLT32_ABS", Const, 10}, + {"R_ARM_PREL31", Const, 10}, + {"R_ARM_PRIVATE_0", Const, 10}, + {"R_ARM_PRIVATE_1", Const, 10}, + {"R_ARM_PRIVATE_10", Const, 10}, + {"R_ARM_PRIVATE_11", Const, 10}, + {"R_ARM_PRIVATE_12", Const, 10}, + {"R_ARM_PRIVATE_13", Const, 10}, + {"R_ARM_PRIVATE_14", Const, 10}, + {"R_ARM_PRIVATE_15", Const, 10}, + {"R_ARM_PRIVATE_2", Const, 10}, + {"R_ARM_PRIVATE_3", Const, 10}, + {"R_ARM_PRIVATE_4", Const, 10}, + {"R_ARM_PRIVATE_5", Const, 10}, + {"R_ARM_PRIVATE_6", Const, 10}, + {"R_ARM_PRIVATE_7", Const, 10}, + {"R_ARM_PRIVATE_8", Const, 10}, + {"R_ARM_PRIVATE_9", Const, 10}, + {"R_ARM_RABS32", Const, 0}, + {"R_ARM_RBASE", Const, 0}, + {"R_ARM_REL32", Const, 0}, + {"R_ARM_REL32_NOI", Const, 10}, + {"R_ARM_RELATIVE", Const, 0}, + {"R_ARM_RPC24", Const, 0}, + {"R_ARM_RREL32", Const, 0}, + {"R_ARM_RSBREL32", Const, 0}, + {"R_ARM_RXPC25", Const, 10}, + {"R_ARM_SBREL31", Const, 10}, + {"R_ARM_SBREL32", Const, 0}, + {"R_ARM_SWI24", Const, 0}, + {"R_ARM_TARGET1", Const, 10}, + {"R_ARM_TARGET2", Const, 10}, + {"R_ARM_THM_ABS5", Const, 0}, + {"R_ARM_THM_ALU_ABS_G0_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G1_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G2_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G3", Const, 10}, + {"R_ARM_THM_ALU_PREL_11_0", Const, 10}, + {"R_ARM_THM_GOT_BREL12", Const, 10}, + {"R_ARM_THM_JUMP11", Const, 10}, + {"R_ARM_THM_JUMP19", Const, 10}, + {"R_ARM_THM_JUMP24", Const, 10}, + {"R_ARM_THM_JUMP6", Const, 10}, + {"R_ARM_THM_JUMP8", Const, 10}, + {"R_ARM_THM_MOVT_ABS", Const, 10}, + {"R_ARM_THM_MOVT_BREL", Const, 10}, + {"R_ARM_THM_MOVT_PREL", Const, 10}, + {"R_ARM_THM_MOVW_ABS_NC", Const, 10}, + {"R_ARM_THM_MOVW_BREL", Const, 10}, + {"R_ARM_THM_MOVW_BREL_NC", Const, 10}, + {"R_ARM_THM_MOVW_PREL_NC", Const, 10}, + {"R_ARM_THM_PC12", Const, 10}, + {"R_ARM_THM_PC22", Const, 0}, + {"R_ARM_THM_PC8", Const, 0}, + {"R_ARM_THM_RPC22", Const, 0}, + {"R_ARM_THM_SWI8", Const, 0}, + {"R_ARM_THM_TLS_CALL", Const, 10}, + {"R_ARM_THM_TLS_DESCSEQ16", Const, 10}, + {"R_ARM_THM_TLS_DESCSEQ32", Const, 10}, + {"R_ARM_THM_XPC22", Const, 0}, + {"R_ARM_TLS_CALL", Const, 10}, + {"R_ARM_TLS_DESCSEQ", Const, 10}, + {"R_ARM_TLS_DTPMOD32", Const, 10}, + {"R_ARM_TLS_DTPOFF32", Const, 10}, + {"R_ARM_TLS_GD32", Const, 10}, + {"R_ARM_TLS_GOTDESC", Const, 10}, + {"R_ARM_TLS_IE12GP", Const, 10}, + {"R_ARM_TLS_IE32", Const, 10}, + {"R_ARM_TLS_LDM32", Const, 10}, + {"R_ARM_TLS_LDO12", Const, 10}, + {"R_ARM_TLS_LDO32", Const, 10}, + {"R_ARM_TLS_LE12", Const, 10}, + {"R_ARM_TLS_LE32", Const, 10}, + {"R_ARM_TLS_TPOFF32", Const, 10}, + {"R_ARM_V4BX", Const, 10}, + 
{"R_ARM_XPC25", Const, 0}, + {"R_INFO", Func, 0}, + {"R_INFO32", Func, 0}, + {"R_LARCH", Type, 19}, + {"R_LARCH_32", Const, 19}, + {"R_LARCH_32_PCREL", Const, 20}, + {"R_LARCH_64", Const, 19}, + {"R_LARCH_64_PCREL", Const, 22}, + {"R_LARCH_ABS64_HI12", Const, 20}, + {"R_LARCH_ABS64_LO20", Const, 20}, + {"R_LARCH_ABS_HI20", Const, 20}, + {"R_LARCH_ABS_LO12", Const, 20}, + {"R_LARCH_ADD16", Const, 19}, + {"R_LARCH_ADD24", Const, 19}, + {"R_LARCH_ADD32", Const, 19}, + {"R_LARCH_ADD6", Const, 22}, + {"R_LARCH_ADD64", Const, 19}, + {"R_LARCH_ADD8", Const, 19}, + {"R_LARCH_ADD_ULEB128", Const, 22}, + {"R_LARCH_ALIGN", Const, 22}, + {"R_LARCH_B16", Const, 20}, + {"R_LARCH_B21", Const, 20}, + {"R_LARCH_B26", Const, 20}, + {"R_LARCH_CFA", Const, 22}, + {"R_LARCH_COPY", Const, 19}, + {"R_LARCH_DELETE", Const, 22}, + {"R_LARCH_GNU_VTENTRY", Const, 20}, + {"R_LARCH_GNU_VTINHERIT", Const, 20}, + {"R_LARCH_GOT64_HI12", Const, 20}, + {"R_LARCH_GOT64_LO20", Const, 20}, + {"R_LARCH_GOT64_PC_HI12", Const, 20}, + {"R_LARCH_GOT64_PC_LO20", Const, 20}, + {"R_LARCH_GOT_HI20", Const, 20}, + {"R_LARCH_GOT_LO12", Const, 20}, + {"R_LARCH_GOT_PC_HI20", Const, 20}, + {"R_LARCH_GOT_PC_LO12", Const, 20}, + {"R_LARCH_IRELATIVE", Const, 19}, + {"R_LARCH_JUMP_SLOT", Const, 19}, + {"R_LARCH_MARK_LA", Const, 19}, + {"R_LARCH_MARK_PCREL", Const, 19}, + {"R_LARCH_NONE", Const, 19}, + {"R_LARCH_PCALA64_HI12", Const, 20}, + {"R_LARCH_PCALA64_LO20", Const, 20}, + {"R_LARCH_PCALA_HI20", Const, 20}, + {"R_LARCH_PCALA_LO12", Const, 20}, + {"R_LARCH_PCREL20_S2", Const, 22}, + {"R_LARCH_RELATIVE", Const, 19}, + {"R_LARCH_RELAX", Const, 20}, + {"R_LARCH_SOP_ADD", Const, 19}, + {"R_LARCH_SOP_AND", Const, 19}, + {"R_LARCH_SOP_ASSERT", Const, 19}, + {"R_LARCH_SOP_IF_ELSE", Const, 19}, + {"R_LARCH_SOP_NOT", Const, 19}, + {"R_LARCH_SOP_POP_32_S_0_10_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_0_5_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_12", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_16", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_5", Const, 19}, + {"R_LARCH_SOP_POP_32_S_5_20", Const, 19}, + {"R_LARCH_SOP_POP_32_U", Const, 19}, + {"R_LARCH_SOP_POP_32_U_10_12", Const, 19}, + {"R_LARCH_SOP_PUSH_ABSOLUTE", Const, 19}, + {"R_LARCH_SOP_PUSH_DUP", Const, 19}, + {"R_LARCH_SOP_PUSH_GPREL", Const, 19}, + {"R_LARCH_SOP_PUSH_PCREL", Const, 19}, + {"R_LARCH_SOP_PUSH_PLT_PCREL", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_GD", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_GOT", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_TPREL", Const, 19}, + {"R_LARCH_SOP_SL", Const, 19}, + {"R_LARCH_SOP_SR", Const, 19}, + {"R_LARCH_SOP_SUB", Const, 19}, + {"R_LARCH_SUB16", Const, 19}, + {"R_LARCH_SUB24", Const, 19}, + {"R_LARCH_SUB32", Const, 19}, + {"R_LARCH_SUB6", Const, 22}, + {"R_LARCH_SUB64", Const, 19}, + {"R_LARCH_SUB8", Const, 19}, + {"R_LARCH_SUB_ULEB128", Const, 22}, + {"R_LARCH_TLS_DTPMOD32", Const, 19}, + {"R_LARCH_TLS_DTPMOD64", Const, 19}, + {"R_LARCH_TLS_DTPREL32", Const, 19}, + {"R_LARCH_TLS_DTPREL64", Const, 19}, + {"R_LARCH_TLS_GD_HI20", Const, 20}, + {"R_LARCH_TLS_GD_PC_HI20", Const, 20}, + {"R_LARCH_TLS_IE64_HI12", Const, 20}, + {"R_LARCH_TLS_IE64_LO20", Const, 20}, + {"R_LARCH_TLS_IE64_PC_HI12", Const, 20}, + {"R_LARCH_TLS_IE64_PC_LO20", Const, 20}, + {"R_LARCH_TLS_IE_HI20", Const, 20}, + {"R_LARCH_TLS_IE_LO12", Const, 20}, + {"R_LARCH_TLS_IE_PC_HI20", Const, 20}, + {"R_LARCH_TLS_IE_PC_LO12", Const, 20}, + {"R_LARCH_TLS_LD_HI20", Const, 20}, + {"R_LARCH_TLS_LD_PC_HI20", Const, 20}, + {"R_LARCH_TLS_LE64_HI12", 
Const, 20}, + {"R_LARCH_TLS_LE64_LO20", Const, 20}, + {"R_LARCH_TLS_LE_HI20", Const, 20}, + {"R_LARCH_TLS_LE_LO12", Const, 20}, + {"R_LARCH_TLS_TPREL32", Const, 19}, + {"R_LARCH_TLS_TPREL64", Const, 19}, + {"R_MIPS", Type, 6}, + {"R_MIPS_16", Const, 6}, + {"R_MIPS_26", Const, 6}, + {"R_MIPS_32", Const, 6}, + {"R_MIPS_64", Const, 6}, + {"R_MIPS_ADD_IMMEDIATE", Const, 6}, + {"R_MIPS_CALL16", Const, 6}, + {"R_MIPS_CALL_HI16", Const, 6}, + {"R_MIPS_CALL_LO16", Const, 6}, + {"R_MIPS_DELETE", Const, 6}, + {"R_MIPS_GOT16", Const, 6}, + {"R_MIPS_GOT_DISP", Const, 6}, + {"R_MIPS_GOT_HI16", Const, 6}, + {"R_MIPS_GOT_LO16", Const, 6}, + {"R_MIPS_GOT_OFST", Const, 6}, + {"R_MIPS_GOT_PAGE", Const, 6}, + {"R_MIPS_GPREL16", Const, 6}, + {"R_MIPS_GPREL32", Const, 6}, + {"R_MIPS_HI16", Const, 6}, + {"R_MIPS_HIGHER", Const, 6}, + {"R_MIPS_HIGHEST", Const, 6}, + {"R_MIPS_INSERT_A", Const, 6}, + {"R_MIPS_INSERT_B", Const, 6}, + {"R_MIPS_JALR", Const, 6}, + {"R_MIPS_LITERAL", Const, 6}, + {"R_MIPS_LO16", Const, 6}, + {"R_MIPS_NONE", Const, 6}, + {"R_MIPS_PC16", Const, 6}, + {"R_MIPS_PC32", Const, 22}, + {"R_MIPS_PJUMP", Const, 6}, + {"R_MIPS_REL16", Const, 6}, + {"R_MIPS_REL32", Const, 6}, + {"R_MIPS_RELGOT", Const, 6}, + {"R_MIPS_SCN_DISP", Const, 6}, + {"R_MIPS_SHIFT5", Const, 6}, + {"R_MIPS_SHIFT6", Const, 6}, + {"R_MIPS_SUB", Const, 6}, + {"R_MIPS_TLS_DTPMOD32", Const, 6}, + {"R_MIPS_TLS_DTPMOD64", Const, 6}, + {"R_MIPS_TLS_DTPREL32", Const, 6}, + {"R_MIPS_TLS_DTPREL64", Const, 6}, + {"R_MIPS_TLS_DTPREL_HI16", Const, 6}, + {"R_MIPS_TLS_DTPREL_LO16", Const, 6}, + {"R_MIPS_TLS_GD", Const, 6}, + {"R_MIPS_TLS_GOTTPREL", Const, 6}, + {"R_MIPS_TLS_LDM", Const, 6}, + {"R_MIPS_TLS_TPREL32", Const, 6}, + {"R_MIPS_TLS_TPREL64", Const, 6}, + {"R_MIPS_TLS_TPREL_HI16", Const, 6}, + {"R_MIPS_TLS_TPREL_LO16", Const, 6}, + {"R_PPC", Type, 0}, + {"R_PPC64", Type, 5}, + {"R_PPC64_ADDR14", Const, 5}, + {"R_PPC64_ADDR14_BRNTAKEN", Const, 5}, + {"R_PPC64_ADDR14_BRTAKEN", Const, 5}, + {"R_PPC64_ADDR16", Const, 5}, + {"R_PPC64_ADDR16_DS", Const, 5}, + {"R_PPC64_ADDR16_HA", Const, 5}, + {"R_PPC64_ADDR16_HI", Const, 5}, + {"R_PPC64_ADDR16_HIGH", Const, 10}, + {"R_PPC64_ADDR16_HIGHA", Const, 10}, + {"R_PPC64_ADDR16_HIGHER", Const, 5}, + {"R_PPC64_ADDR16_HIGHER34", Const, 20}, + {"R_PPC64_ADDR16_HIGHERA", Const, 5}, + {"R_PPC64_ADDR16_HIGHERA34", Const, 20}, + {"R_PPC64_ADDR16_HIGHEST", Const, 5}, + {"R_PPC64_ADDR16_HIGHEST34", Const, 20}, + {"R_PPC64_ADDR16_HIGHESTA", Const, 5}, + {"R_PPC64_ADDR16_HIGHESTA34", Const, 20}, + {"R_PPC64_ADDR16_LO", Const, 5}, + {"R_PPC64_ADDR16_LO_DS", Const, 5}, + {"R_PPC64_ADDR24", Const, 5}, + {"R_PPC64_ADDR32", Const, 5}, + {"R_PPC64_ADDR64", Const, 5}, + {"R_PPC64_ADDR64_LOCAL", Const, 10}, + {"R_PPC64_COPY", Const, 20}, + {"R_PPC64_D28", Const, 20}, + {"R_PPC64_D34", Const, 20}, + {"R_PPC64_D34_HA30", Const, 20}, + {"R_PPC64_D34_HI30", Const, 20}, + {"R_PPC64_D34_LO", Const, 20}, + {"R_PPC64_DTPMOD64", Const, 5}, + {"R_PPC64_DTPREL16", Const, 5}, + {"R_PPC64_DTPREL16_DS", Const, 5}, + {"R_PPC64_DTPREL16_HA", Const, 5}, + {"R_PPC64_DTPREL16_HI", Const, 5}, + {"R_PPC64_DTPREL16_HIGH", Const, 10}, + {"R_PPC64_DTPREL16_HIGHA", Const, 10}, + {"R_PPC64_DTPREL16_HIGHER", Const, 5}, + {"R_PPC64_DTPREL16_HIGHERA", Const, 5}, + {"R_PPC64_DTPREL16_HIGHEST", Const, 5}, + {"R_PPC64_DTPREL16_HIGHESTA", Const, 5}, + {"R_PPC64_DTPREL16_LO", Const, 5}, + {"R_PPC64_DTPREL16_LO_DS", Const, 5}, + {"R_PPC64_DTPREL34", Const, 20}, + {"R_PPC64_DTPREL64", Const, 5}, + {"R_PPC64_ENTRY", Const, 10}, + 
{"R_PPC64_GLOB_DAT", Const, 20}, + {"R_PPC64_GNU_VTENTRY", Const, 20}, + {"R_PPC64_GNU_VTINHERIT", Const, 20}, + {"R_PPC64_GOT16", Const, 5}, + {"R_PPC64_GOT16_DS", Const, 5}, + {"R_PPC64_GOT16_HA", Const, 5}, + {"R_PPC64_GOT16_HI", Const, 5}, + {"R_PPC64_GOT16_LO", Const, 5}, + {"R_PPC64_GOT16_LO_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL16_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL16_HA", Const, 5}, + {"R_PPC64_GOT_DTPREL16_HI", Const, 5}, + {"R_PPC64_GOT_DTPREL16_LO_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL_PCREL34", Const, 20}, + {"R_PPC64_GOT_PCREL34", Const, 20}, + {"R_PPC64_GOT_TLSGD16", Const, 5}, + {"R_PPC64_GOT_TLSGD16_HA", Const, 5}, + {"R_PPC64_GOT_TLSGD16_HI", Const, 5}, + {"R_PPC64_GOT_TLSGD16_LO", Const, 5}, + {"R_PPC64_GOT_TLSGD_PCREL34", Const, 20}, + {"R_PPC64_GOT_TLSLD16", Const, 5}, + {"R_PPC64_GOT_TLSLD16_HA", Const, 5}, + {"R_PPC64_GOT_TLSLD16_HI", Const, 5}, + {"R_PPC64_GOT_TLSLD16_LO", Const, 5}, + {"R_PPC64_GOT_TLSLD_PCREL34", Const, 20}, + {"R_PPC64_GOT_TPREL16_DS", Const, 5}, + {"R_PPC64_GOT_TPREL16_HA", Const, 5}, + {"R_PPC64_GOT_TPREL16_HI", Const, 5}, + {"R_PPC64_GOT_TPREL16_LO_DS", Const, 5}, + {"R_PPC64_GOT_TPREL_PCREL34", Const, 20}, + {"R_PPC64_IRELATIVE", Const, 10}, + {"R_PPC64_JMP_IREL", Const, 10}, + {"R_PPC64_JMP_SLOT", Const, 5}, + {"R_PPC64_NONE", Const, 5}, + {"R_PPC64_PCREL28", Const, 20}, + {"R_PPC64_PCREL34", Const, 20}, + {"R_PPC64_PCREL_OPT", Const, 20}, + {"R_PPC64_PLT16_HA", Const, 20}, + {"R_PPC64_PLT16_HI", Const, 20}, + {"R_PPC64_PLT16_LO", Const, 20}, + {"R_PPC64_PLT16_LO_DS", Const, 10}, + {"R_PPC64_PLT32", Const, 20}, + {"R_PPC64_PLT64", Const, 20}, + {"R_PPC64_PLTCALL", Const, 20}, + {"R_PPC64_PLTCALL_NOTOC", Const, 20}, + {"R_PPC64_PLTGOT16", Const, 10}, + {"R_PPC64_PLTGOT16_DS", Const, 10}, + {"R_PPC64_PLTGOT16_HA", Const, 10}, + {"R_PPC64_PLTGOT16_HI", Const, 10}, + {"R_PPC64_PLTGOT16_LO", Const, 10}, + {"R_PPC64_PLTGOT_LO_DS", Const, 10}, + {"R_PPC64_PLTREL32", Const, 20}, + {"R_PPC64_PLTREL64", Const, 20}, + {"R_PPC64_PLTSEQ", Const, 20}, + {"R_PPC64_PLTSEQ_NOTOC", Const, 20}, + {"R_PPC64_PLT_PCREL34", Const, 20}, + {"R_PPC64_PLT_PCREL34_NOTOC", Const, 20}, + {"R_PPC64_REL14", Const, 5}, + {"R_PPC64_REL14_BRNTAKEN", Const, 5}, + {"R_PPC64_REL14_BRTAKEN", Const, 5}, + {"R_PPC64_REL16", Const, 5}, + {"R_PPC64_REL16DX_HA", Const, 10}, + {"R_PPC64_REL16_HA", Const, 5}, + {"R_PPC64_REL16_HI", Const, 5}, + {"R_PPC64_REL16_HIGH", Const, 20}, + {"R_PPC64_REL16_HIGHA", Const, 20}, + {"R_PPC64_REL16_HIGHER", Const, 20}, + {"R_PPC64_REL16_HIGHER34", Const, 20}, + {"R_PPC64_REL16_HIGHERA", Const, 20}, + {"R_PPC64_REL16_HIGHERA34", Const, 20}, + {"R_PPC64_REL16_HIGHEST", Const, 20}, + {"R_PPC64_REL16_HIGHEST34", Const, 20}, + {"R_PPC64_REL16_HIGHESTA", Const, 20}, + {"R_PPC64_REL16_HIGHESTA34", Const, 20}, + {"R_PPC64_REL16_LO", Const, 5}, + {"R_PPC64_REL24", Const, 5}, + {"R_PPC64_REL24_NOTOC", Const, 10}, + {"R_PPC64_REL24_P9NOTOC", Const, 21}, + {"R_PPC64_REL30", Const, 20}, + {"R_PPC64_REL32", Const, 5}, + {"R_PPC64_REL64", Const, 5}, + {"R_PPC64_RELATIVE", Const, 18}, + {"R_PPC64_SECTOFF", Const, 20}, + {"R_PPC64_SECTOFF_DS", Const, 10}, + {"R_PPC64_SECTOFF_HA", Const, 20}, + {"R_PPC64_SECTOFF_HI", Const, 20}, + {"R_PPC64_SECTOFF_LO", Const, 20}, + {"R_PPC64_SECTOFF_LO_DS", Const, 10}, + {"R_PPC64_TLS", Const, 5}, + {"R_PPC64_TLSGD", Const, 5}, + {"R_PPC64_TLSLD", Const, 5}, + {"R_PPC64_TOC", Const, 5}, + {"R_PPC64_TOC16", Const, 5}, + {"R_PPC64_TOC16_DS", Const, 5}, + {"R_PPC64_TOC16_HA", Const, 5}, + {"R_PPC64_TOC16_HI", Const, 5}, + 
{"R_PPC64_TOC16_LO", Const, 5}, + {"R_PPC64_TOC16_LO_DS", Const, 5}, + {"R_PPC64_TOCSAVE", Const, 10}, + {"R_PPC64_TPREL16", Const, 5}, + {"R_PPC64_TPREL16_DS", Const, 5}, + {"R_PPC64_TPREL16_HA", Const, 5}, + {"R_PPC64_TPREL16_HI", Const, 5}, + {"R_PPC64_TPREL16_HIGH", Const, 10}, + {"R_PPC64_TPREL16_HIGHA", Const, 10}, + {"R_PPC64_TPREL16_HIGHER", Const, 5}, + {"R_PPC64_TPREL16_HIGHERA", Const, 5}, + {"R_PPC64_TPREL16_HIGHEST", Const, 5}, + {"R_PPC64_TPREL16_HIGHESTA", Const, 5}, + {"R_PPC64_TPREL16_LO", Const, 5}, + {"R_PPC64_TPREL16_LO_DS", Const, 5}, + {"R_PPC64_TPREL34", Const, 20}, + {"R_PPC64_TPREL64", Const, 5}, + {"R_PPC64_UADDR16", Const, 20}, + {"R_PPC64_UADDR32", Const, 20}, + {"R_PPC64_UADDR64", Const, 20}, + {"R_PPC_ADDR14", Const, 0}, + {"R_PPC_ADDR14_BRNTAKEN", Const, 0}, + {"R_PPC_ADDR14_BRTAKEN", Const, 0}, + {"R_PPC_ADDR16", Const, 0}, + {"R_PPC_ADDR16_HA", Const, 0}, + {"R_PPC_ADDR16_HI", Const, 0}, + {"R_PPC_ADDR16_LO", Const, 0}, + {"R_PPC_ADDR24", Const, 0}, + {"R_PPC_ADDR32", Const, 0}, + {"R_PPC_COPY", Const, 0}, + {"R_PPC_DTPMOD32", Const, 0}, + {"R_PPC_DTPREL16", Const, 0}, + {"R_PPC_DTPREL16_HA", Const, 0}, + {"R_PPC_DTPREL16_HI", Const, 0}, + {"R_PPC_DTPREL16_LO", Const, 0}, + {"R_PPC_DTPREL32", Const, 0}, + {"R_PPC_EMB_BIT_FLD", Const, 0}, + {"R_PPC_EMB_MRKREF", Const, 0}, + {"R_PPC_EMB_NADDR16", Const, 0}, + {"R_PPC_EMB_NADDR16_HA", Const, 0}, + {"R_PPC_EMB_NADDR16_HI", Const, 0}, + {"R_PPC_EMB_NADDR16_LO", Const, 0}, + {"R_PPC_EMB_NADDR32", Const, 0}, + {"R_PPC_EMB_RELSDA", Const, 0}, + {"R_PPC_EMB_RELSEC16", Const, 0}, + {"R_PPC_EMB_RELST_HA", Const, 0}, + {"R_PPC_EMB_RELST_HI", Const, 0}, + {"R_PPC_EMB_RELST_LO", Const, 0}, + {"R_PPC_EMB_SDA21", Const, 0}, + {"R_PPC_EMB_SDA2I16", Const, 0}, + {"R_PPC_EMB_SDA2REL", Const, 0}, + {"R_PPC_EMB_SDAI16", Const, 0}, + {"R_PPC_GLOB_DAT", Const, 0}, + {"R_PPC_GOT16", Const, 0}, + {"R_PPC_GOT16_HA", Const, 0}, + {"R_PPC_GOT16_HI", Const, 0}, + {"R_PPC_GOT16_LO", Const, 0}, + {"R_PPC_GOT_TLSGD16", Const, 0}, + {"R_PPC_GOT_TLSGD16_HA", Const, 0}, + {"R_PPC_GOT_TLSGD16_HI", Const, 0}, + {"R_PPC_GOT_TLSGD16_LO", Const, 0}, + {"R_PPC_GOT_TLSLD16", Const, 0}, + {"R_PPC_GOT_TLSLD16_HA", Const, 0}, + {"R_PPC_GOT_TLSLD16_HI", Const, 0}, + {"R_PPC_GOT_TLSLD16_LO", Const, 0}, + {"R_PPC_GOT_TPREL16", Const, 0}, + {"R_PPC_GOT_TPREL16_HA", Const, 0}, + {"R_PPC_GOT_TPREL16_HI", Const, 0}, + {"R_PPC_GOT_TPREL16_LO", Const, 0}, + {"R_PPC_JMP_SLOT", Const, 0}, + {"R_PPC_LOCAL24PC", Const, 0}, + {"R_PPC_NONE", Const, 0}, + {"R_PPC_PLT16_HA", Const, 0}, + {"R_PPC_PLT16_HI", Const, 0}, + {"R_PPC_PLT16_LO", Const, 0}, + {"R_PPC_PLT32", Const, 0}, + {"R_PPC_PLTREL24", Const, 0}, + {"R_PPC_PLTREL32", Const, 0}, + {"R_PPC_REL14", Const, 0}, + {"R_PPC_REL14_BRNTAKEN", Const, 0}, + {"R_PPC_REL14_BRTAKEN", Const, 0}, + {"R_PPC_REL24", Const, 0}, + {"R_PPC_REL32", Const, 0}, + {"R_PPC_RELATIVE", Const, 0}, + {"R_PPC_SDAREL16", Const, 0}, + {"R_PPC_SECTOFF", Const, 0}, + {"R_PPC_SECTOFF_HA", Const, 0}, + {"R_PPC_SECTOFF_HI", Const, 0}, + {"R_PPC_SECTOFF_LO", Const, 0}, + {"R_PPC_TLS", Const, 0}, + {"R_PPC_TPREL16", Const, 0}, + {"R_PPC_TPREL16_HA", Const, 0}, + {"R_PPC_TPREL16_HI", Const, 0}, + {"R_PPC_TPREL16_LO", Const, 0}, + {"R_PPC_TPREL32", Const, 0}, + {"R_PPC_UADDR16", Const, 0}, + {"R_PPC_UADDR32", Const, 0}, + {"R_RISCV", Type, 11}, + {"R_RISCV_32", Const, 11}, + {"R_RISCV_32_PCREL", Const, 12}, + {"R_RISCV_64", Const, 11}, + {"R_RISCV_ADD16", Const, 11}, + {"R_RISCV_ADD32", Const, 11}, + {"R_RISCV_ADD64", Const, 11}, + {"R_RISCV_ADD8", 
Const, 11}, + {"R_RISCV_ALIGN", Const, 11}, + {"R_RISCV_BRANCH", Const, 11}, + {"R_RISCV_CALL", Const, 11}, + {"R_RISCV_CALL_PLT", Const, 11}, + {"R_RISCV_COPY", Const, 11}, + {"R_RISCV_GNU_VTENTRY", Const, 11}, + {"R_RISCV_GNU_VTINHERIT", Const, 11}, + {"R_RISCV_GOT_HI20", Const, 11}, + {"R_RISCV_GPREL_I", Const, 11}, + {"R_RISCV_GPREL_S", Const, 11}, + {"R_RISCV_HI20", Const, 11}, + {"R_RISCV_JAL", Const, 11}, + {"R_RISCV_JUMP_SLOT", Const, 11}, + {"R_RISCV_LO12_I", Const, 11}, + {"R_RISCV_LO12_S", Const, 11}, + {"R_RISCV_NONE", Const, 11}, + {"R_RISCV_PCREL_HI20", Const, 11}, + {"R_RISCV_PCREL_LO12_I", Const, 11}, + {"R_RISCV_PCREL_LO12_S", Const, 11}, + {"R_RISCV_RELATIVE", Const, 11}, + {"R_RISCV_RELAX", Const, 11}, + {"R_RISCV_RVC_BRANCH", Const, 11}, + {"R_RISCV_RVC_JUMP", Const, 11}, + {"R_RISCV_RVC_LUI", Const, 11}, + {"R_RISCV_SET16", Const, 11}, + {"R_RISCV_SET32", Const, 11}, + {"R_RISCV_SET6", Const, 11}, + {"R_RISCV_SET8", Const, 11}, + {"R_RISCV_SUB16", Const, 11}, + {"R_RISCV_SUB32", Const, 11}, + {"R_RISCV_SUB6", Const, 11}, + {"R_RISCV_SUB64", Const, 11}, + {"R_RISCV_SUB8", Const, 11}, + {"R_RISCV_TLS_DTPMOD32", Const, 11}, + {"R_RISCV_TLS_DTPMOD64", Const, 11}, + {"R_RISCV_TLS_DTPREL32", Const, 11}, + {"R_RISCV_TLS_DTPREL64", Const, 11}, + {"R_RISCV_TLS_GD_HI20", Const, 11}, + {"R_RISCV_TLS_GOT_HI20", Const, 11}, + {"R_RISCV_TLS_TPREL32", Const, 11}, + {"R_RISCV_TLS_TPREL64", Const, 11}, + {"R_RISCV_TPREL_ADD", Const, 11}, + {"R_RISCV_TPREL_HI20", Const, 11}, + {"R_RISCV_TPREL_I", Const, 11}, + {"R_RISCV_TPREL_LO12_I", Const, 11}, + {"R_RISCV_TPREL_LO12_S", Const, 11}, + {"R_RISCV_TPREL_S", Const, 11}, + {"R_SPARC", Type, 0}, + {"R_SPARC_10", Const, 0}, + {"R_SPARC_11", Const, 0}, + {"R_SPARC_13", Const, 0}, + {"R_SPARC_16", Const, 0}, + {"R_SPARC_22", Const, 0}, + {"R_SPARC_32", Const, 0}, + {"R_SPARC_5", Const, 0}, + {"R_SPARC_6", Const, 0}, + {"R_SPARC_64", Const, 0}, + {"R_SPARC_7", Const, 0}, + {"R_SPARC_8", Const, 0}, + {"R_SPARC_COPY", Const, 0}, + {"R_SPARC_DISP16", Const, 0}, + {"R_SPARC_DISP32", Const, 0}, + {"R_SPARC_DISP64", Const, 0}, + {"R_SPARC_DISP8", Const, 0}, + {"R_SPARC_GLOB_DAT", Const, 0}, + {"R_SPARC_GLOB_JMP", Const, 0}, + {"R_SPARC_GOT10", Const, 0}, + {"R_SPARC_GOT13", Const, 0}, + {"R_SPARC_GOT22", Const, 0}, + {"R_SPARC_H44", Const, 0}, + {"R_SPARC_HH22", Const, 0}, + {"R_SPARC_HI22", Const, 0}, + {"R_SPARC_HIPLT22", Const, 0}, + {"R_SPARC_HIX22", Const, 0}, + {"R_SPARC_HM10", Const, 0}, + {"R_SPARC_JMP_SLOT", Const, 0}, + {"R_SPARC_L44", Const, 0}, + {"R_SPARC_LM22", Const, 0}, + {"R_SPARC_LO10", Const, 0}, + {"R_SPARC_LOPLT10", Const, 0}, + {"R_SPARC_LOX10", Const, 0}, + {"R_SPARC_M44", Const, 0}, + {"R_SPARC_NONE", Const, 0}, + {"R_SPARC_OLO10", Const, 0}, + {"R_SPARC_PC10", Const, 0}, + {"R_SPARC_PC22", Const, 0}, + {"R_SPARC_PCPLT10", Const, 0}, + {"R_SPARC_PCPLT22", Const, 0}, + {"R_SPARC_PCPLT32", Const, 0}, + {"R_SPARC_PC_HH22", Const, 0}, + {"R_SPARC_PC_HM10", Const, 0}, + {"R_SPARC_PC_LM22", Const, 0}, + {"R_SPARC_PLT32", Const, 0}, + {"R_SPARC_PLT64", Const, 0}, + {"R_SPARC_REGISTER", Const, 0}, + {"R_SPARC_RELATIVE", Const, 0}, + {"R_SPARC_UA16", Const, 0}, + {"R_SPARC_UA32", Const, 0}, + {"R_SPARC_UA64", Const, 0}, + {"R_SPARC_WDISP16", Const, 0}, + {"R_SPARC_WDISP19", Const, 0}, + {"R_SPARC_WDISP22", Const, 0}, + {"R_SPARC_WDISP30", Const, 0}, + {"R_SPARC_WPLT30", Const, 0}, + {"R_SYM32", Func, 0}, + {"R_SYM64", Func, 0}, + {"R_TYPE32", Func, 0}, + {"R_TYPE64", Func, 0}, + {"R_X86_64", Type, 0}, + {"R_X86_64_16", Const, 0}, + 
{"R_X86_64_32", Const, 0}, + {"R_X86_64_32S", Const, 0}, + {"R_X86_64_64", Const, 0}, + {"R_X86_64_8", Const, 0}, + {"R_X86_64_COPY", Const, 0}, + {"R_X86_64_DTPMOD64", Const, 0}, + {"R_X86_64_DTPOFF32", Const, 0}, + {"R_X86_64_DTPOFF64", Const, 0}, + {"R_X86_64_GLOB_DAT", Const, 0}, + {"R_X86_64_GOT32", Const, 0}, + {"R_X86_64_GOT64", Const, 10}, + {"R_X86_64_GOTOFF64", Const, 10}, + {"R_X86_64_GOTPC32", Const, 10}, + {"R_X86_64_GOTPC32_TLSDESC", Const, 10}, + {"R_X86_64_GOTPC64", Const, 10}, + {"R_X86_64_GOTPCREL", Const, 0}, + {"R_X86_64_GOTPCREL64", Const, 10}, + {"R_X86_64_GOTPCRELX", Const, 10}, + {"R_X86_64_GOTPLT64", Const, 10}, + {"R_X86_64_GOTTPOFF", Const, 0}, + {"R_X86_64_IRELATIVE", Const, 10}, + {"R_X86_64_JMP_SLOT", Const, 0}, + {"R_X86_64_NONE", Const, 0}, + {"R_X86_64_PC16", Const, 0}, + {"R_X86_64_PC32", Const, 0}, + {"R_X86_64_PC32_BND", Const, 10}, + {"R_X86_64_PC64", Const, 10}, + {"R_X86_64_PC8", Const, 0}, + {"R_X86_64_PLT32", Const, 0}, + {"R_X86_64_PLT32_BND", Const, 10}, + {"R_X86_64_PLTOFF64", Const, 10}, + {"R_X86_64_RELATIVE", Const, 0}, + {"R_X86_64_RELATIVE64", Const, 10}, + {"R_X86_64_REX_GOTPCRELX", Const, 10}, + {"R_X86_64_SIZE32", Const, 10}, + {"R_X86_64_SIZE64", Const, 10}, + {"R_X86_64_TLSDESC", Const, 10}, + {"R_X86_64_TLSDESC_CALL", Const, 10}, + {"R_X86_64_TLSGD", Const, 0}, + {"R_X86_64_TLSLD", Const, 0}, + {"R_X86_64_TPOFF32", Const, 0}, + {"R_X86_64_TPOFF64", Const, 0}, + {"Rel32", Type, 0}, + {"Rel32.Info", Field, 0}, + {"Rel32.Off", Field, 0}, + {"Rel64", Type, 0}, + {"Rel64.Info", Field, 0}, + {"Rel64.Off", Field, 0}, + {"Rela32", Type, 0}, + {"Rela32.Addend", Field, 0}, + {"Rela32.Info", Field, 0}, + {"Rela32.Off", Field, 0}, + {"Rela64", Type, 0}, + {"Rela64.Addend", Field, 0}, + {"Rela64.Info", Field, 0}, + {"Rela64.Off", Field, 0}, + {"SHF_ALLOC", Const, 0}, + {"SHF_COMPRESSED", Const, 6}, + {"SHF_EXECINSTR", Const, 0}, + {"SHF_GROUP", Const, 0}, + {"SHF_INFO_LINK", Const, 0}, + {"SHF_LINK_ORDER", Const, 0}, + {"SHF_MASKOS", Const, 0}, + {"SHF_MASKPROC", Const, 0}, + {"SHF_MERGE", Const, 0}, + {"SHF_OS_NONCONFORMING", Const, 0}, + {"SHF_STRINGS", Const, 0}, + {"SHF_TLS", Const, 0}, + {"SHF_WRITE", Const, 0}, + {"SHN_ABS", Const, 0}, + {"SHN_COMMON", Const, 0}, + {"SHN_HIOS", Const, 0}, + {"SHN_HIPROC", Const, 0}, + {"SHN_HIRESERVE", Const, 0}, + {"SHN_LOOS", Const, 0}, + {"SHN_LOPROC", Const, 0}, + {"SHN_LORESERVE", Const, 0}, + {"SHN_UNDEF", Const, 0}, + {"SHN_XINDEX", Const, 0}, + {"SHT_DYNAMIC", Const, 0}, + {"SHT_DYNSYM", Const, 0}, + {"SHT_FINI_ARRAY", Const, 0}, + {"SHT_GNU_ATTRIBUTES", Const, 0}, + {"SHT_GNU_HASH", Const, 0}, + {"SHT_GNU_LIBLIST", Const, 0}, + {"SHT_GNU_VERDEF", Const, 0}, + {"SHT_GNU_VERNEED", Const, 0}, + {"SHT_GNU_VERSYM", Const, 0}, + {"SHT_GROUP", Const, 0}, + {"SHT_HASH", Const, 0}, + {"SHT_HIOS", Const, 0}, + {"SHT_HIPROC", Const, 0}, + {"SHT_HIUSER", Const, 0}, + {"SHT_INIT_ARRAY", Const, 0}, + {"SHT_LOOS", Const, 0}, + {"SHT_LOPROC", Const, 0}, + {"SHT_LOUSER", Const, 0}, + {"SHT_MIPS_ABIFLAGS", Const, 17}, + {"SHT_NOBITS", Const, 0}, + {"SHT_NOTE", Const, 0}, + {"SHT_NULL", Const, 0}, + {"SHT_PREINIT_ARRAY", Const, 0}, + {"SHT_PROGBITS", Const, 0}, + {"SHT_REL", Const, 0}, + {"SHT_RELA", Const, 0}, + {"SHT_SHLIB", Const, 0}, + {"SHT_STRTAB", Const, 0}, + {"SHT_SYMTAB", Const, 0}, + {"SHT_SYMTAB_SHNDX", Const, 0}, + {"STB_GLOBAL", Const, 0}, + {"STB_HIOS", Const, 0}, + {"STB_HIPROC", Const, 0}, + {"STB_LOCAL", Const, 0}, + {"STB_LOOS", Const, 0}, + {"STB_LOPROC", Const, 0}, + {"STB_WEAK", Const, 0}, + 
{"STT_COMMON", Const, 0}, + {"STT_FILE", Const, 0}, + {"STT_FUNC", Const, 0}, + {"STT_HIOS", Const, 0}, + {"STT_HIPROC", Const, 0}, + {"STT_LOOS", Const, 0}, + {"STT_LOPROC", Const, 0}, + {"STT_NOTYPE", Const, 0}, + {"STT_OBJECT", Const, 0}, + {"STT_SECTION", Const, 0}, + {"STT_TLS", Const, 0}, + {"STV_DEFAULT", Const, 0}, + {"STV_HIDDEN", Const, 0}, + {"STV_INTERNAL", Const, 0}, + {"STV_PROTECTED", Const, 0}, + {"ST_BIND", Func, 0}, + {"ST_INFO", Func, 0}, + {"ST_TYPE", Func, 0}, + {"ST_VISIBILITY", Func, 0}, + {"Section", Type, 0}, + {"Section.ReaderAt", Field, 0}, + {"Section.SectionHeader", Field, 0}, + {"Section32", Type, 0}, + {"Section32.Addr", Field, 0}, + {"Section32.Addralign", Field, 0}, + {"Section32.Entsize", Field, 0}, + {"Section32.Flags", Field, 0}, + {"Section32.Info", Field, 0}, + {"Section32.Link", Field, 0}, + {"Section32.Name", Field, 0}, + {"Section32.Off", Field, 0}, + {"Section32.Size", Field, 0}, + {"Section32.Type", Field, 0}, + {"Section64", Type, 0}, + {"Section64.Addr", Field, 0}, + {"Section64.Addralign", Field, 0}, + {"Section64.Entsize", Field, 0}, + {"Section64.Flags", Field, 0}, + {"Section64.Info", Field, 0}, + {"Section64.Link", Field, 0}, + {"Section64.Name", Field, 0}, + {"Section64.Off", Field, 0}, + {"Section64.Size", Field, 0}, + {"Section64.Type", Field, 0}, + {"SectionFlag", Type, 0}, + {"SectionHeader", Type, 0}, + {"SectionHeader.Addr", Field, 0}, + {"SectionHeader.Addralign", Field, 0}, + {"SectionHeader.Entsize", Field, 0}, + {"SectionHeader.FileSize", Field, 6}, + {"SectionHeader.Flags", Field, 0}, + {"SectionHeader.Info", Field, 0}, + {"SectionHeader.Link", Field, 0}, + {"SectionHeader.Name", Field, 0}, + {"SectionHeader.Offset", Field, 0}, + {"SectionHeader.Size", Field, 0}, + {"SectionHeader.Type", Field, 0}, + {"SectionIndex", Type, 0}, + {"SectionType", Type, 0}, + {"Sym32", Type, 0}, + {"Sym32.Info", Field, 0}, + {"Sym32.Name", Field, 0}, + {"Sym32.Other", Field, 0}, + {"Sym32.Shndx", Field, 0}, + {"Sym32.Size", Field, 0}, + {"Sym32.Value", Field, 0}, + {"Sym32Size", Const, 0}, + {"Sym64", Type, 0}, + {"Sym64.Info", Field, 0}, + {"Sym64.Name", Field, 0}, + {"Sym64.Other", Field, 0}, + {"Sym64.Shndx", Field, 0}, + {"Sym64.Size", Field, 0}, + {"Sym64.Value", Field, 0}, + {"Sym64Size", Const, 0}, + {"SymBind", Type, 0}, + {"SymType", Type, 0}, + {"SymVis", Type, 0}, + {"Symbol", Type, 0}, + {"Symbol.Info", Field, 0}, + {"Symbol.Library", Field, 13}, + {"Symbol.Name", Field, 0}, + {"Symbol.Other", Field, 0}, + {"Symbol.Section", Field, 0}, + {"Symbol.Size", Field, 0}, + {"Symbol.Value", Field, 0}, + {"Symbol.Version", Field, 13}, + {"Type", Type, 0}, + {"Version", Type, 0}, + }, + "debug/gosym": { + {"(*DecodingError).Error", Method, 0}, + {"(*LineTable).LineToPC", Method, 0}, + {"(*LineTable).PCToLine", Method, 0}, + {"(*Sym).BaseName", Method, 0}, + {"(*Sym).PackageName", Method, 0}, + {"(*Sym).ReceiverName", Method, 0}, + {"(*Sym).Static", Method, 0}, + {"(*Table).LineToPC", Method, 0}, + {"(*Table).LookupFunc", Method, 0}, + {"(*Table).LookupSym", Method, 0}, + {"(*Table).PCToFunc", Method, 0}, + {"(*Table).PCToLine", Method, 0}, + {"(*Table).SymByAddr", Method, 0}, + {"(*UnknownLineError).Error", Method, 0}, + {"(Func).BaseName", Method, 0}, + {"(Func).PackageName", Method, 0}, + {"(Func).ReceiverName", Method, 0}, + {"(Func).Static", Method, 0}, + {"(UnknownFileError).Error", Method, 0}, + {"DecodingError", Type, 0}, + {"Func", Type, 0}, + {"Func.End", Field, 0}, + {"Func.Entry", Field, 0}, + {"Func.FrameSize", Field, 0}, + 
{"Func.LineTable", Field, 0}, + {"Func.Locals", Field, 0}, + {"Func.Obj", Field, 0}, + {"Func.Params", Field, 0}, + {"Func.Sym", Field, 0}, + {"LineTable", Type, 0}, + {"LineTable.Data", Field, 0}, + {"LineTable.Line", Field, 0}, + {"LineTable.PC", Field, 0}, + {"NewLineTable", Func, 0}, + {"NewTable", Func, 0}, + {"Obj", Type, 0}, + {"Obj.Funcs", Field, 0}, + {"Obj.Paths", Field, 0}, + {"Sym", Type, 0}, + {"Sym.Func", Field, 0}, + {"Sym.GoType", Field, 0}, + {"Sym.Name", Field, 0}, + {"Sym.Type", Field, 0}, + {"Sym.Value", Field, 0}, + {"Table", Type, 0}, + {"Table.Files", Field, 0}, + {"Table.Funcs", Field, 0}, + {"Table.Objs", Field, 0}, + {"Table.Syms", Field, 0}, + {"UnknownFileError", Type, 0}, + {"UnknownLineError", Type, 0}, + {"UnknownLineError.File", Field, 0}, + {"UnknownLineError.Line", Field, 0}, + }, + "debug/macho": { + {"(*FatFile).Close", Method, 3}, + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*File).Segment", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(*Segment).Data", Method, 0}, + {"(*Segment).Open", Method, 0}, + {"(Cpu).GoString", Method, 0}, + {"(Cpu).String", Method, 0}, + {"(Dylib).Raw", Method, 0}, + {"(Dysymtab).Raw", Method, 0}, + {"(FatArch).Close", Method, 3}, + {"(FatArch).DWARF", Method, 3}, + {"(FatArch).ImportedLibraries", Method, 3}, + {"(FatArch).ImportedSymbols", Method, 3}, + {"(FatArch).Section", Method, 3}, + {"(FatArch).Segment", Method, 3}, + {"(LoadBytes).Raw", Method, 0}, + {"(LoadCmd).GoString", Method, 0}, + {"(LoadCmd).String", Method, 0}, + {"(RelocTypeARM).GoString", Method, 10}, + {"(RelocTypeARM).String", Method, 10}, + {"(RelocTypeARM64).GoString", Method, 10}, + {"(RelocTypeARM64).String", Method, 10}, + {"(RelocTypeGeneric).GoString", Method, 10}, + {"(RelocTypeGeneric).String", Method, 10}, + {"(RelocTypeX86_64).GoString", Method, 10}, + {"(RelocTypeX86_64).String", Method, 10}, + {"(Rpath).Raw", Method, 10}, + {"(Section).ReadAt", Method, 0}, + {"(Segment).Raw", Method, 0}, + {"(Segment).ReadAt", Method, 0}, + {"(Symtab).Raw", Method, 0}, + {"(Type).GoString", Method, 10}, + {"(Type).String", Method, 10}, + {"ARM64_RELOC_ADDEND", Const, 10}, + {"ARM64_RELOC_BRANCH26", Const, 10}, + {"ARM64_RELOC_GOT_LOAD_PAGE21", Const, 10}, + {"ARM64_RELOC_GOT_LOAD_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_PAGE21", Const, 10}, + {"ARM64_RELOC_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_POINTER_TO_GOT", Const, 10}, + {"ARM64_RELOC_SUBTRACTOR", Const, 10}, + {"ARM64_RELOC_TLVP_LOAD_PAGE21", Const, 10}, + {"ARM64_RELOC_TLVP_LOAD_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_UNSIGNED", Const, 10}, + {"ARM_RELOC_BR24", Const, 10}, + {"ARM_RELOC_HALF", Const, 10}, + {"ARM_RELOC_HALF_SECTDIFF", Const, 10}, + {"ARM_RELOC_LOCAL_SECTDIFF", Const, 10}, + {"ARM_RELOC_PAIR", Const, 10}, + {"ARM_RELOC_PB_LA_PTR", Const, 10}, + {"ARM_RELOC_SECTDIFF", Const, 10}, + {"ARM_RELOC_VANILLA", Const, 10}, + {"ARM_THUMB_32BIT_BRANCH", Const, 10}, + {"ARM_THUMB_RELOC_BR22", Const, 10}, + {"Cpu", Type, 0}, + {"Cpu386", Const, 0}, + {"CpuAmd64", Const, 0}, + {"CpuArm", Const, 3}, + {"CpuArm64", Const, 11}, + {"CpuPpc", Const, 3}, + {"CpuPpc64", Const, 3}, + {"Dylib", Type, 0}, + {"Dylib.CompatVersion", Field, 0}, + {"Dylib.CurrentVersion", Field, 0}, + {"Dylib.LoadBytes", Field, 0}, + {"Dylib.Name", Field, 0}, + {"Dylib.Time", Field, 0}, + {"DylibCmd", Type, 0}, + 
{"DylibCmd.Cmd", Field, 0}, + {"DylibCmd.CompatVersion", Field, 0}, + {"DylibCmd.CurrentVersion", Field, 0}, + {"DylibCmd.Len", Field, 0}, + {"DylibCmd.Name", Field, 0}, + {"DylibCmd.Time", Field, 0}, + {"Dysymtab", Type, 0}, + {"Dysymtab.DysymtabCmd", Field, 0}, + {"Dysymtab.IndirectSyms", Field, 0}, + {"Dysymtab.LoadBytes", Field, 0}, + {"DysymtabCmd", Type, 0}, + {"DysymtabCmd.Cmd", Field, 0}, + {"DysymtabCmd.Extrefsymoff", Field, 0}, + {"DysymtabCmd.Extreloff", Field, 0}, + {"DysymtabCmd.Iextdefsym", Field, 0}, + {"DysymtabCmd.Ilocalsym", Field, 0}, + {"DysymtabCmd.Indirectsymoff", Field, 0}, + {"DysymtabCmd.Iundefsym", Field, 0}, + {"DysymtabCmd.Len", Field, 0}, + {"DysymtabCmd.Locreloff", Field, 0}, + {"DysymtabCmd.Modtaboff", Field, 0}, + {"DysymtabCmd.Nextdefsym", Field, 0}, + {"DysymtabCmd.Nextrefsyms", Field, 0}, + {"DysymtabCmd.Nextrel", Field, 0}, + {"DysymtabCmd.Nindirectsyms", Field, 0}, + {"DysymtabCmd.Nlocalsym", Field, 0}, + {"DysymtabCmd.Nlocrel", Field, 0}, + {"DysymtabCmd.Nmodtab", Field, 0}, + {"DysymtabCmd.Ntoc", Field, 0}, + {"DysymtabCmd.Nundefsym", Field, 0}, + {"DysymtabCmd.Tocoffset", Field, 0}, + {"ErrNotFat", Var, 3}, + {"FatArch", Type, 3}, + {"FatArch.FatArchHeader", Field, 3}, + {"FatArch.File", Field, 3}, + {"FatArchHeader", Type, 3}, + {"FatArchHeader.Align", Field, 3}, + {"FatArchHeader.Cpu", Field, 3}, + {"FatArchHeader.Offset", Field, 3}, + {"FatArchHeader.Size", Field, 3}, + {"FatArchHeader.SubCpu", Field, 3}, + {"FatFile", Type, 3}, + {"FatFile.Arches", Field, 3}, + {"FatFile.Magic", Field, 3}, + {"File", Type, 0}, + {"File.ByteOrder", Field, 0}, + {"File.Dysymtab", Field, 0}, + {"File.FileHeader", Field, 0}, + {"File.Loads", Field, 0}, + {"File.Sections", Field, 0}, + {"File.Symtab", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.Cmdsz", Field, 0}, + {"FileHeader.Cpu", Field, 0}, + {"FileHeader.Flags", Field, 0}, + {"FileHeader.Magic", Field, 0}, + {"FileHeader.Ncmd", Field, 0}, + {"FileHeader.SubCpu", Field, 0}, + {"FileHeader.Type", Field, 0}, + {"FlagAllModsBound", Const, 10}, + {"FlagAllowStackExecution", Const, 10}, + {"FlagAppExtensionSafe", Const, 10}, + {"FlagBindAtLoad", Const, 10}, + {"FlagBindsToWeak", Const, 10}, + {"FlagCanonical", Const, 10}, + {"FlagDeadStrippableDylib", Const, 10}, + {"FlagDyldLink", Const, 10}, + {"FlagForceFlat", Const, 10}, + {"FlagHasTLVDescriptors", Const, 10}, + {"FlagIncrLink", Const, 10}, + {"FlagLazyInit", Const, 10}, + {"FlagNoFixPrebinding", Const, 10}, + {"FlagNoHeapExecution", Const, 10}, + {"FlagNoMultiDefs", Const, 10}, + {"FlagNoReexportedDylibs", Const, 10}, + {"FlagNoUndefs", Const, 10}, + {"FlagPIE", Const, 10}, + {"FlagPrebindable", Const, 10}, + {"FlagPrebound", Const, 10}, + {"FlagRootSafe", Const, 10}, + {"FlagSetuidSafe", Const, 10}, + {"FlagSplitSegs", Const, 10}, + {"FlagSubsectionsViaSymbols", Const, 10}, + {"FlagTwoLevel", Const, 10}, + {"FlagWeakDefines", Const, 10}, + {"FormatError", Type, 0}, + {"GENERIC_RELOC_LOCAL_SECTDIFF", Const, 10}, + {"GENERIC_RELOC_PAIR", Const, 10}, + {"GENERIC_RELOC_PB_LA_PTR", Const, 10}, + {"GENERIC_RELOC_SECTDIFF", Const, 10}, + {"GENERIC_RELOC_TLV", Const, 10}, + {"GENERIC_RELOC_VANILLA", Const, 10}, + {"Load", Type, 0}, + {"LoadBytes", Type, 0}, + {"LoadCmd", Type, 0}, + {"LoadCmdDylib", Const, 0}, + {"LoadCmdDylinker", Const, 0}, + {"LoadCmdDysymtab", Const, 0}, + {"LoadCmdRpath", Const, 10}, + {"LoadCmdSegment", Const, 0}, + {"LoadCmdSegment64", Const, 0}, + {"LoadCmdSymtab", Const, 0}, + {"LoadCmdThread", Const, 0}, + {"LoadCmdUnixThread", Const, 
0}, + {"Magic32", Const, 0}, + {"Magic64", Const, 0}, + {"MagicFat", Const, 3}, + {"NewFatFile", Func, 3}, + {"NewFile", Func, 0}, + {"Nlist32", Type, 0}, + {"Nlist32.Desc", Field, 0}, + {"Nlist32.Name", Field, 0}, + {"Nlist32.Sect", Field, 0}, + {"Nlist32.Type", Field, 0}, + {"Nlist32.Value", Field, 0}, + {"Nlist64", Type, 0}, + {"Nlist64.Desc", Field, 0}, + {"Nlist64.Name", Field, 0}, + {"Nlist64.Sect", Field, 0}, + {"Nlist64.Type", Field, 0}, + {"Nlist64.Value", Field, 0}, + {"Open", Func, 0}, + {"OpenFat", Func, 3}, + {"Regs386", Type, 0}, + {"Regs386.AX", Field, 0}, + {"Regs386.BP", Field, 0}, + {"Regs386.BX", Field, 0}, + {"Regs386.CS", Field, 0}, + {"Regs386.CX", Field, 0}, + {"Regs386.DI", Field, 0}, + {"Regs386.DS", Field, 0}, + {"Regs386.DX", Field, 0}, + {"Regs386.ES", Field, 0}, + {"Regs386.FLAGS", Field, 0}, + {"Regs386.FS", Field, 0}, + {"Regs386.GS", Field, 0}, + {"Regs386.IP", Field, 0}, + {"Regs386.SI", Field, 0}, + {"Regs386.SP", Field, 0}, + {"Regs386.SS", Field, 0}, + {"RegsAMD64", Type, 0}, + {"RegsAMD64.AX", Field, 0}, + {"RegsAMD64.BP", Field, 0}, + {"RegsAMD64.BX", Field, 0}, + {"RegsAMD64.CS", Field, 0}, + {"RegsAMD64.CX", Field, 0}, + {"RegsAMD64.DI", Field, 0}, + {"RegsAMD64.DX", Field, 0}, + {"RegsAMD64.FLAGS", Field, 0}, + {"RegsAMD64.FS", Field, 0}, + {"RegsAMD64.GS", Field, 0}, + {"RegsAMD64.IP", Field, 0}, + {"RegsAMD64.R10", Field, 0}, + {"RegsAMD64.R11", Field, 0}, + {"RegsAMD64.R12", Field, 0}, + {"RegsAMD64.R13", Field, 0}, + {"RegsAMD64.R14", Field, 0}, + {"RegsAMD64.R15", Field, 0}, + {"RegsAMD64.R8", Field, 0}, + {"RegsAMD64.R9", Field, 0}, + {"RegsAMD64.SI", Field, 0}, + {"RegsAMD64.SP", Field, 0}, + {"Reloc", Type, 10}, + {"Reloc.Addr", Field, 10}, + {"Reloc.Extern", Field, 10}, + {"Reloc.Len", Field, 10}, + {"Reloc.Pcrel", Field, 10}, + {"Reloc.Scattered", Field, 10}, + {"Reloc.Type", Field, 10}, + {"Reloc.Value", Field, 10}, + {"RelocTypeARM", Type, 10}, + {"RelocTypeARM64", Type, 10}, + {"RelocTypeGeneric", Type, 10}, + {"RelocTypeX86_64", Type, 10}, + {"Rpath", Type, 10}, + {"Rpath.LoadBytes", Field, 10}, + {"Rpath.Path", Field, 10}, + {"RpathCmd", Type, 10}, + {"RpathCmd.Cmd", Field, 10}, + {"RpathCmd.Len", Field, 10}, + {"RpathCmd.Path", Field, 10}, + {"Section", Type, 0}, + {"Section.ReaderAt", Field, 0}, + {"Section.Relocs", Field, 10}, + {"Section.SectionHeader", Field, 0}, + {"Section32", Type, 0}, + {"Section32.Addr", Field, 0}, + {"Section32.Align", Field, 0}, + {"Section32.Flags", Field, 0}, + {"Section32.Name", Field, 0}, + {"Section32.Nreloc", Field, 0}, + {"Section32.Offset", Field, 0}, + {"Section32.Reloff", Field, 0}, + {"Section32.Reserve1", Field, 0}, + {"Section32.Reserve2", Field, 0}, + {"Section32.Seg", Field, 0}, + {"Section32.Size", Field, 0}, + {"Section64", Type, 0}, + {"Section64.Addr", Field, 0}, + {"Section64.Align", Field, 0}, + {"Section64.Flags", Field, 0}, + {"Section64.Name", Field, 0}, + {"Section64.Nreloc", Field, 0}, + {"Section64.Offset", Field, 0}, + {"Section64.Reloff", Field, 0}, + {"Section64.Reserve1", Field, 0}, + {"Section64.Reserve2", Field, 0}, + {"Section64.Reserve3", Field, 0}, + {"Section64.Seg", Field, 0}, + {"Section64.Size", Field, 0}, + {"SectionHeader", Type, 0}, + {"SectionHeader.Addr", Field, 0}, + {"SectionHeader.Align", Field, 0}, + {"SectionHeader.Flags", Field, 0}, + {"SectionHeader.Name", Field, 0}, + {"SectionHeader.Nreloc", Field, 0}, + {"SectionHeader.Offset", Field, 0}, + {"SectionHeader.Reloff", Field, 0}, + {"SectionHeader.Seg", Field, 0}, + {"SectionHeader.Size", Field, 0}, + 
{"Segment", Type, 0}, + {"Segment.LoadBytes", Field, 0}, + {"Segment.ReaderAt", Field, 0}, + {"Segment.SegmentHeader", Field, 0}, + {"Segment32", Type, 0}, + {"Segment32.Addr", Field, 0}, + {"Segment32.Cmd", Field, 0}, + {"Segment32.Filesz", Field, 0}, + {"Segment32.Flag", Field, 0}, + {"Segment32.Len", Field, 0}, + {"Segment32.Maxprot", Field, 0}, + {"Segment32.Memsz", Field, 0}, + {"Segment32.Name", Field, 0}, + {"Segment32.Nsect", Field, 0}, + {"Segment32.Offset", Field, 0}, + {"Segment32.Prot", Field, 0}, + {"Segment64", Type, 0}, + {"Segment64.Addr", Field, 0}, + {"Segment64.Cmd", Field, 0}, + {"Segment64.Filesz", Field, 0}, + {"Segment64.Flag", Field, 0}, + {"Segment64.Len", Field, 0}, + {"Segment64.Maxprot", Field, 0}, + {"Segment64.Memsz", Field, 0}, + {"Segment64.Name", Field, 0}, + {"Segment64.Nsect", Field, 0}, + {"Segment64.Offset", Field, 0}, + {"Segment64.Prot", Field, 0}, + {"SegmentHeader", Type, 0}, + {"SegmentHeader.Addr", Field, 0}, + {"SegmentHeader.Cmd", Field, 0}, + {"SegmentHeader.Filesz", Field, 0}, + {"SegmentHeader.Flag", Field, 0}, + {"SegmentHeader.Len", Field, 0}, + {"SegmentHeader.Maxprot", Field, 0}, + {"SegmentHeader.Memsz", Field, 0}, + {"SegmentHeader.Name", Field, 0}, + {"SegmentHeader.Nsect", Field, 0}, + {"SegmentHeader.Offset", Field, 0}, + {"SegmentHeader.Prot", Field, 0}, + {"Symbol", Type, 0}, + {"Symbol.Desc", Field, 0}, + {"Symbol.Name", Field, 0}, + {"Symbol.Sect", Field, 0}, + {"Symbol.Type", Field, 0}, + {"Symbol.Value", Field, 0}, + {"Symtab", Type, 0}, + {"Symtab.LoadBytes", Field, 0}, + {"Symtab.Syms", Field, 0}, + {"Symtab.SymtabCmd", Field, 0}, + {"SymtabCmd", Type, 0}, + {"SymtabCmd.Cmd", Field, 0}, + {"SymtabCmd.Len", Field, 0}, + {"SymtabCmd.Nsyms", Field, 0}, + {"SymtabCmd.Stroff", Field, 0}, + {"SymtabCmd.Strsize", Field, 0}, + {"SymtabCmd.Symoff", Field, 0}, + {"Thread", Type, 0}, + {"Thread.Cmd", Field, 0}, + {"Thread.Data", Field, 0}, + {"Thread.Len", Field, 0}, + {"Thread.Type", Field, 0}, + {"Type", Type, 0}, + {"TypeBundle", Const, 3}, + {"TypeDylib", Const, 3}, + {"TypeExec", Const, 0}, + {"TypeObj", Const, 0}, + {"X86_64_RELOC_BRANCH", Const, 10}, + {"X86_64_RELOC_GOT", Const, 10}, + {"X86_64_RELOC_GOT_LOAD", Const, 10}, + {"X86_64_RELOC_SIGNED", Const, 10}, + {"X86_64_RELOC_SIGNED_1", Const, 10}, + {"X86_64_RELOC_SIGNED_2", Const, 10}, + {"X86_64_RELOC_SIGNED_4", Const, 10}, + {"X86_64_RELOC_SUBTRACTOR", Const, 10}, + {"X86_64_RELOC_TLV", Const, 10}, + {"X86_64_RELOC_UNSIGNED", Const, 10}, + }, + "debug/pe": { + {"(*COFFSymbol).FullName", Method, 8}, + {"(*File).COFFSymbolReadSectionDefAux", Method, 19}, + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(Section).ReadAt", Method, 0}, + {"(StringTable).String", Method, 8}, + {"COFFSymbol", Type, 1}, + {"COFFSymbol.Name", Field, 1}, + {"COFFSymbol.NumberOfAuxSymbols", Field, 1}, + {"COFFSymbol.SectionNumber", Field, 1}, + {"COFFSymbol.StorageClass", Field, 1}, + {"COFFSymbol.Type", Field, 1}, + {"COFFSymbol.Value", Field, 1}, + {"COFFSymbolAuxFormat5", Type, 19}, + {"COFFSymbolAuxFormat5.Checksum", Field, 19}, + {"COFFSymbolAuxFormat5.NumLineNumbers", Field, 19}, + {"COFFSymbolAuxFormat5.NumRelocs", Field, 19}, + {"COFFSymbolAuxFormat5.SecNum", Field, 19}, + {"COFFSymbolAuxFormat5.Selection", Field, 19}, + {"COFFSymbolAuxFormat5.Size", 
Field, 19}, + {"COFFSymbolSize", Const, 1}, + {"DataDirectory", Type, 3}, + {"DataDirectory.Size", Field, 3}, + {"DataDirectory.VirtualAddress", Field, 3}, + {"File", Type, 0}, + {"File.COFFSymbols", Field, 8}, + {"File.FileHeader", Field, 0}, + {"File.OptionalHeader", Field, 3}, + {"File.Sections", Field, 0}, + {"File.StringTable", Field, 8}, + {"File.Symbols", Field, 1}, + {"FileHeader", Type, 0}, + {"FileHeader.Characteristics", Field, 0}, + {"FileHeader.Machine", Field, 0}, + {"FileHeader.NumberOfSections", Field, 0}, + {"FileHeader.NumberOfSymbols", Field, 0}, + {"FileHeader.PointerToSymbolTable", Field, 0}, + {"FileHeader.SizeOfOptionalHeader", Field, 0}, + {"FileHeader.TimeDateStamp", Field, 0}, + {"FormatError", Type, 0}, + {"IMAGE_COMDAT_SELECT_ANY", Const, 19}, + {"IMAGE_COMDAT_SELECT_ASSOCIATIVE", Const, 19}, + {"IMAGE_COMDAT_SELECT_EXACT_MATCH", Const, 19}, + {"IMAGE_COMDAT_SELECT_LARGEST", Const, 19}, + {"IMAGE_COMDAT_SELECT_NODUPLICATES", Const, 19}, + {"IMAGE_COMDAT_SELECT_SAME_SIZE", Const, 19}, + {"IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_BASERELOC", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_DEBUG", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_EXCEPTION", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_EXPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_GLOBALPTR", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_IAT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_IMPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_RESOURCE", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_SECURITY", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_TLS", Const, 11}, + {"IMAGE_DLLCHARACTERISTICS_APPCONTAINER", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_GUARD_CF", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NO_BIND", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NO_SEH", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NX_COMPAT", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", Const, 15}, + {"IMAGE_FILE_32BIT_MACHINE", Const, 15}, + {"IMAGE_FILE_AGGRESIVE_WS_TRIM", Const, 15}, + {"IMAGE_FILE_BYTES_REVERSED_HI", Const, 15}, + {"IMAGE_FILE_BYTES_REVERSED_LO", Const, 15}, + {"IMAGE_FILE_DEBUG_STRIPPED", Const, 15}, + {"IMAGE_FILE_DLL", Const, 15}, + {"IMAGE_FILE_EXECUTABLE_IMAGE", Const, 15}, + {"IMAGE_FILE_LARGE_ADDRESS_AWARE", Const, 15}, + {"IMAGE_FILE_LINE_NUMS_STRIPPED", Const, 15}, + {"IMAGE_FILE_LOCAL_SYMS_STRIPPED", Const, 15}, + {"IMAGE_FILE_MACHINE_AM33", Const, 0}, + {"IMAGE_FILE_MACHINE_AMD64", Const, 0}, + {"IMAGE_FILE_MACHINE_ARM", Const, 0}, + {"IMAGE_FILE_MACHINE_ARM64", Const, 11}, + {"IMAGE_FILE_MACHINE_ARMNT", Const, 12}, + {"IMAGE_FILE_MACHINE_EBC", Const, 0}, + {"IMAGE_FILE_MACHINE_I386", Const, 0}, + {"IMAGE_FILE_MACHINE_IA64", Const, 0}, + {"IMAGE_FILE_MACHINE_LOONGARCH32", Const, 19}, + {"IMAGE_FILE_MACHINE_LOONGARCH64", Const, 19}, + {"IMAGE_FILE_MACHINE_M32R", Const, 0}, + {"IMAGE_FILE_MACHINE_MIPS16", Const, 0}, + {"IMAGE_FILE_MACHINE_MIPSFPU", Const, 0}, + {"IMAGE_FILE_MACHINE_MIPSFPU16", Const, 0}, + {"IMAGE_FILE_MACHINE_POWERPC", Const, 0}, + {"IMAGE_FILE_MACHINE_POWERPCFP", Const, 0}, + 
{"IMAGE_FILE_MACHINE_R4000", Const, 0}, + {"IMAGE_FILE_MACHINE_RISCV128", Const, 20}, + {"IMAGE_FILE_MACHINE_RISCV32", Const, 20}, + {"IMAGE_FILE_MACHINE_RISCV64", Const, 20}, + {"IMAGE_FILE_MACHINE_SH3", Const, 0}, + {"IMAGE_FILE_MACHINE_SH3DSP", Const, 0}, + {"IMAGE_FILE_MACHINE_SH4", Const, 0}, + {"IMAGE_FILE_MACHINE_SH5", Const, 0}, + {"IMAGE_FILE_MACHINE_THUMB", Const, 0}, + {"IMAGE_FILE_MACHINE_UNKNOWN", Const, 0}, + {"IMAGE_FILE_MACHINE_WCEMIPSV2", Const, 0}, + {"IMAGE_FILE_NET_RUN_FROM_SWAP", Const, 15}, + {"IMAGE_FILE_RELOCS_STRIPPED", Const, 15}, + {"IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", Const, 15}, + {"IMAGE_FILE_SYSTEM", Const, 15}, + {"IMAGE_FILE_UP_SYSTEM_ONLY", Const, 15}, + {"IMAGE_SCN_CNT_CODE", Const, 19}, + {"IMAGE_SCN_CNT_INITIALIZED_DATA", Const, 19}, + {"IMAGE_SCN_CNT_UNINITIALIZED_DATA", Const, 19}, + {"IMAGE_SCN_LNK_COMDAT", Const, 19}, + {"IMAGE_SCN_MEM_DISCARDABLE", Const, 19}, + {"IMAGE_SCN_MEM_EXECUTE", Const, 19}, + {"IMAGE_SCN_MEM_READ", Const, 19}, + {"IMAGE_SCN_MEM_WRITE", Const, 19}, + {"IMAGE_SUBSYSTEM_EFI_APPLICATION", Const, 15}, + {"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", Const, 15}, + {"IMAGE_SUBSYSTEM_EFI_ROM", Const, 15}, + {"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", Const, 15}, + {"IMAGE_SUBSYSTEM_NATIVE", Const, 15}, + {"IMAGE_SUBSYSTEM_NATIVE_WINDOWS", Const, 15}, + {"IMAGE_SUBSYSTEM_OS2_CUI", Const, 15}, + {"IMAGE_SUBSYSTEM_POSIX_CUI", Const, 15}, + {"IMAGE_SUBSYSTEM_UNKNOWN", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_CUI", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_GUI", Const, 15}, + {"IMAGE_SUBSYSTEM_XBOX", Const, 15}, + {"ImportDirectory", Type, 0}, + {"ImportDirectory.FirstThunk", Field, 0}, + {"ImportDirectory.ForwarderChain", Field, 0}, + {"ImportDirectory.Name", Field, 0}, + {"ImportDirectory.OriginalFirstThunk", Field, 0}, + {"ImportDirectory.TimeDateStamp", Field, 0}, + {"NewFile", Func, 0}, + {"Open", Func, 0}, + {"OptionalHeader32", Type, 3}, + {"OptionalHeader32.AddressOfEntryPoint", Field, 3}, + {"OptionalHeader32.BaseOfCode", Field, 3}, + {"OptionalHeader32.BaseOfData", Field, 3}, + {"OptionalHeader32.CheckSum", Field, 3}, + {"OptionalHeader32.DataDirectory", Field, 3}, + {"OptionalHeader32.DllCharacteristics", Field, 3}, + {"OptionalHeader32.FileAlignment", Field, 3}, + {"OptionalHeader32.ImageBase", Field, 3}, + {"OptionalHeader32.LoaderFlags", Field, 3}, + {"OptionalHeader32.Magic", Field, 3}, + {"OptionalHeader32.MajorImageVersion", Field, 3}, + {"OptionalHeader32.MajorLinkerVersion", Field, 3}, + {"OptionalHeader32.MajorOperatingSystemVersion", Field, 3}, + {"OptionalHeader32.MajorSubsystemVersion", Field, 3}, + {"OptionalHeader32.MinorImageVersion", Field, 3}, + {"OptionalHeader32.MinorLinkerVersion", Field, 3}, + {"OptionalHeader32.MinorOperatingSystemVersion", Field, 3}, + {"OptionalHeader32.MinorSubsystemVersion", Field, 3}, + {"OptionalHeader32.NumberOfRvaAndSizes", Field, 3}, + {"OptionalHeader32.SectionAlignment", Field, 3}, + {"OptionalHeader32.SizeOfCode", Field, 3}, + {"OptionalHeader32.SizeOfHeaders", Field, 3}, + {"OptionalHeader32.SizeOfHeapCommit", Field, 3}, + {"OptionalHeader32.SizeOfHeapReserve", Field, 3}, + {"OptionalHeader32.SizeOfImage", Field, 3}, + {"OptionalHeader32.SizeOfInitializedData", Field, 3}, + {"OptionalHeader32.SizeOfStackCommit", Field, 3}, + {"OptionalHeader32.SizeOfStackReserve", Field, 3}, + {"OptionalHeader32.SizeOfUninitializedData", Field, 3}, + {"OptionalHeader32.Subsystem", Field, 3}, + 
{"OptionalHeader32.Win32VersionValue", Field, 3}, + {"OptionalHeader64", Type, 3}, + {"OptionalHeader64.AddressOfEntryPoint", Field, 3}, + {"OptionalHeader64.BaseOfCode", Field, 3}, + {"OptionalHeader64.CheckSum", Field, 3}, + {"OptionalHeader64.DataDirectory", Field, 3}, + {"OptionalHeader64.DllCharacteristics", Field, 3}, + {"OptionalHeader64.FileAlignment", Field, 3}, + {"OptionalHeader64.ImageBase", Field, 3}, + {"OptionalHeader64.LoaderFlags", Field, 3}, + {"OptionalHeader64.Magic", Field, 3}, + {"OptionalHeader64.MajorImageVersion", Field, 3}, + {"OptionalHeader64.MajorLinkerVersion", Field, 3}, + {"OptionalHeader64.MajorOperatingSystemVersion", Field, 3}, + {"OptionalHeader64.MajorSubsystemVersion", Field, 3}, + {"OptionalHeader64.MinorImageVersion", Field, 3}, + {"OptionalHeader64.MinorLinkerVersion", Field, 3}, + {"OptionalHeader64.MinorOperatingSystemVersion", Field, 3}, + {"OptionalHeader64.MinorSubsystemVersion", Field, 3}, + {"OptionalHeader64.NumberOfRvaAndSizes", Field, 3}, + {"OptionalHeader64.SectionAlignment", Field, 3}, + {"OptionalHeader64.SizeOfCode", Field, 3}, + {"OptionalHeader64.SizeOfHeaders", Field, 3}, + {"OptionalHeader64.SizeOfHeapCommit", Field, 3}, + {"OptionalHeader64.SizeOfHeapReserve", Field, 3}, + {"OptionalHeader64.SizeOfImage", Field, 3}, + {"OptionalHeader64.SizeOfInitializedData", Field, 3}, + {"OptionalHeader64.SizeOfStackCommit", Field, 3}, + {"OptionalHeader64.SizeOfStackReserve", Field, 3}, + {"OptionalHeader64.SizeOfUninitializedData", Field, 3}, + {"OptionalHeader64.Subsystem", Field, 3}, + {"OptionalHeader64.Win32VersionValue", Field, 3}, + {"Reloc", Type, 8}, + {"Reloc.SymbolTableIndex", Field, 8}, + {"Reloc.Type", Field, 8}, + {"Reloc.VirtualAddress", Field, 8}, + {"Section", Type, 0}, + {"Section.ReaderAt", Field, 0}, + {"Section.Relocs", Field, 8}, + {"Section.SectionHeader", Field, 0}, + {"SectionHeader", Type, 0}, + {"SectionHeader.Characteristics", Field, 0}, + {"SectionHeader.Name", Field, 0}, + {"SectionHeader.NumberOfLineNumbers", Field, 0}, + {"SectionHeader.NumberOfRelocations", Field, 0}, + {"SectionHeader.Offset", Field, 0}, + {"SectionHeader.PointerToLineNumbers", Field, 0}, + {"SectionHeader.PointerToRelocations", Field, 0}, + {"SectionHeader.Size", Field, 0}, + {"SectionHeader.VirtualAddress", Field, 0}, + {"SectionHeader.VirtualSize", Field, 0}, + {"SectionHeader32", Type, 0}, + {"SectionHeader32.Characteristics", Field, 0}, + {"SectionHeader32.Name", Field, 0}, + {"SectionHeader32.NumberOfLineNumbers", Field, 0}, + {"SectionHeader32.NumberOfRelocations", Field, 0}, + {"SectionHeader32.PointerToLineNumbers", Field, 0}, + {"SectionHeader32.PointerToRawData", Field, 0}, + {"SectionHeader32.PointerToRelocations", Field, 0}, + {"SectionHeader32.SizeOfRawData", Field, 0}, + {"SectionHeader32.VirtualAddress", Field, 0}, + {"SectionHeader32.VirtualSize", Field, 0}, + {"StringTable", Type, 8}, + {"Symbol", Type, 1}, + {"Symbol.Name", Field, 1}, + {"Symbol.SectionNumber", Field, 1}, + {"Symbol.StorageClass", Field, 1}, + {"Symbol.Type", Field, 1}, + {"Symbol.Value", Field, 1}, + }, + "debug/plan9obj": { + {"(*File).Close", Method, 3}, + {"(*File).Section", Method, 3}, + {"(*File).Symbols", Method, 3}, + {"(*Section).Data", Method, 3}, + {"(*Section).Open", Method, 3}, + {"(Section).ReadAt", Method, 3}, + {"ErrNoSymbols", Var, 18}, + {"File", Type, 3}, + {"File.FileHeader", Field, 3}, + {"File.Sections", Field, 3}, + {"FileHeader", Type, 3}, + {"FileHeader.Bss", Field, 3}, + {"FileHeader.Entry", Field, 3}, + {"FileHeader.HdrSize", 
Field, 4}, + {"FileHeader.LoadAddress", Field, 4}, + {"FileHeader.Magic", Field, 3}, + {"FileHeader.PtrSize", Field, 3}, + {"Magic386", Const, 3}, + {"Magic64", Const, 3}, + {"MagicAMD64", Const, 3}, + {"MagicARM", Const, 3}, + {"NewFile", Func, 3}, + {"Open", Func, 3}, + {"Section", Type, 3}, + {"Section.ReaderAt", Field, 3}, + {"Section.SectionHeader", Field, 3}, + {"SectionHeader", Type, 3}, + {"SectionHeader.Name", Field, 3}, + {"SectionHeader.Offset", Field, 3}, + {"SectionHeader.Size", Field, 3}, + {"Sym", Type, 3}, + {"Sym.Name", Field, 3}, + {"Sym.Type", Field, 3}, + {"Sym.Value", Field, 3}, + }, + "embed": { + {"(FS).Open", Method, 16}, + {"(FS).ReadDir", Method, 16}, + {"(FS).ReadFile", Method, 16}, + {"FS", Type, 16}, + }, + "encoding": { + {"BinaryMarshaler", Type, 2}, + {"BinaryUnmarshaler", Type, 2}, + {"TextMarshaler", Type, 2}, + {"TextUnmarshaler", Type, 2}, + }, + "encoding/ascii85": { + {"(CorruptInputError).Error", Method, 0}, + {"CorruptInputError", Type, 0}, + {"Decode", Func, 0}, + {"Encode", Func, 0}, + {"MaxEncodedLen", Func, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + }, + "encoding/asn1": { + {"(BitString).At", Method, 0}, + {"(BitString).RightAlign", Method, 0}, + {"(ObjectIdentifier).Equal", Method, 0}, + {"(ObjectIdentifier).String", Method, 3}, + {"(StructuralError).Error", Method, 0}, + {"(SyntaxError).Error", Method, 0}, + {"BitString", Type, 0}, + {"BitString.BitLength", Field, 0}, + {"BitString.Bytes", Field, 0}, + {"ClassApplication", Const, 6}, + {"ClassContextSpecific", Const, 6}, + {"ClassPrivate", Const, 6}, + {"ClassUniversal", Const, 6}, + {"Enumerated", Type, 0}, + {"Flag", Type, 0}, + {"Marshal", Func, 0}, + {"MarshalWithParams", Func, 10}, + {"NullBytes", Var, 9}, + {"NullRawValue", Var, 9}, + {"ObjectIdentifier", Type, 0}, + {"RawContent", Type, 0}, + {"RawValue", Type, 0}, + {"RawValue.Bytes", Field, 0}, + {"RawValue.Class", Field, 0}, + {"RawValue.FullBytes", Field, 0}, + {"RawValue.IsCompound", Field, 0}, + {"RawValue.Tag", Field, 0}, + {"StructuralError", Type, 0}, + {"StructuralError.Msg", Field, 0}, + {"SyntaxError", Type, 0}, + {"SyntaxError.Msg", Field, 0}, + {"TagBMPString", Const, 14}, + {"TagBitString", Const, 6}, + {"TagBoolean", Const, 6}, + {"TagEnum", Const, 6}, + {"TagGeneralString", Const, 6}, + {"TagGeneralizedTime", Const, 6}, + {"TagIA5String", Const, 6}, + {"TagInteger", Const, 6}, + {"TagNull", Const, 9}, + {"TagNumericString", Const, 10}, + {"TagOID", Const, 6}, + {"TagOctetString", Const, 6}, + {"TagPrintableString", Const, 6}, + {"TagSequence", Const, 6}, + {"TagSet", Const, 6}, + {"TagT61String", Const, 6}, + {"TagUTCTime", Const, 6}, + {"TagUTF8String", Const, 6}, + {"Unmarshal", Func, 0}, + {"UnmarshalWithParams", Func, 0}, + }, + "encoding/base32": { + {"(*Encoding).AppendDecode", Method, 22}, + {"(*Encoding).AppendEncode", Method, 22}, + {"(*Encoding).Decode", Method, 0}, + {"(*Encoding).DecodeString", Method, 0}, + {"(*Encoding).DecodedLen", Method, 0}, + {"(*Encoding).Encode", Method, 0}, + {"(*Encoding).EncodeToString", Method, 0}, + {"(*Encoding).EncodedLen", Method, 0}, + {"(CorruptInputError).Error", Method, 0}, + {"(Encoding).WithPadding", Method, 9}, + {"CorruptInputError", Type, 0}, + {"Encoding", Type, 0}, + {"HexEncoding", Var, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"NewEncoding", Func, 0}, + {"NoPadding", Const, 9}, + {"StdEncoding", Var, 0}, + {"StdPadding", Const, 9}, + }, + "encoding/base64": { + {"(*Encoding).AppendDecode", Method, 22}, + 
{"(*Encoding).AppendEncode", Method, 22}, + {"(*Encoding).Decode", Method, 0}, + {"(*Encoding).DecodeString", Method, 0}, + {"(*Encoding).DecodedLen", Method, 0}, + {"(*Encoding).Encode", Method, 0}, + {"(*Encoding).EncodeToString", Method, 0}, + {"(*Encoding).EncodedLen", Method, 0}, + {"(CorruptInputError).Error", Method, 0}, + {"(Encoding).Strict", Method, 8}, + {"(Encoding).WithPadding", Method, 5}, + {"CorruptInputError", Type, 0}, + {"Encoding", Type, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"NewEncoding", Func, 0}, + {"NoPadding", Const, 5}, + {"RawStdEncoding", Var, 5}, + {"RawURLEncoding", Var, 5}, + {"StdEncoding", Var, 0}, + {"StdPadding", Const, 5}, + {"URLEncoding", Var, 0}, + }, + "encoding/binary": { + {"AppendByteOrder", Type, 19}, + {"AppendUvarint", Func, 19}, + {"AppendVarint", Func, 19}, + {"BigEndian", Var, 0}, + {"ByteOrder", Type, 0}, + {"LittleEndian", Var, 0}, + {"MaxVarintLen16", Const, 0}, + {"MaxVarintLen32", Const, 0}, + {"MaxVarintLen64", Const, 0}, + {"NativeEndian", Var, 21}, + {"PutUvarint", Func, 0}, + {"PutVarint", Func, 0}, + {"Read", Func, 0}, + {"ReadUvarint", Func, 0}, + {"ReadVarint", Func, 0}, + {"Size", Func, 0}, + {"Uvarint", Func, 0}, + {"Varint", Func, 0}, + {"Write", Func, 0}, + }, + "encoding/csv": { + {"(*ParseError).Error", Method, 0}, + {"(*ParseError).Unwrap", Method, 13}, + {"(*Reader).FieldPos", Method, 17}, + {"(*Reader).InputOffset", Method, 19}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadAll", Method, 0}, + {"(*Writer).Error", Method, 1}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteAll", Method, 0}, + {"ErrBareQuote", Var, 0}, + {"ErrFieldCount", Var, 0}, + {"ErrQuote", Var, 0}, + {"ErrTrailingComma", Var, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"ParseError", Type, 0}, + {"ParseError.Column", Field, 0}, + {"ParseError.Err", Field, 0}, + {"ParseError.Line", Field, 0}, + {"ParseError.StartLine", Field, 10}, + {"Reader", Type, 0}, + {"Reader.Comma", Field, 0}, + {"Reader.Comment", Field, 0}, + {"Reader.FieldsPerRecord", Field, 0}, + {"Reader.LazyQuotes", Field, 0}, + {"Reader.ReuseRecord", Field, 9}, + {"Reader.TrailingComma", Field, 0}, + {"Reader.TrimLeadingSpace", Field, 0}, + {"Writer", Type, 0}, + {"Writer.Comma", Field, 0}, + {"Writer.UseCRLF", Field, 0}, + }, + "encoding/gob": { + {"(*Decoder).Decode", Method, 0}, + {"(*Decoder).DecodeValue", Method, 0}, + {"(*Encoder).Encode", Method, 0}, + {"(*Encoder).EncodeValue", Method, 0}, + {"CommonType", Type, 0}, + {"CommonType.Id", Field, 0}, + {"CommonType.Name", Field, 0}, + {"Decoder", Type, 0}, + {"Encoder", Type, 0}, + {"GobDecoder", Type, 0}, + {"GobEncoder", Type, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"Register", Func, 0}, + {"RegisterName", Func, 0}, + }, + "encoding/hex": { + {"(InvalidByteError).Error", Method, 0}, + {"AppendDecode", Func, 22}, + {"AppendEncode", Func, 22}, + {"Decode", Func, 0}, + {"DecodeString", Func, 0}, + {"DecodedLen", Func, 0}, + {"Dump", Func, 0}, + {"Dumper", Func, 0}, + {"Encode", Func, 0}, + {"EncodeToString", Func, 0}, + {"EncodedLen", Func, 0}, + {"ErrLength", Var, 0}, + {"InvalidByteError", Type, 0}, + {"NewDecoder", Func, 10}, + {"NewEncoder", Func, 10}, + }, + "encoding/json": { + {"(*Decoder).Buffered", Method, 1}, + {"(*Decoder).Decode", Method, 0}, + {"(*Decoder).DisallowUnknownFields", Method, 10}, + {"(*Decoder).InputOffset", Method, 14}, + {"(*Decoder).More", Method, 5}, + {"(*Decoder).Token", Method, 5}, + 
{"(*Decoder).UseNumber", Method, 1}, + {"(*Encoder).Encode", Method, 0}, + {"(*Encoder).SetEscapeHTML", Method, 7}, + {"(*Encoder).SetIndent", Method, 7}, + {"(*InvalidUTF8Error).Error", Method, 0}, + {"(*InvalidUnmarshalError).Error", Method, 0}, + {"(*MarshalerError).Error", Method, 0}, + {"(*MarshalerError).Unwrap", Method, 13}, + {"(*RawMessage).MarshalJSON", Method, 0}, + {"(*RawMessage).UnmarshalJSON", Method, 0}, + {"(*SyntaxError).Error", Method, 0}, + {"(*UnmarshalFieldError).Error", Method, 0}, + {"(*UnmarshalTypeError).Error", Method, 0}, + {"(*UnsupportedTypeError).Error", Method, 0}, + {"(*UnsupportedValueError).Error", Method, 0}, + {"(Delim).String", Method, 5}, + {"(Number).Float64", Method, 1}, + {"(Number).Int64", Method, 1}, + {"(Number).String", Method, 1}, + {"(RawMessage).MarshalJSON", Method, 8}, + {"Compact", Func, 0}, + {"Decoder", Type, 0}, + {"Delim", Type, 5}, + {"Encoder", Type, 0}, + {"HTMLEscape", Func, 0}, + {"Indent", Func, 0}, + {"InvalidUTF8Error", Type, 0}, + {"InvalidUTF8Error.S", Field, 0}, + {"InvalidUnmarshalError", Type, 0}, + {"InvalidUnmarshalError.Type", Field, 0}, + {"Marshal", Func, 0}, + {"MarshalIndent", Func, 0}, + {"Marshaler", Type, 0}, + {"MarshalerError", Type, 0}, + {"MarshalerError.Err", Field, 0}, + {"MarshalerError.Type", Field, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"Number", Type, 1}, + {"RawMessage", Type, 0}, + {"SyntaxError", Type, 0}, + {"SyntaxError.Offset", Field, 0}, + {"Token", Type, 5}, + {"Unmarshal", Func, 0}, + {"UnmarshalFieldError", Type, 0}, + {"UnmarshalFieldError.Field", Field, 0}, + {"UnmarshalFieldError.Key", Field, 0}, + {"UnmarshalFieldError.Type", Field, 0}, + {"UnmarshalTypeError", Type, 0}, + {"UnmarshalTypeError.Field", Field, 8}, + {"UnmarshalTypeError.Offset", Field, 5}, + {"UnmarshalTypeError.Struct", Field, 8}, + {"UnmarshalTypeError.Type", Field, 0}, + {"UnmarshalTypeError.Value", Field, 0}, + {"Unmarshaler", Type, 0}, + {"UnsupportedTypeError", Type, 0}, + {"UnsupportedTypeError.Type", Field, 0}, + {"UnsupportedValueError", Type, 0}, + {"UnsupportedValueError.Str", Field, 0}, + {"UnsupportedValueError.Value", Field, 0}, + {"Valid", Func, 9}, + }, + "encoding/pem": { + {"Block", Type, 0}, + {"Block.Bytes", Field, 0}, + {"Block.Headers", Field, 0}, + {"Block.Type", Field, 0}, + {"Decode", Func, 0}, + {"Encode", Func, 0}, + {"EncodeToMemory", Func, 0}, + }, + "encoding/xml": { + {"(*Decoder).Decode", Method, 0}, + {"(*Decoder).DecodeElement", Method, 0}, + {"(*Decoder).InputOffset", Method, 4}, + {"(*Decoder).InputPos", Method, 19}, + {"(*Decoder).RawToken", Method, 0}, + {"(*Decoder).Skip", Method, 0}, + {"(*Decoder).Token", Method, 0}, + {"(*Encoder).Close", Method, 20}, + {"(*Encoder).Encode", Method, 0}, + {"(*Encoder).EncodeElement", Method, 2}, + {"(*Encoder).EncodeToken", Method, 2}, + {"(*Encoder).Flush", Method, 2}, + {"(*Encoder).Indent", Method, 1}, + {"(*SyntaxError).Error", Method, 0}, + {"(*TagPathError).Error", Method, 0}, + {"(*UnsupportedTypeError).Error", Method, 0}, + {"(CharData).Copy", Method, 0}, + {"(Comment).Copy", Method, 0}, + {"(Directive).Copy", Method, 0}, + {"(ProcInst).Copy", Method, 0}, + {"(StartElement).Copy", Method, 0}, + {"(StartElement).End", Method, 2}, + {"(UnmarshalError).Error", Method, 0}, + {"Attr", Type, 0}, + {"Attr.Name", Field, 0}, + {"Attr.Value", Field, 0}, + {"CharData", Type, 0}, + {"Comment", Type, 0}, + {"CopyToken", Func, 0}, + {"Decoder", Type, 0}, + {"Decoder.AutoClose", Field, 0}, + {"Decoder.CharsetReader", Field, 0}, + 
{"Decoder.DefaultSpace", Field, 1}, + {"Decoder.Entity", Field, 0}, + {"Decoder.Strict", Field, 0}, + {"Directive", Type, 0}, + {"Encoder", Type, 0}, + {"EndElement", Type, 0}, + {"EndElement.Name", Field, 0}, + {"Escape", Func, 0}, + {"EscapeText", Func, 1}, + {"HTMLAutoClose", Var, 0}, + {"HTMLEntity", Var, 0}, + {"Header", Const, 0}, + {"Marshal", Func, 0}, + {"MarshalIndent", Func, 0}, + {"Marshaler", Type, 2}, + {"MarshalerAttr", Type, 2}, + {"Name", Type, 0}, + {"Name.Local", Field, 0}, + {"Name.Space", Field, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"NewTokenDecoder", Func, 10}, + {"ProcInst", Type, 0}, + {"ProcInst.Inst", Field, 0}, + {"ProcInst.Target", Field, 0}, + {"StartElement", Type, 0}, + {"StartElement.Attr", Field, 0}, + {"StartElement.Name", Field, 0}, + {"SyntaxError", Type, 0}, + {"SyntaxError.Line", Field, 0}, + {"SyntaxError.Msg", Field, 0}, + {"TagPathError", Type, 0}, + {"TagPathError.Field1", Field, 0}, + {"TagPathError.Field2", Field, 0}, + {"TagPathError.Struct", Field, 0}, + {"TagPathError.Tag1", Field, 0}, + {"TagPathError.Tag2", Field, 0}, + {"Token", Type, 0}, + {"TokenReader", Type, 10}, + {"Unmarshal", Func, 0}, + {"UnmarshalError", Type, 0}, + {"Unmarshaler", Type, 2}, + {"UnmarshalerAttr", Type, 2}, + {"UnsupportedTypeError", Type, 0}, + {"UnsupportedTypeError.Type", Field, 0}, + }, + "errors": { + {"As", Func, 13}, + {"ErrUnsupported", Var, 21}, + {"Is", Func, 13}, + {"Join", Func, 20}, + {"New", Func, 0}, + {"Unwrap", Func, 13}, + }, + "expvar": { + {"(*Float).Add", Method, 0}, + {"(*Float).Set", Method, 0}, + {"(*Float).String", Method, 0}, + {"(*Float).Value", Method, 8}, + {"(*Int).Add", Method, 0}, + {"(*Int).Set", Method, 0}, + {"(*Int).String", Method, 0}, + {"(*Int).Value", Method, 8}, + {"(*Map).Add", Method, 0}, + {"(*Map).AddFloat", Method, 0}, + {"(*Map).Delete", Method, 12}, + {"(*Map).Do", Method, 0}, + {"(*Map).Get", Method, 0}, + {"(*Map).Init", Method, 0}, + {"(*Map).Set", Method, 0}, + {"(*Map).String", Method, 0}, + {"(*String).Set", Method, 0}, + {"(*String).String", Method, 0}, + {"(*String).Value", Method, 8}, + {"(Func).String", Method, 0}, + {"(Func).Value", Method, 8}, + {"Do", Func, 0}, + {"Float", Type, 0}, + {"Func", Type, 0}, + {"Get", Func, 0}, + {"Handler", Func, 8}, + {"Int", Type, 0}, + {"KeyValue", Type, 0}, + {"KeyValue.Key", Field, 0}, + {"KeyValue.Value", Field, 0}, + {"Map", Type, 0}, + {"NewFloat", Func, 0}, + {"NewInt", Func, 0}, + {"NewMap", Func, 0}, + {"NewString", Func, 0}, + {"Publish", Func, 0}, + {"String", Type, 0}, + {"Var", Type, 0}, + }, + "flag": { + {"(*FlagSet).Arg", Method, 0}, + {"(*FlagSet).Args", Method, 0}, + {"(*FlagSet).Bool", Method, 0}, + {"(*FlagSet).BoolFunc", Method, 21}, + {"(*FlagSet).BoolVar", Method, 0}, + {"(*FlagSet).Duration", Method, 0}, + {"(*FlagSet).DurationVar", Method, 0}, + {"(*FlagSet).ErrorHandling", Method, 10}, + {"(*FlagSet).Float64", Method, 0}, + {"(*FlagSet).Float64Var", Method, 0}, + {"(*FlagSet).Func", Method, 16}, + {"(*FlagSet).Init", Method, 0}, + {"(*FlagSet).Int", Method, 0}, + {"(*FlagSet).Int64", Method, 0}, + {"(*FlagSet).Int64Var", Method, 0}, + {"(*FlagSet).IntVar", Method, 0}, + {"(*FlagSet).Lookup", Method, 0}, + {"(*FlagSet).NArg", Method, 0}, + {"(*FlagSet).NFlag", Method, 0}, + {"(*FlagSet).Name", Method, 10}, + {"(*FlagSet).Output", Method, 10}, + {"(*FlagSet).Parse", Method, 0}, + {"(*FlagSet).Parsed", Method, 0}, + {"(*FlagSet).PrintDefaults", Method, 0}, + {"(*FlagSet).Set", Method, 0}, + {"(*FlagSet).SetOutput", Method, 0}, + 
{"(*FlagSet).String", Method, 0}, + {"(*FlagSet).StringVar", Method, 0}, + {"(*FlagSet).TextVar", Method, 19}, + {"(*FlagSet).Uint", Method, 0}, + {"(*FlagSet).Uint64", Method, 0}, + {"(*FlagSet).Uint64Var", Method, 0}, + {"(*FlagSet).UintVar", Method, 0}, + {"(*FlagSet).Var", Method, 0}, + {"(*FlagSet).Visit", Method, 0}, + {"(*FlagSet).VisitAll", Method, 0}, + {"Arg", Func, 0}, + {"Args", Func, 0}, + {"Bool", Func, 0}, + {"BoolFunc", Func, 21}, + {"BoolVar", Func, 0}, + {"CommandLine", Var, 2}, + {"ContinueOnError", Const, 0}, + {"Duration", Func, 0}, + {"DurationVar", Func, 0}, + {"ErrHelp", Var, 0}, + {"ErrorHandling", Type, 0}, + {"ExitOnError", Const, 0}, + {"Flag", Type, 0}, + {"Flag.DefValue", Field, 0}, + {"Flag.Name", Field, 0}, + {"Flag.Usage", Field, 0}, + {"Flag.Value", Field, 0}, + {"FlagSet", Type, 0}, + {"FlagSet.Usage", Field, 0}, + {"Float64", Func, 0}, + {"Float64Var", Func, 0}, + {"Func", Func, 16}, + {"Getter", Type, 2}, + {"Int", Func, 0}, + {"Int64", Func, 0}, + {"Int64Var", Func, 0}, + {"IntVar", Func, 0}, + {"Lookup", Func, 0}, + {"NArg", Func, 0}, + {"NFlag", Func, 0}, + {"NewFlagSet", Func, 0}, + {"PanicOnError", Const, 0}, + {"Parse", Func, 0}, + {"Parsed", Func, 0}, + {"PrintDefaults", Func, 0}, + {"Set", Func, 0}, + {"String", Func, 0}, + {"StringVar", Func, 0}, + {"TextVar", Func, 19}, + {"Uint", Func, 0}, + {"Uint64", Func, 0}, + {"Uint64Var", Func, 0}, + {"UintVar", Func, 0}, + {"UnquoteUsage", Func, 5}, + {"Usage", Var, 0}, + {"Value", Type, 0}, + {"Var", Func, 0}, + {"Visit", Func, 0}, + {"VisitAll", Func, 0}, + }, + "fmt": { + {"Append", Func, 19}, + {"Appendf", Func, 19}, + {"Appendln", Func, 19}, + {"Errorf", Func, 0}, + {"FormatString", Func, 20}, + {"Formatter", Type, 0}, + {"Fprint", Func, 0}, + {"Fprintf", Func, 0}, + {"Fprintln", Func, 0}, + {"Fscan", Func, 0}, + {"Fscanf", Func, 0}, + {"Fscanln", Func, 0}, + {"GoStringer", Type, 0}, + {"Print", Func, 0}, + {"Printf", Func, 0}, + {"Println", Func, 0}, + {"Scan", Func, 0}, + {"ScanState", Type, 0}, + {"Scanf", Func, 0}, + {"Scanln", Func, 0}, + {"Scanner", Type, 0}, + {"Sprint", Func, 0}, + {"Sprintf", Func, 0}, + {"Sprintln", Func, 0}, + {"Sscan", Func, 0}, + {"Sscanf", Func, 0}, + {"Sscanln", Func, 0}, + {"State", Type, 0}, + {"Stringer", Type, 0}, + }, + "go/ast": { + {"(*ArrayType).End", Method, 0}, + {"(*ArrayType).Pos", Method, 0}, + {"(*AssignStmt).End", Method, 0}, + {"(*AssignStmt).Pos", Method, 0}, + {"(*BadDecl).End", Method, 0}, + {"(*BadDecl).Pos", Method, 0}, + {"(*BadExpr).End", Method, 0}, + {"(*BadExpr).Pos", Method, 0}, + {"(*BadStmt).End", Method, 0}, + {"(*BadStmt).Pos", Method, 0}, + {"(*BasicLit).End", Method, 0}, + {"(*BasicLit).Pos", Method, 0}, + {"(*BinaryExpr).End", Method, 0}, + {"(*BinaryExpr).Pos", Method, 0}, + {"(*BlockStmt).End", Method, 0}, + {"(*BlockStmt).Pos", Method, 0}, + {"(*BranchStmt).End", Method, 0}, + {"(*BranchStmt).Pos", Method, 0}, + {"(*CallExpr).End", Method, 0}, + {"(*CallExpr).Pos", Method, 0}, + {"(*CaseClause).End", Method, 0}, + {"(*CaseClause).Pos", Method, 0}, + {"(*ChanType).End", Method, 0}, + {"(*ChanType).Pos", Method, 0}, + {"(*CommClause).End", Method, 0}, + {"(*CommClause).Pos", Method, 0}, + {"(*Comment).End", Method, 0}, + {"(*Comment).Pos", Method, 0}, + {"(*CommentGroup).End", Method, 0}, + {"(*CommentGroup).Pos", Method, 0}, + {"(*CommentGroup).Text", Method, 0}, + {"(*CompositeLit).End", Method, 0}, + {"(*CompositeLit).Pos", Method, 0}, + {"(*DeclStmt).End", Method, 0}, + {"(*DeclStmt).Pos", Method, 0}, + {"(*DeferStmt).End", 
Method, 0}, + {"(*DeferStmt).Pos", Method, 0}, + {"(*Ellipsis).End", Method, 0}, + {"(*Ellipsis).Pos", Method, 0}, + {"(*EmptyStmt).End", Method, 0}, + {"(*EmptyStmt).Pos", Method, 0}, + {"(*ExprStmt).End", Method, 0}, + {"(*ExprStmt).Pos", Method, 0}, + {"(*Field).End", Method, 0}, + {"(*Field).Pos", Method, 0}, + {"(*FieldList).End", Method, 0}, + {"(*FieldList).NumFields", Method, 0}, + {"(*FieldList).Pos", Method, 0}, + {"(*File).End", Method, 0}, + {"(*File).Pos", Method, 0}, + {"(*ForStmt).End", Method, 0}, + {"(*ForStmt).Pos", Method, 0}, + {"(*FuncDecl).End", Method, 0}, + {"(*FuncDecl).Pos", Method, 0}, + {"(*FuncLit).End", Method, 0}, + {"(*FuncLit).Pos", Method, 0}, + {"(*FuncType).End", Method, 0}, + {"(*FuncType).Pos", Method, 0}, + {"(*GenDecl).End", Method, 0}, + {"(*GenDecl).Pos", Method, 0}, + {"(*GoStmt).End", Method, 0}, + {"(*GoStmt).Pos", Method, 0}, + {"(*Ident).End", Method, 0}, + {"(*Ident).IsExported", Method, 0}, + {"(*Ident).Pos", Method, 0}, + {"(*Ident).String", Method, 0}, + {"(*IfStmt).End", Method, 0}, + {"(*IfStmt).Pos", Method, 0}, + {"(*ImportSpec).End", Method, 0}, + {"(*ImportSpec).Pos", Method, 0}, + {"(*IncDecStmt).End", Method, 0}, + {"(*IncDecStmt).Pos", Method, 0}, + {"(*IndexExpr).End", Method, 0}, + {"(*IndexExpr).Pos", Method, 0}, + {"(*IndexListExpr).End", Method, 18}, + {"(*IndexListExpr).Pos", Method, 18}, + {"(*InterfaceType).End", Method, 0}, + {"(*InterfaceType).Pos", Method, 0}, + {"(*KeyValueExpr).End", Method, 0}, + {"(*KeyValueExpr).Pos", Method, 0}, + {"(*LabeledStmt).End", Method, 0}, + {"(*LabeledStmt).Pos", Method, 0}, + {"(*MapType).End", Method, 0}, + {"(*MapType).Pos", Method, 0}, + {"(*Object).Pos", Method, 0}, + {"(*Package).End", Method, 0}, + {"(*Package).Pos", Method, 0}, + {"(*ParenExpr).End", Method, 0}, + {"(*ParenExpr).Pos", Method, 0}, + {"(*RangeStmt).End", Method, 0}, + {"(*RangeStmt).Pos", Method, 0}, + {"(*ReturnStmt).End", Method, 0}, + {"(*ReturnStmt).Pos", Method, 0}, + {"(*Scope).Insert", Method, 0}, + {"(*Scope).Lookup", Method, 0}, + {"(*Scope).String", Method, 0}, + {"(*SelectStmt).End", Method, 0}, + {"(*SelectStmt).Pos", Method, 0}, + {"(*SelectorExpr).End", Method, 0}, + {"(*SelectorExpr).Pos", Method, 0}, + {"(*SendStmt).End", Method, 0}, + {"(*SendStmt).Pos", Method, 0}, + {"(*SliceExpr).End", Method, 0}, + {"(*SliceExpr).Pos", Method, 0}, + {"(*StarExpr).End", Method, 0}, + {"(*StarExpr).Pos", Method, 0}, + {"(*StructType).End", Method, 0}, + {"(*StructType).Pos", Method, 0}, + {"(*SwitchStmt).End", Method, 0}, + {"(*SwitchStmt).Pos", Method, 0}, + {"(*TypeAssertExpr).End", Method, 0}, + {"(*TypeAssertExpr).Pos", Method, 0}, + {"(*TypeSpec).End", Method, 0}, + {"(*TypeSpec).Pos", Method, 0}, + {"(*TypeSwitchStmt).End", Method, 0}, + {"(*TypeSwitchStmt).Pos", Method, 0}, + {"(*UnaryExpr).End", Method, 0}, + {"(*UnaryExpr).Pos", Method, 0}, + {"(*ValueSpec).End", Method, 0}, + {"(*ValueSpec).Pos", Method, 0}, + {"(CommentMap).Comments", Method, 1}, + {"(CommentMap).Filter", Method, 1}, + {"(CommentMap).String", Method, 1}, + {"(CommentMap).Update", Method, 1}, + {"(ObjKind).String", Method, 0}, + {"ArrayType", Type, 0}, + {"ArrayType.Elt", Field, 0}, + {"ArrayType.Lbrack", Field, 0}, + {"ArrayType.Len", Field, 0}, + {"AssignStmt", Type, 0}, + {"AssignStmt.Lhs", Field, 0}, + {"AssignStmt.Rhs", Field, 0}, + {"AssignStmt.Tok", Field, 0}, + {"AssignStmt.TokPos", Field, 0}, + {"Bad", Const, 0}, + {"BadDecl", Type, 0}, + {"BadDecl.From", Field, 0}, + {"BadDecl.To", Field, 0}, + {"BadExpr", Type, 0}, + 
{"BadExpr.From", Field, 0}, + {"BadExpr.To", Field, 0}, + {"BadStmt", Type, 0}, + {"BadStmt.From", Field, 0}, + {"BadStmt.To", Field, 0}, + {"BasicLit", Type, 0}, + {"BasicLit.Kind", Field, 0}, + {"BasicLit.Value", Field, 0}, + {"BasicLit.ValuePos", Field, 0}, + {"BinaryExpr", Type, 0}, + {"BinaryExpr.Op", Field, 0}, + {"BinaryExpr.OpPos", Field, 0}, + {"BinaryExpr.X", Field, 0}, + {"BinaryExpr.Y", Field, 0}, + {"BlockStmt", Type, 0}, + {"BlockStmt.Lbrace", Field, 0}, + {"BlockStmt.List", Field, 0}, + {"BlockStmt.Rbrace", Field, 0}, + {"BranchStmt", Type, 0}, + {"BranchStmt.Label", Field, 0}, + {"BranchStmt.Tok", Field, 0}, + {"BranchStmt.TokPos", Field, 0}, + {"CallExpr", Type, 0}, + {"CallExpr.Args", Field, 0}, + {"CallExpr.Ellipsis", Field, 0}, + {"CallExpr.Fun", Field, 0}, + {"CallExpr.Lparen", Field, 0}, + {"CallExpr.Rparen", Field, 0}, + {"CaseClause", Type, 0}, + {"CaseClause.Body", Field, 0}, + {"CaseClause.Case", Field, 0}, + {"CaseClause.Colon", Field, 0}, + {"CaseClause.List", Field, 0}, + {"ChanDir", Type, 0}, + {"ChanType", Type, 0}, + {"ChanType.Arrow", Field, 1}, + {"ChanType.Begin", Field, 0}, + {"ChanType.Dir", Field, 0}, + {"ChanType.Value", Field, 0}, + {"CommClause", Type, 0}, + {"CommClause.Body", Field, 0}, + {"CommClause.Case", Field, 0}, + {"CommClause.Colon", Field, 0}, + {"CommClause.Comm", Field, 0}, + {"Comment", Type, 0}, + {"Comment.Slash", Field, 0}, + {"Comment.Text", Field, 0}, + {"CommentGroup", Type, 0}, + {"CommentGroup.List", Field, 0}, + {"CommentMap", Type, 1}, + {"CompositeLit", Type, 0}, + {"CompositeLit.Elts", Field, 0}, + {"CompositeLit.Incomplete", Field, 11}, + {"CompositeLit.Lbrace", Field, 0}, + {"CompositeLit.Rbrace", Field, 0}, + {"CompositeLit.Type", Field, 0}, + {"Con", Const, 0}, + {"Decl", Type, 0}, + {"DeclStmt", Type, 0}, + {"DeclStmt.Decl", Field, 0}, + {"DeferStmt", Type, 0}, + {"DeferStmt.Call", Field, 0}, + {"DeferStmt.Defer", Field, 0}, + {"Ellipsis", Type, 0}, + {"Ellipsis.Ellipsis", Field, 0}, + {"Ellipsis.Elt", Field, 0}, + {"EmptyStmt", Type, 0}, + {"EmptyStmt.Implicit", Field, 5}, + {"EmptyStmt.Semicolon", Field, 0}, + {"Expr", Type, 0}, + {"ExprStmt", Type, 0}, + {"ExprStmt.X", Field, 0}, + {"Field", Type, 0}, + {"Field.Comment", Field, 0}, + {"Field.Doc", Field, 0}, + {"Field.Names", Field, 0}, + {"Field.Tag", Field, 0}, + {"Field.Type", Field, 0}, + {"FieldFilter", Type, 0}, + {"FieldList", Type, 0}, + {"FieldList.Closing", Field, 0}, + {"FieldList.List", Field, 0}, + {"FieldList.Opening", Field, 0}, + {"File", Type, 0}, + {"File.Comments", Field, 0}, + {"File.Decls", Field, 0}, + {"File.Doc", Field, 0}, + {"File.FileEnd", Field, 20}, + {"File.FileStart", Field, 20}, + {"File.GoVersion", Field, 21}, + {"File.Imports", Field, 0}, + {"File.Name", Field, 0}, + {"File.Package", Field, 0}, + {"File.Scope", Field, 0}, + {"File.Unresolved", Field, 0}, + {"FileExports", Func, 0}, + {"Filter", Type, 0}, + {"FilterDecl", Func, 0}, + {"FilterFile", Func, 0}, + {"FilterFuncDuplicates", Const, 0}, + {"FilterImportDuplicates", Const, 0}, + {"FilterPackage", Func, 0}, + {"FilterUnassociatedComments", Const, 0}, + {"ForStmt", Type, 0}, + {"ForStmt.Body", Field, 0}, + {"ForStmt.Cond", Field, 0}, + {"ForStmt.For", Field, 0}, + {"ForStmt.Init", Field, 0}, + {"ForStmt.Post", Field, 0}, + {"Fprint", Func, 0}, + {"Fun", Const, 0}, + {"FuncDecl", Type, 0}, + {"FuncDecl.Body", Field, 0}, + {"FuncDecl.Doc", Field, 0}, + {"FuncDecl.Name", Field, 0}, + {"FuncDecl.Recv", Field, 0}, + {"FuncDecl.Type", Field, 0}, + {"FuncLit", Type, 0}, + 
{"FuncLit.Body", Field, 0}, + {"FuncLit.Type", Field, 0}, + {"FuncType", Type, 0}, + {"FuncType.Func", Field, 0}, + {"FuncType.Params", Field, 0}, + {"FuncType.Results", Field, 0}, + {"FuncType.TypeParams", Field, 18}, + {"GenDecl", Type, 0}, + {"GenDecl.Doc", Field, 0}, + {"GenDecl.Lparen", Field, 0}, + {"GenDecl.Rparen", Field, 0}, + {"GenDecl.Specs", Field, 0}, + {"GenDecl.Tok", Field, 0}, + {"GenDecl.TokPos", Field, 0}, + {"GoStmt", Type, 0}, + {"GoStmt.Call", Field, 0}, + {"GoStmt.Go", Field, 0}, + {"Ident", Type, 0}, + {"Ident.Name", Field, 0}, + {"Ident.NamePos", Field, 0}, + {"Ident.Obj", Field, 0}, + {"IfStmt", Type, 0}, + {"IfStmt.Body", Field, 0}, + {"IfStmt.Cond", Field, 0}, + {"IfStmt.Else", Field, 0}, + {"IfStmt.If", Field, 0}, + {"IfStmt.Init", Field, 0}, + {"ImportSpec", Type, 0}, + {"ImportSpec.Comment", Field, 0}, + {"ImportSpec.Doc", Field, 0}, + {"ImportSpec.EndPos", Field, 0}, + {"ImportSpec.Name", Field, 0}, + {"ImportSpec.Path", Field, 0}, + {"Importer", Type, 0}, + {"IncDecStmt", Type, 0}, + {"IncDecStmt.Tok", Field, 0}, + {"IncDecStmt.TokPos", Field, 0}, + {"IncDecStmt.X", Field, 0}, + {"IndexExpr", Type, 0}, + {"IndexExpr.Index", Field, 0}, + {"IndexExpr.Lbrack", Field, 0}, + {"IndexExpr.Rbrack", Field, 0}, + {"IndexExpr.X", Field, 0}, + {"IndexListExpr", Type, 18}, + {"IndexListExpr.Indices", Field, 18}, + {"IndexListExpr.Lbrack", Field, 18}, + {"IndexListExpr.Rbrack", Field, 18}, + {"IndexListExpr.X", Field, 18}, + {"Inspect", Func, 0}, + {"InterfaceType", Type, 0}, + {"InterfaceType.Incomplete", Field, 0}, + {"InterfaceType.Interface", Field, 0}, + {"InterfaceType.Methods", Field, 0}, + {"IsExported", Func, 0}, + {"IsGenerated", Func, 21}, + {"KeyValueExpr", Type, 0}, + {"KeyValueExpr.Colon", Field, 0}, + {"KeyValueExpr.Key", Field, 0}, + {"KeyValueExpr.Value", Field, 0}, + {"LabeledStmt", Type, 0}, + {"LabeledStmt.Colon", Field, 0}, + {"LabeledStmt.Label", Field, 0}, + {"LabeledStmt.Stmt", Field, 0}, + {"Lbl", Const, 0}, + {"MapType", Type, 0}, + {"MapType.Key", Field, 0}, + {"MapType.Map", Field, 0}, + {"MapType.Value", Field, 0}, + {"MergeMode", Type, 0}, + {"MergePackageFiles", Func, 0}, + {"NewCommentMap", Func, 1}, + {"NewIdent", Func, 0}, + {"NewObj", Func, 0}, + {"NewPackage", Func, 0}, + {"NewScope", Func, 0}, + {"Node", Type, 0}, + {"NotNilFilter", Func, 0}, + {"ObjKind", Type, 0}, + {"Object", Type, 0}, + {"Object.Data", Field, 0}, + {"Object.Decl", Field, 0}, + {"Object.Kind", Field, 0}, + {"Object.Name", Field, 0}, + {"Object.Type", Field, 0}, + {"Package", Type, 0}, + {"Package.Files", Field, 0}, + {"Package.Imports", Field, 0}, + {"Package.Name", Field, 0}, + {"Package.Scope", Field, 0}, + {"PackageExports", Func, 0}, + {"ParenExpr", Type, 0}, + {"ParenExpr.Lparen", Field, 0}, + {"ParenExpr.Rparen", Field, 0}, + {"ParenExpr.X", Field, 0}, + {"Pkg", Const, 0}, + {"Print", Func, 0}, + {"RECV", Const, 0}, + {"RangeStmt", Type, 0}, + {"RangeStmt.Body", Field, 0}, + {"RangeStmt.For", Field, 0}, + {"RangeStmt.Key", Field, 0}, + {"RangeStmt.Range", Field, 20}, + {"RangeStmt.Tok", Field, 0}, + {"RangeStmt.TokPos", Field, 0}, + {"RangeStmt.Value", Field, 0}, + {"RangeStmt.X", Field, 0}, + {"ReturnStmt", Type, 0}, + {"ReturnStmt.Results", Field, 0}, + {"ReturnStmt.Return", Field, 0}, + {"SEND", Const, 0}, + {"Scope", Type, 0}, + {"Scope.Objects", Field, 0}, + {"Scope.Outer", Field, 0}, + {"SelectStmt", Type, 0}, + {"SelectStmt.Body", Field, 0}, + {"SelectStmt.Select", Field, 0}, + {"SelectorExpr", Type, 0}, + {"SelectorExpr.Sel", Field, 0}, + 
{"SelectorExpr.X", Field, 0}, + {"SendStmt", Type, 0}, + {"SendStmt.Arrow", Field, 0}, + {"SendStmt.Chan", Field, 0}, + {"SendStmt.Value", Field, 0}, + {"SliceExpr", Type, 0}, + {"SliceExpr.High", Field, 0}, + {"SliceExpr.Lbrack", Field, 0}, + {"SliceExpr.Low", Field, 0}, + {"SliceExpr.Max", Field, 2}, + {"SliceExpr.Rbrack", Field, 0}, + {"SliceExpr.Slice3", Field, 2}, + {"SliceExpr.X", Field, 0}, + {"SortImports", Func, 0}, + {"Spec", Type, 0}, + {"StarExpr", Type, 0}, + {"StarExpr.Star", Field, 0}, + {"StarExpr.X", Field, 0}, + {"Stmt", Type, 0}, + {"StructType", Type, 0}, + {"StructType.Fields", Field, 0}, + {"StructType.Incomplete", Field, 0}, + {"StructType.Struct", Field, 0}, + {"SwitchStmt", Type, 0}, + {"SwitchStmt.Body", Field, 0}, + {"SwitchStmt.Init", Field, 0}, + {"SwitchStmt.Switch", Field, 0}, + {"SwitchStmt.Tag", Field, 0}, + {"Typ", Const, 0}, + {"TypeAssertExpr", Type, 0}, + {"TypeAssertExpr.Lparen", Field, 2}, + {"TypeAssertExpr.Rparen", Field, 2}, + {"TypeAssertExpr.Type", Field, 0}, + {"TypeAssertExpr.X", Field, 0}, + {"TypeSpec", Type, 0}, + {"TypeSpec.Assign", Field, 9}, + {"TypeSpec.Comment", Field, 0}, + {"TypeSpec.Doc", Field, 0}, + {"TypeSpec.Name", Field, 0}, + {"TypeSpec.Type", Field, 0}, + {"TypeSpec.TypeParams", Field, 18}, + {"TypeSwitchStmt", Type, 0}, + {"TypeSwitchStmt.Assign", Field, 0}, + {"TypeSwitchStmt.Body", Field, 0}, + {"TypeSwitchStmt.Init", Field, 0}, + {"TypeSwitchStmt.Switch", Field, 0}, + {"UnaryExpr", Type, 0}, + {"UnaryExpr.Op", Field, 0}, + {"UnaryExpr.OpPos", Field, 0}, + {"UnaryExpr.X", Field, 0}, + {"Unparen", Func, 22}, + {"ValueSpec", Type, 0}, + {"ValueSpec.Comment", Field, 0}, + {"ValueSpec.Doc", Field, 0}, + {"ValueSpec.Names", Field, 0}, + {"ValueSpec.Type", Field, 0}, + {"ValueSpec.Values", Field, 0}, + {"Var", Const, 0}, + {"Visitor", Type, 0}, + {"Walk", Func, 0}, + }, + "go/build": { + {"(*Context).Import", Method, 0}, + {"(*Context).ImportDir", Method, 0}, + {"(*Context).MatchFile", Method, 2}, + {"(*Context).SrcDirs", Method, 0}, + {"(*MultiplePackageError).Error", Method, 4}, + {"(*NoGoError).Error", Method, 0}, + {"(*Package).IsCommand", Method, 0}, + {"AllowBinary", Const, 0}, + {"ArchChar", Func, 0}, + {"Context", Type, 0}, + {"Context.BuildTags", Field, 0}, + {"Context.CgoEnabled", Field, 0}, + {"Context.Compiler", Field, 0}, + {"Context.Dir", Field, 14}, + {"Context.GOARCH", Field, 0}, + {"Context.GOOS", Field, 0}, + {"Context.GOPATH", Field, 0}, + {"Context.GOROOT", Field, 0}, + {"Context.HasSubdir", Field, 0}, + {"Context.InstallSuffix", Field, 1}, + {"Context.IsAbsPath", Field, 0}, + {"Context.IsDir", Field, 0}, + {"Context.JoinPath", Field, 0}, + {"Context.OpenFile", Field, 0}, + {"Context.ReadDir", Field, 0}, + {"Context.ReleaseTags", Field, 1}, + {"Context.SplitPathList", Field, 0}, + {"Context.ToolTags", Field, 17}, + {"Context.UseAllFiles", Field, 0}, + {"Default", Var, 0}, + {"Directive", Type, 21}, + {"Directive.Pos", Field, 21}, + {"Directive.Text", Field, 21}, + {"FindOnly", Const, 0}, + {"IgnoreVendor", Const, 6}, + {"Import", Func, 0}, + {"ImportComment", Const, 4}, + {"ImportDir", Func, 0}, + {"ImportMode", Type, 0}, + {"IsLocalImport", Func, 0}, + {"MultiplePackageError", Type, 4}, + {"MultiplePackageError.Dir", Field, 4}, + {"MultiplePackageError.Files", Field, 4}, + {"MultiplePackageError.Packages", Field, 4}, + {"NoGoError", Type, 0}, + {"NoGoError.Dir", Field, 0}, + {"Package", Type, 0}, + {"Package.AllTags", Field, 2}, + {"Package.BinDir", Field, 0}, + {"Package.BinaryOnly", Field, 7}, + 
{"Package.CFiles", Field, 0}, + {"Package.CXXFiles", Field, 2}, + {"Package.CgoCFLAGS", Field, 0}, + {"Package.CgoCPPFLAGS", Field, 2}, + {"Package.CgoCXXFLAGS", Field, 2}, + {"Package.CgoFFLAGS", Field, 7}, + {"Package.CgoFiles", Field, 0}, + {"Package.CgoLDFLAGS", Field, 0}, + {"Package.CgoPkgConfig", Field, 0}, + {"Package.ConflictDir", Field, 2}, + {"Package.Dir", Field, 0}, + {"Package.Directives", Field, 21}, + {"Package.Doc", Field, 0}, + {"Package.EmbedPatternPos", Field, 16}, + {"Package.EmbedPatterns", Field, 16}, + {"Package.FFiles", Field, 7}, + {"Package.GoFiles", Field, 0}, + {"Package.Goroot", Field, 0}, + {"Package.HFiles", Field, 0}, + {"Package.IgnoredGoFiles", Field, 1}, + {"Package.IgnoredOtherFiles", Field, 16}, + {"Package.ImportComment", Field, 4}, + {"Package.ImportPath", Field, 0}, + {"Package.ImportPos", Field, 0}, + {"Package.Imports", Field, 0}, + {"Package.InvalidGoFiles", Field, 6}, + {"Package.MFiles", Field, 3}, + {"Package.Name", Field, 0}, + {"Package.PkgObj", Field, 0}, + {"Package.PkgRoot", Field, 0}, + {"Package.PkgTargetRoot", Field, 5}, + {"Package.Root", Field, 0}, + {"Package.SFiles", Field, 0}, + {"Package.SrcRoot", Field, 0}, + {"Package.SwigCXXFiles", Field, 1}, + {"Package.SwigFiles", Field, 1}, + {"Package.SysoFiles", Field, 0}, + {"Package.TestDirectives", Field, 21}, + {"Package.TestEmbedPatternPos", Field, 16}, + {"Package.TestEmbedPatterns", Field, 16}, + {"Package.TestGoFiles", Field, 0}, + {"Package.TestImportPos", Field, 0}, + {"Package.TestImports", Field, 0}, + {"Package.XTestDirectives", Field, 21}, + {"Package.XTestEmbedPatternPos", Field, 16}, + {"Package.XTestEmbedPatterns", Field, 16}, + {"Package.XTestGoFiles", Field, 0}, + {"Package.XTestImportPos", Field, 0}, + {"Package.XTestImports", Field, 0}, + {"ToolDir", Var, 0}, + }, + "go/build/constraint": { + {"(*AndExpr).Eval", Method, 16}, + {"(*AndExpr).String", Method, 16}, + {"(*NotExpr).Eval", Method, 16}, + {"(*NotExpr).String", Method, 16}, + {"(*OrExpr).Eval", Method, 16}, + {"(*OrExpr).String", Method, 16}, + {"(*SyntaxError).Error", Method, 16}, + {"(*TagExpr).Eval", Method, 16}, + {"(*TagExpr).String", Method, 16}, + {"AndExpr", Type, 16}, + {"AndExpr.X", Field, 16}, + {"AndExpr.Y", Field, 16}, + {"Expr", Type, 16}, + {"GoVersion", Func, 21}, + {"IsGoBuild", Func, 16}, + {"IsPlusBuild", Func, 16}, + {"NotExpr", Type, 16}, + {"NotExpr.X", Field, 16}, + {"OrExpr", Type, 16}, + {"OrExpr.X", Field, 16}, + {"OrExpr.Y", Field, 16}, + {"Parse", Func, 16}, + {"PlusBuildLines", Func, 16}, + {"SyntaxError", Type, 16}, + {"SyntaxError.Err", Field, 16}, + {"SyntaxError.Offset", Field, 16}, + {"TagExpr", Type, 16}, + {"TagExpr.Tag", Field, 16}, + }, + "go/constant": { + {"(Kind).String", Method, 18}, + {"BinaryOp", Func, 5}, + {"BitLen", Func, 5}, + {"Bool", Const, 5}, + {"BoolVal", Func, 5}, + {"Bytes", Func, 5}, + {"Compare", Func, 5}, + {"Complex", Const, 5}, + {"Denom", Func, 5}, + {"Float", Const, 5}, + {"Float32Val", Func, 5}, + {"Float64Val", Func, 5}, + {"Imag", Func, 5}, + {"Int", Const, 5}, + {"Int64Val", Func, 5}, + {"Kind", Type, 5}, + {"Make", Func, 13}, + {"MakeBool", Func, 5}, + {"MakeFloat64", Func, 5}, + {"MakeFromBytes", Func, 5}, + {"MakeFromLiteral", Func, 5}, + {"MakeImag", Func, 5}, + {"MakeInt64", Func, 5}, + {"MakeString", Func, 5}, + {"MakeUint64", Func, 5}, + {"MakeUnknown", Func, 5}, + {"Num", Func, 5}, + {"Real", Func, 5}, + {"Shift", Func, 5}, + {"Sign", Func, 5}, + {"String", Const, 5}, + {"StringVal", Func, 5}, + {"ToComplex", Func, 6}, + {"ToFloat", 
Func, 6}, + {"ToInt", Func, 6}, + {"Uint64Val", Func, 5}, + {"UnaryOp", Func, 5}, + {"Unknown", Const, 5}, + {"Val", Func, 13}, + {"Value", Type, 5}, + }, + "go/doc": { + {"(*Package).Filter", Method, 0}, + {"(*Package).HTML", Method, 19}, + {"(*Package).Markdown", Method, 19}, + {"(*Package).Parser", Method, 19}, + {"(*Package).Printer", Method, 19}, + {"(*Package).Synopsis", Method, 19}, + {"(*Package).Text", Method, 19}, + {"AllDecls", Const, 0}, + {"AllMethods", Const, 0}, + {"Example", Type, 0}, + {"Example.Code", Field, 0}, + {"Example.Comments", Field, 0}, + {"Example.Doc", Field, 0}, + {"Example.EmptyOutput", Field, 1}, + {"Example.Name", Field, 0}, + {"Example.Order", Field, 1}, + {"Example.Output", Field, 0}, + {"Example.Play", Field, 1}, + {"Example.Suffix", Field, 14}, + {"Example.Unordered", Field, 7}, + {"Examples", Func, 0}, + {"Filter", Type, 0}, + {"Func", Type, 0}, + {"Func.Decl", Field, 0}, + {"Func.Doc", Field, 0}, + {"Func.Examples", Field, 14}, + {"Func.Level", Field, 0}, + {"Func.Name", Field, 0}, + {"Func.Orig", Field, 0}, + {"Func.Recv", Field, 0}, + {"IllegalPrefixes", Var, 1}, + {"IsPredeclared", Func, 8}, + {"Mode", Type, 0}, + {"New", Func, 0}, + {"NewFromFiles", Func, 14}, + {"Note", Type, 1}, + {"Note.Body", Field, 1}, + {"Note.End", Field, 1}, + {"Note.Pos", Field, 1}, + {"Note.UID", Field, 1}, + {"Package", Type, 0}, + {"Package.Bugs", Field, 0}, + {"Package.Consts", Field, 0}, + {"Package.Doc", Field, 0}, + {"Package.Examples", Field, 14}, + {"Package.Filenames", Field, 0}, + {"Package.Funcs", Field, 0}, + {"Package.ImportPath", Field, 0}, + {"Package.Imports", Field, 0}, + {"Package.Name", Field, 0}, + {"Package.Notes", Field, 1}, + {"Package.Types", Field, 0}, + {"Package.Vars", Field, 0}, + {"PreserveAST", Const, 12}, + {"Synopsis", Func, 0}, + {"ToHTML", Func, 0}, + {"ToText", Func, 0}, + {"Type", Type, 0}, + {"Type.Consts", Field, 0}, + {"Type.Decl", Field, 0}, + {"Type.Doc", Field, 0}, + {"Type.Examples", Field, 14}, + {"Type.Funcs", Field, 0}, + {"Type.Methods", Field, 0}, + {"Type.Name", Field, 0}, + {"Type.Vars", Field, 0}, + {"Value", Type, 0}, + {"Value.Decl", Field, 0}, + {"Value.Doc", Field, 0}, + {"Value.Names", Field, 0}, + }, + "go/doc/comment": { + {"(*DocLink).DefaultURL", Method, 19}, + {"(*Heading).DefaultID", Method, 19}, + {"(*List).BlankBefore", Method, 19}, + {"(*List).BlankBetween", Method, 19}, + {"(*Parser).Parse", Method, 19}, + {"(*Printer).Comment", Method, 19}, + {"(*Printer).HTML", Method, 19}, + {"(*Printer).Markdown", Method, 19}, + {"(*Printer).Text", Method, 19}, + {"Block", Type, 19}, + {"Code", Type, 19}, + {"Code.Text", Field, 19}, + {"DefaultLookupPackage", Func, 19}, + {"Doc", Type, 19}, + {"Doc.Content", Field, 19}, + {"Doc.Links", Field, 19}, + {"DocLink", Type, 19}, + {"DocLink.ImportPath", Field, 19}, + {"DocLink.Name", Field, 19}, + {"DocLink.Recv", Field, 19}, + {"DocLink.Text", Field, 19}, + {"Heading", Type, 19}, + {"Heading.Text", Field, 19}, + {"Italic", Type, 19}, + {"Link", Type, 19}, + {"Link.Auto", Field, 19}, + {"Link.Text", Field, 19}, + {"Link.URL", Field, 19}, + {"LinkDef", Type, 19}, + {"LinkDef.Text", Field, 19}, + {"LinkDef.URL", Field, 19}, + {"LinkDef.Used", Field, 19}, + {"List", Type, 19}, + {"List.ForceBlankBefore", Field, 19}, + {"List.ForceBlankBetween", Field, 19}, + {"List.Items", Field, 19}, + {"ListItem", Type, 19}, + {"ListItem.Content", Field, 19}, + {"ListItem.Number", Field, 19}, + {"Paragraph", Type, 19}, + {"Paragraph.Text", Field, 19}, + {"Parser", Type, 19}, + 
{"Parser.LookupPackage", Field, 19}, + {"Parser.LookupSym", Field, 19}, + {"Parser.Words", Field, 19}, + {"Plain", Type, 19}, + {"Printer", Type, 19}, + {"Printer.DocLinkBaseURL", Field, 19}, + {"Printer.DocLinkURL", Field, 19}, + {"Printer.HeadingID", Field, 19}, + {"Printer.HeadingLevel", Field, 19}, + {"Printer.TextCodePrefix", Field, 19}, + {"Printer.TextPrefix", Field, 19}, + {"Printer.TextWidth", Field, 19}, + {"Text", Type, 19}, + }, + "go/format": { + {"Node", Func, 1}, + {"Source", Func, 1}, + }, + "go/importer": { + {"Default", Func, 5}, + {"For", Func, 5}, + {"ForCompiler", Func, 12}, + {"Lookup", Type, 5}, + }, + "go/parser": { + {"AllErrors", Const, 1}, + {"DeclarationErrors", Const, 0}, + {"ImportsOnly", Const, 0}, + {"Mode", Type, 0}, + {"PackageClauseOnly", Const, 0}, + {"ParseComments", Const, 0}, + {"ParseDir", Func, 0}, + {"ParseExpr", Func, 0}, + {"ParseExprFrom", Func, 5}, + {"ParseFile", Func, 0}, + {"SkipObjectResolution", Const, 17}, + {"SpuriousErrors", Const, 0}, + {"Trace", Const, 0}, + }, + "go/printer": { + {"(*Config).Fprint", Method, 0}, + {"CommentedNode", Type, 0}, + {"CommentedNode.Comments", Field, 0}, + {"CommentedNode.Node", Field, 0}, + {"Config", Type, 0}, + {"Config.Indent", Field, 1}, + {"Config.Mode", Field, 0}, + {"Config.Tabwidth", Field, 0}, + {"Fprint", Func, 0}, + {"Mode", Type, 0}, + {"RawFormat", Const, 0}, + {"SourcePos", Const, 0}, + {"TabIndent", Const, 0}, + {"UseSpaces", Const, 0}, + }, + "go/scanner": { + {"(*ErrorList).Add", Method, 0}, + {"(*ErrorList).RemoveMultiples", Method, 0}, + {"(*ErrorList).Reset", Method, 0}, + {"(*Scanner).Init", Method, 0}, + {"(*Scanner).Scan", Method, 0}, + {"(Error).Error", Method, 0}, + {"(ErrorList).Err", Method, 0}, + {"(ErrorList).Error", Method, 0}, + {"(ErrorList).Len", Method, 0}, + {"(ErrorList).Less", Method, 0}, + {"(ErrorList).Sort", Method, 0}, + {"(ErrorList).Swap", Method, 0}, + {"Error", Type, 0}, + {"Error.Msg", Field, 0}, + {"Error.Pos", Field, 0}, + {"ErrorHandler", Type, 0}, + {"ErrorList", Type, 0}, + {"Mode", Type, 0}, + {"PrintError", Func, 0}, + {"ScanComments", Const, 0}, + {"Scanner", Type, 0}, + {"Scanner.ErrorCount", Field, 0}, + }, + "go/token": { + {"(*File).AddLine", Method, 0}, + {"(*File).AddLineColumnInfo", Method, 11}, + {"(*File).AddLineInfo", Method, 0}, + {"(*File).Base", Method, 0}, + {"(*File).Line", Method, 0}, + {"(*File).LineCount", Method, 0}, + {"(*File).LineStart", Method, 12}, + {"(*File).Lines", Method, 21}, + {"(*File).MergeLine", Method, 2}, + {"(*File).Name", Method, 0}, + {"(*File).Offset", Method, 0}, + {"(*File).Pos", Method, 0}, + {"(*File).Position", Method, 0}, + {"(*File).PositionFor", Method, 4}, + {"(*File).SetLines", Method, 0}, + {"(*File).SetLinesForContent", Method, 0}, + {"(*File).Size", Method, 0}, + {"(*FileSet).AddFile", Method, 0}, + {"(*FileSet).Base", Method, 0}, + {"(*FileSet).File", Method, 0}, + {"(*FileSet).Iterate", Method, 0}, + {"(*FileSet).Position", Method, 0}, + {"(*FileSet).PositionFor", Method, 4}, + {"(*FileSet).Read", Method, 0}, + {"(*FileSet).RemoveFile", Method, 20}, + {"(*FileSet).Write", Method, 0}, + {"(*Position).IsValid", Method, 0}, + {"(Pos).IsValid", Method, 0}, + {"(Position).String", Method, 0}, + {"(Token).IsKeyword", Method, 0}, + {"(Token).IsLiteral", Method, 0}, + {"(Token).IsOperator", Method, 0}, + {"(Token).Precedence", Method, 0}, + {"(Token).String", Method, 0}, + {"ADD", Const, 0}, + {"ADD_ASSIGN", Const, 0}, + {"AND", Const, 0}, + {"AND_ASSIGN", Const, 0}, + {"AND_NOT", Const, 0}, + 
{"AND_NOT_ASSIGN", Const, 0}, + {"ARROW", Const, 0}, + {"ASSIGN", Const, 0}, + {"BREAK", Const, 0}, + {"CASE", Const, 0}, + {"CHAN", Const, 0}, + {"CHAR", Const, 0}, + {"COLON", Const, 0}, + {"COMMA", Const, 0}, + {"COMMENT", Const, 0}, + {"CONST", Const, 0}, + {"CONTINUE", Const, 0}, + {"DEC", Const, 0}, + {"DEFAULT", Const, 0}, + {"DEFER", Const, 0}, + {"DEFINE", Const, 0}, + {"ELLIPSIS", Const, 0}, + {"ELSE", Const, 0}, + {"EOF", Const, 0}, + {"EQL", Const, 0}, + {"FALLTHROUGH", Const, 0}, + {"FLOAT", Const, 0}, + {"FOR", Const, 0}, + {"FUNC", Const, 0}, + {"File", Type, 0}, + {"FileSet", Type, 0}, + {"GEQ", Const, 0}, + {"GO", Const, 0}, + {"GOTO", Const, 0}, + {"GTR", Const, 0}, + {"HighestPrec", Const, 0}, + {"IDENT", Const, 0}, + {"IF", Const, 0}, + {"ILLEGAL", Const, 0}, + {"IMAG", Const, 0}, + {"IMPORT", Const, 0}, + {"INC", Const, 0}, + {"INT", Const, 0}, + {"INTERFACE", Const, 0}, + {"IsExported", Func, 13}, + {"IsIdentifier", Func, 13}, + {"IsKeyword", Func, 13}, + {"LAND", Const, 0}, + {"LBRACE", Const, 0}, + {"LBRACK", Const, 0}, + {"LEQ", Const, 0}, + {"LOR", Const, 0}, + {"LPAREN", Const, 0}, + {"LSS", Const, 0}, + {"Lookup", Func, 0}, + {"LowestPrec", Const, 0}, + {"MAP", Const, 0}, + {"MUL", Const, 0}, + {"MUL_ASSIGN", Const, 0}, + {"NEQ", Const, 0}, + {"NOT", Const, 0}, + {"NewFileSet", Func, 0}, + {"NoPos", Const, 0}, + {"OR", Const, 0}, + {"OR_ASSIGN", Const, 0}, + {"PACKAGE", Const, 0}, + {"PERIOD", Const, 0}, + {"Pos", Type, 0}, + {"Position", Type, 0}, + {"Position.Column", Field, 0}, + {"Position.Filename", Field, 0}, + {"Position.Line", Field, 0}, + {"Position.Offset", Field, 0}, + {"QUO", Const, 0}, + {"QUO_ASSIGN", Const, 0}, + {"RANGE", Const, 0}, + {"RBRACE", Const, 0}, + {"RBRACK", Const, 0}, + {"REM", Const, 0}, + {"REM_ASSIGN", Const, 0}, + {"RETURN", Const, 0}, + {"RPAREN", Const, 0}, + {"SELECT", Const, 0}, + {"SEMICOLON", Const, 0}, + {"SHL", Const, 0}, + {"SHL_ASSIGN", Const, 0}, + {"SHR", Const, 0}, + {"SHR_ASSIGN", Const, 0}, + {"STRING", Const, 0}, + {"STRUCT", Const, 0}, + {"SUB", Const, 0}, + {"SUB_ASSIGN", Const, 0}, + {"SWITCH", Const, 0}, + {"TILDE", Const, 18}, + {"TYPE", Const, 0}, + {"Token", Type, 0}, + {"UnaryPrec", Const, 0}, + {"VAR", Const, 0}, + {"XOR", Const, 0}, + {"XOR_ASSIGN", Const, 0}, + }, + "go/types": { + {"(*Alias).Obj", Method, 22}, + {"(*Alias).String", Method, 22}, + {"(*Alias).Underlying", Method, 22}, + {"(*ArgumentError).Error", Method, 18}, + {"(*ArgumentError).Unwrap", Method, 18}, + {"(*Array).Elem", Method, 5}, + {"(*Array).Len", Method, 5}, + {"(*Array).String", Method, 5}, + {"(*Array).Underlying", Method, 5}, + {"(*Basic).Info", Method, 5}, + {"(*Basic).Kind", Method, 5}, + {"(*Basic).Name", Method, 5}, + {"(*Basic).String", Method, 5}, + {"(*Basic).Underlying", Method, 5}, + {"(*Builtin).Exported", Method, 5}, + {"(*Builtin).Id", Method, 5}, + {"(*Builtin).Name", Method, 5}, + {"(*Builtin).Parent", Method, 5}, + {"(*Builtin).Pkg", Method, 5}, + {"(*Builtin).Pos", Method, 5}, + {"(*Builtin).String", Method, 5}, + {"(*Builtin).Type", Method, 5}, + {"(*Chan).Dir", Method, 5}, + {"(*Chan).Elem", Method, 5}, + {"(*Chan).String", Method, 5}, + {"(*Chan).Underlying", Method, 5}, + {"(*Checker).Files", Method, 5}, + {"(*Config).Check", Method, 5}, + {"(*Const).Exported", Method, 5}, + {"(*Const).Id", Method, 5}, + {"(*Const).Name", Method, 5}, + {"(*Const).Parent", Method, 5}, + {"(*Const).Pkg", Method, 5}, + {"(*Const).Pos", Method, 5}, + {"(*Const).String", Method, 5}, + {"(*Const).Type", Method, 5}, + {"(*Const).Val", 
Method, 5}, + {"(*Func).Exported", Method, 5}, + {"(*Func).FullName", Method, 5}, + {"(*Func).Id", Method, 5}, + {"(*Func).Name", Method, 5}, + {"(*Func).Origin", Method, 19}, + {"(*Func).Parent", Method, 5}, + {"(*Func).Pkg", Method, 5}, + {"(*Func).Pos", Method, 5}, + {"(*Func).Scope", Method, 5}, + {"(*Func).String", Method, 5}, + {"(*Func).Type", Method, 5}, + {"(*Info).ObjectOf", Method, 5}, + {"(*Info).PkgNameOf", Method, 22}, + {"(*Info).TypeOf", Method, 5}, + {"(*Initializer).String", Method, 5}, + {"(*Interface).Complete", Method, 5}, + {"(*Interface).Embedded", Method, 5}, + {"(*Interface).EmbeddedType", Method, 11}, + {"(*Interface).Empty", Method, 5}, + {"(*Interface).ExplicitMethod", Method, 5}, + {"(*Interface).IsComparable", Method, 18}, + {"(*Interface).IsImplicit", Method, 18}, + {"(*Interface).IsMethodSet", Method, 18}, + {"(*Interface).MarkImplicit", Method, 18}, + {"(*Interface).Method", Method, 5}, + {"(*Interface).NumEmbeddeds", Method, 5}, + {"(*Interface).NumExplicitMethods", Method, 5}, + {"(*Interface).NumMethods", Method, 5}, + {"(*Interface).String", Method, 5}, + {"(*Interface).Underlying", Method, 5}, + {"(*Label).Exported", Method, 5}, + {"(*Label).Id", Method, 5}, + {"(*Label).Name", Method, 5}, + {"(*Label).Parent", Method, 5}, + {"(*Label).Pkg", Method, 5}, + {"(*Label).Pos", Method, 5}, + {"(*Label).String", Method, 5}, + {"(*Label).Type", Method, 5}, + {"(*Map).Elem", Method, 5}, + {"(*Map).Key", Method, 5}, + {"(*Map).String", Method, 5}, + {"(*Map).Underlying", Method, 5}, + {"(*MethodSet).At", Method, 5}, + {"(*MethodSet).Len", Method, 5}, + {"(*MethodSet).Lookup", Method, 5}, + {"(*MethodSet).String", Method, 5}, + {"(*Named).AddMethod", Method, 5}, + {"(*Named).Method", Method, 5}, + {"(*Named).NumMethods", Method, 5}, + {"(*Named).Obj", Method, 5}, + {"(*Named).Origin", Method, 18}, + {"(*Named).SetTypeParams", Method, 18}, + {"(*Named).SetUnderlying", Method, 5}, + {"(*Named).String", Method, 5}, + {"(*Named).TypeArgs", Method, 18}, + {"(*Named).TypeParams", Method, 18}, + {"(*Named).Underlying", Method, 5}, + {"(*Nil).Exported", Method, 5}, + {"(*Nil).Id", Method, 5}, + {"(*Nil).Name", Method, 5}, + {"(*Nil).Parent", Method, 5}, + {"(*Nil).Pkg", Method, 5}, + {"(*Nil).Pos", Method, 5}, + {"(*Nil).String", Method, 5}, + {"(*Nil).Type", Method, 5}, + {"(*Package).Complete", Method, 5}, + {"(*Package).GoVersion", Method, 21}, + {"(*Package).Imports", Method, 5}, + {"(*Package).MarkComplete", Method, 5}, + {"(*Package).Name", Method, 5}, + {"(*Package).Path", Method, 5}, + {"(*Package).Scope", Method, 5}, + {"(*Package).SetImports", Method, 5}, + {"(*Package).SetName", Method, 6}, + {"(*Package).String", Method, 5}, + {"(*PkgName).Exported", Method, 5}, + {"(*PkgName).Id", Method, 5}, + {"(*PkgName).Imported", Method, 5}, + {"(*PkgName).Name", Method, 5}, + {"(*PkgName).Parent", Method, 5}, + {"(*PkgName).Pkg", Method, 5}, + {"(*PkgName).Pos", Method, 5}, + {"(*PkgName).String", Method, 5}, + {"(*PkgName).Type", Method, 5}, + {"(*Pointer).Elem", Method, 5}, + {"(*Pointer).String", Method, 5}, + {"(*Pointer).Underlying", Method, 5}, + {"(*Scope).Child", Method, 5}, + {"(*Scope).Contains", Method, 5}, + {"(*Scope).End", Method, 5}, + {"(*Scope).Innermost", Method, 5}, + {"(*Scope).Insert", Method, 5}, + {"(*Scope).Len", Method, 5}, + {"(*Scope).Lookup", Method, 5}, + {"(*Scope).LookupParent", Method, 5}, + {"(*Scope).Names", Method, 5}, + {"(*Scope).NumChildren", Method, 5}, + {"(*Scope).Parent", Method, 5}, + {"(*Scope).Pos", Method, 5}, + 
{"(*Scope).String", Method, 5}, + {"(*Scope).WriteTo", Method, 5}, + {"(*Selection).Index", Method, 5}, + {"(*Selection).Indirect", Method, 5}, + {"(*Selection).Kind", Method, 5}, + {"(*Selection).Obj", Method, 5}, + {"(*Selection).Recv", Method, 5}, + {"(*Selection).String", Method, 5}, + {"(*Selection).Type", Method, 5}, + {"(*Signature).Params", Method, 5}, + {"(*Signature).Recv", Method, 5}, + {"(*Signature).RecvTypeParams", Method, 18}, + {"(*Signature).Results", Method, 5}, + {"(*Signature).String", Method, 5}, + {"(*Signature).TypeParams", Method, 18}, + {"(*Signature).Underlying", Method, 5}, + {"(*Signature).Variadic", Method, 5}, + {"(*Slice).Elem", Method, 5}, + {"(*Slice).String", Method, 5}, + {"(*Slice).Underlying", Method, 5}, + {"(*StdSizes).Alignof", Method, 5}, + {"(*StdSizes).Offsetsof", Method, 5}, + {"(*StdSizes).Sizeof", Method, 5}, + {"(*Struct).Field", Method, 5}, + {"(*Struct).NumFields", Method, 5}, + {"(*Struct).String", Method, 5}, + {"(*Struct).Tag", Method, 5}, + {"(*Struct).Underlying", Method, 5}, + {"(*Term).String", Method, 18}, + {"(*Term).Tilde", Method, 18}, + {"(*Term).Type", Method, 18}, + {"(*Tuple).At", Method, 5}, + {"(*Tuple).Len", Method, 5}, + {"(*Tuple).String", Method, 5}, + {"(*Tuple).Underlying", Method, 5}, + {"(*TypeList).At", Method, 18}, + {"(*TypeList).Len", Method, 18}, + {"(*TypeName).Exported", Method, 5}, + {"(*TypeName).Id", Method, 5}, + {"(*TypeName).IsAlias", Method, 9}, + {"(*TypeName).Name", Method, 5}, + {"(*TypeName).Parent", Method, 5}, + {"(*TypeName).Pkg", Method, 5}, + {"(*TypeName).Pos", Method, 5}, + {"(*TypeName).String", Method, 5}, + {"(*TypeName).Type", Method, 5}, + {"(*TypeParam).Constraint", Method, 18}, + {"(*TypeParam).Index", Method, 18}, + {"(*TypeParam).Obj", Method, 18}, + {"(*TypeParam).SetConstraint", Method, 18}, + {"(*TypeParam).String", Method, 18}, + {"(*TypeParam).Underlying", Method, 18}, + {"(*TypeParamList).At", Method, 18}, + {"(*TypeParamList).Len", Method, 18}, + {"(*Union).Len", Method, 18}, + {"(*Union).String", Method, 18}, + {"(*Union).Term", Method, 18}, + {"(*Union).Underlying", Method, 18}, + {"(*Var).Anonymous", Method, 5}, + {"(*Var).Embedded", Method, 11}, + {"(*Var).Exported", Method, 5}, + {"(*Var).Id", Method, 5}, + {"(*Var).IsField", Method, 5}, + {"(*Var).Name", Method, 5}, + {"(*Var).Origin", Method, 19}, + {"(*Var).Parent", Method, 5}, + {"(*Var).Pkg", Method, 5}, + {"(*Var).Pos", Method, 5}, + {"(*Var).String", Method, 5}, + {"(*Var).Type", Method, 5}, + {"(Checker).ObjectOf", Method, 5}, + {"(Checker).PkgNameOf", Method, 22}, + {"(Checker).TypeOf", Method, 5}, + {"(Error).Error", Method, 5}, + {"(TypeAndValue).Addressable", Method, 5}, + {"(TypeAndValue).Assignable", Method, 5}, + {"(TypeAndValue).HasOk", Method, 5}, + {"(TypeAndValue).IsBuiltin", Method, 5}, + {"(TypeAndValue).IsNil", Method, 5}, + {"(TypeAndValue).IsType", Method, 5}, + {"(TypeAndValue).IsValue", Method, 5}, + {"(TypeAndValue).IsVoid", Method, 5}, + {"Alias", Type, 22}, + {"ArgumentError", Type, 18}, + {"ArgumentError.Err", Field, 18}, + {"ArgumentError.Index", Field, 18}, + {"Array", Type, 5}, + {"AssertableTo", Func, 5}, + {"AssignableTo", Func, 5}, + {"Basic", Type, 5}, + {"BasicInfo", Type, 5}, + {"BasicKind", Type, 5}, + {"Bool", Const, 5}, + {"Builtin", Type, 5}, + {"Byte", Const, 5}, + {"Chan", Type, 5}, + {"ChanDir", Type, 5}, + {"CheckExpr", Func, 13}, + {"Checker", Type, 5}, + {"Checker.Info", Field, 5}, + {"Comparable", Func, 5}, + {"Complex128", Const, 5}, + {"Complex64", Const, 5}, + 
{"Config", Type, 5}, + {"Config.Context", Field, 18}, + {"Config.DisableUnusedImportCheck", Field, 5}, + {"Config.Error", Field, 5}, + {"Config.FakeImportC", Field, 5}, + {"Config.GoVersion", Field, 18}, + {"Config.IgnoreFuncBodies", Field, 5}, + {"Config.Importer", Field, 5}, + {"Config.Sizes", Field, 5}, + {"Const", Type, 5}, + {"Context", Type, 18}, + {"ConvertibleTo", Func, 5}, + {"DefPredeclaredTestFuncs", Func, 5}, + {"Default", Func, 8}, + {"Error", Type, 5}, + {"Error.Fset", Field, 5}, + {"Error.Msg", Field, 5}, + {"Error.Pos", Field, 5}, + {"Error.Soft", Field, 5}, + {"Eval", Func, 5}, + {"ExprString", Func, 5}, + {"FieldVal", Const, 5}, + {"Float32", Const, 5}, + {"Float64", Const, 5}, + {"Func", Type, 5}, + {"Id", Func, 5}, + {"Identical", Func, 5}, + {"IdenticalIgnoreTags", Func, 8}, + {"Implements", Func, 5}, + {"ImportMode", Type, 6}, + {"Importer", Type, 5}, + {"ImporterFrom", Type, 6}, + {"Info", Type, 5}, + {"Info.Defs", Field, 5}, + {"Info.FileVersions", Field, 22}, + {"Info.Implicits", Field, 5}, + {"Info.InitOrder", Field, 5}, + {"Info.Instances", Field, 18}, + {"Info.Scopes", Field, 5}, + {"Info.Selections", Field, 5}, + {"Info.Types", Field, 5}, + {"Info.Uses", Field, 5}, + {"Initializer", Type, 5}, + {"Initializer.Lhs", Field, 5}, + {"Initializer.Rhs", Field, 5}, + {"Instance", Type, 18}, + {"Instance.Type", Field, 18}, + {"Instance.TypeArgs", Field, 18}, + {"Instantiate", Func, 18}, + {"Int", Const, 5}, + {"Int16", Const, 5}, + {"Int32", Const, 5}, + {"Int64", Const, 5}, + {"Int8", Const, 5}, + {"Interface", Type, 5}, + {"Invalid", Const, 5}, + {"IsBoolean", Const, 5}, + {"IsComplex", Const, 5}, + {"IsConstType", Const, 5}, + {"IsFloat", Const, 5}, + {"IsInteger", Const, 5}, + {"IsInterface", Func, 5}, + {"IsNumeric", Const, 5}, + {"IsOrdered", Const, 5}, + {"IsString", Const, 5}, + {"IsUnsigned", Const, 5}, + {"IsUntyped", Const, 5}, + {"Label", Type, 5}, + {"LookupFieldOrMethod", Func, 5}, + {"Map", Type, 5}, + {"MethodExpr", Const, 5}, + {"MethodSet", Type, 5}, + {"MethodVal", Const, 5}, + {"MissingMethod", Func, 5}, + {"Named", Type, 5}, + {"NewAlias", Func, 22}, + {"NewArray", Func, 5}, + {"NewChan", Func, 5}, + {"NewChecker", Func, 5}, + {"NewConst", Func, 5}, + {"NewContext", Func, 18}, + {"NewField", Func, 5}, + {"NewFunc", Func, 5}, + {"NewInterface", Func, 5}, + {"NewInterfaceType", Func, 11}, + {"NewLabel", Func, 5}, + {"NewMap", Func, 5}, + {"NewMethodSet", Func, 5}, + {"NewNamed", Func, 5}, + {"NewPackage", Func, 5}, + {"NewParam", Func, 5}, + {"NewPkgName", Func, 5}, + {"NewPointer", Func, 5}, + {"NewScope", Func, 5}, + {"NewSignature", Func, 5}, + {"NewSignatureType", Func, 18}, + {"NewSlice", Func, 5}, + {"NewStruct", Func, 5}, + {"NewTerm", Func, 18}, + {"NewTuple", Func, 5}, + {"NewTypeName", Func, 5}, + {"NewTypeParam", Func, 18}, + {"NewUnion", Func, 18}, + {"NewVar", Func, 5}, + {"Nil", Type, 5}, + {"Object", Type, 5}, + {"ObjectString", Func, 5}, + {"Package", Type, 5}, + {"PkgName", Type, 5}, + {"Pointer", Type, 5}, + {"Qualifier", Type, 5}, + {"RecvOnly", Const, 5}, + {"RelativeTo", Func, 5}, + {"Rune", Const, 5}, + {"Satisfies", Func, 20}, + {"Scope", Type, 5}, + {"Selection", Type, 5}, + {"SelectionKind", Type, 5}, + {"SelectionString", Func, 5}, + {"SendOnly", Const, 5}, + {"SendRecv", Const, 5}, + {"Signature", Type, 5}, + {"Sizes", Type, 5}, + {"SizesFor", Func, 9}, + {"Slice", Type, 5}, + {"StdSizes", Type, 5}, + {"StdSizes.MaxAlign", Field, 5}, + {"StdSizes.WordSize", Field, 5}, + {"String", Const, 5}, + {"Struct", Type, 5}, + {"Term", 
Type, 18}, + {"Tuple", Type, 5}, + {"Typ", Var, 5}, + {"Type", Type, 5}, + {"TypeAndValue", Type, 5}, + {"TypeAndValue.Type", Field, 5}, + {"TypeAndValue.Value", Field, 5}, + {"TypeList", Type, 18}, + {"TypeName", Type, 5}, + {"TypeParam", Type, 18}, + {"TypeParamList", Type, 18}, + {"TypeString", Func, 5}, + {"Uint", Const, 5}, + {"Uint16", Const, 5}, + {"Uint32", Const, 5}, + {"Uint64", Const, 5}, + {"Uint8", Const, 5}, + {"Uintptr", Const, 5}, + {"Unalias", Func, 22}, + {"Union", Type, 18}, + {"Universe", Var, 5}, + {"Unsafe", Var, 5}, + {"UnsafePointer", Const, 5}, + {"UntypedBool", Const, 5}, + {"UntypedComplex", Const, 5}, + {"UntypedFloat", Const, 5}, + {"UntypedInt", Const, 5}, + {"UntypedNil", Const, 5}, + {"UntypedRune", Const, 5}, + {"UntypedString", Const, 5}, + {"Var", Type, 5}, + {"WriteExpr", Func, 5}, + {"WriteSignature", Func, 5}, + {"WriteType", Func, 5}, + }, + "go/version": { + {"Compare", Func, 22}, + {"IsValid", Func, 22}, + {"Lang", Func, 22}, + }, + "hash": { + {"Hash", Type, 0}, + {"Hash32", Type, 0}, + {"Hash64", Type, 0}, + }, + "hash/adler32": { + {"Checksum", Func, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + }, + "hash/crc32": { + {"Castagnoli", Const, 0}, + {"Checksum", Func, 0}, + {"ChecksumIEEE", Func, 0}, + {"IEEE", Const, 0}, + {"IEEETable", Var, 0}, + {"Koopman", Const, 0}, + {"MakeTable", Func, 0}, + {"New", Func, 0}, + {"NewIEEE", Func, 0}, + {"Size", Const, 0}, + {"Table", Type, 0}, + {"Update", Func, 0}, + }, + "hash/crc64": { + {"Checksum", Func, 0}, + {"ECMA", Const, 0}, + {"ISO", Const, 0}, + {"MakeTable", Func, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Table", Type, 0}, + {"Update", Func, 0}, + }, + "hash/fnv": { + {"New128", Func, 9}, + {"New128a", Func, 9}, + {"New32", Func, 0}, + {"New32a", Func, 0}, + {"New64", Func, 0}, + {"New64a", Func, 0}, + }, + "hash/maphash": { + {"(*Hash).BlockSize", Method, 14}, + {"(*Hash).Reset", Method, 14}, + {"(*Hash).Seed", Method, 14}, + {"(*Hash).SetSeed", Method, 14}, + {"(*Hash).Size", Method, 14}, + {"(*Hash).Sum", Method, 14}, + {"(*Hash).Sum64", Method, 14}, + {"(*Hash).Write", Method, 14}, + {"(*Hash).WriteByte", Method, 14}, + {"(*Hash).WriteString", Method, 14}, + {"Bytes", Func, 19}, + {"Hash", Type, 14}, + {"MakeSeed", Func, 14}, + {"Seed", Type, 14}, + {"String", Func, 19}, + }, + "html": { + {"EscapeString", Func, 0}, + {"UnescapeString", Func, 0}, + }, + "html/template": { + {"(*Error).Error", Method, 0}, + {"(*Template).AddParseTree", Method, 0}, + {"(*Template).Clone", Method, 0}, + {"(*Template).DefinedTemplates", Method, 6}, + {"(*Template).Delims", Method, 0}, + {"(*Template).Execute", Method, 0}, + {"(*Template).ExecuteTemplate", Method, 0}, + {"(*Template).Funcs", Method, 0}, + {"(*Template).Lookup", Method, 0}, + {"(*Template).Name", Method, 0}, + {"(*Template).New", Method, 0}, + {"(*Template).Option", Method, 5}, + {"(*Template).Parse", Method, 0}, + {"(*Template).ParseFS", Method, 16}, + {"(*Template).ParseFiles", Method, 0}, + {"(*Template).ParseGlob", Method, 0}, + {"(*Template).Templates", Method, 0}, + {"CSS", Type, 0}, + {"ErrAmbigContext", Const, 0}, + {"ErrBadHTML", Const, 0}, + {"ErrBranchEnd", Const, 0}, + {"ErrEndContext", Const, 0}, + {"ErrJSTemplate", Const, 21}, + {"ErrNoSuchTemplate", Const, 0}, + {"ErrOutputContext", Const, 0}, + {"ErrPartialCharset", Const, 0}, + {"ErrPartialEscape", Const, 0}, + {"ErrPredefinedEscaper", Const, 9}, + {"ErrRangeLoopReentry", Const, 0}, + {"ErrSlashAmbig", Const, 0}, + {"Error", Type, 0}, + {"Error.Description", Field, 0}, + 
{"Error.ErrorCode", Field, 0}, + {"Error.Line", Field, 0}, + {"Error.Name", Field, 0}, + {"Error.Node", Field, 4}, + {"ErrorCode", Type, 0}, + {"FuncMap", Type, 0}, + {"HTML", Type, 0}, + {"HTMLAttr", Type, 0}, + {"HTMLEscape", Func, 0}, + {"HTMLEscapeString", Func, 0}, + {"HTMLEscaper", Func, 0}, + {"IsTrue", Func, 6}, + {"JS", Type, 0}, + {"JSEscape", Func, 0}, + {"JSEscapeString", Func, 0}, + {"JSEscaper", Func, 0}, + {"JSStr", Type, 0}, + {"Must", Func, 0}, + {"New", Func, 0}, + {"OK", Const, 0}, + {"ParseFS", Func, 16}, + {"ParseFiles", Func, 0}, + {"ParseGlob", Func, 0}, + {"Srcset", Type, 10}, + {"Template", Type, 0}, + {"Template.Tree", Field, 2}, + {"URL", Type, 0}, + {"URLQueryEscaper", Func, 0}, + }, + "image": { + {"(*Alpha).AlphaAt", Method, 4}, + {"(*Alpha).At", Method, 0}, + {"(*Alpha).Bounds", Method, 0}, + {"(*Alpha).ColorModel", Method, 0}, + {"(*Alpha).Opaque", Method, 0}, + {"(*Alpha).PixOffset", Method, 0}, + {"(*Alpha).RGBA64At", Method, 17}, + {"(*Alpha).Set", Method, 0}, + {"(*Alpha).SetAlpha", Method, 0}, + {"(*Alpha).SetRGBA64", Method, 17}, + {"(*Alpha).SubImage", Method, 0}, + {"(*Alpha16).Alpha16At", Method, 4}, + {"(*Alpha16).At", Method, 0}, + {"(*Alpha16).Bounds", Method, 0}, + {"(*Alpha16).ColorModel", Method, 0}, + {"(*Alpha16).Opaque", Method, 0}, + {"(*Alpha16).PixOffset", Method, 0}, + {"(*Alpha16).RGBA64At", Method, 17}, + {"(*Alpha16).Set", Method, 0}, + {"(*Alpha16).SetAlpha16", Method, 0}, + {"(*Alpha16).SetRGBA64", Method, 17}, + {"(*Alpha16).SubImage", Method, 0}, + {"(*CMYK).At", Method, 5}, + {"(*CMYK).Bounds", Method, 5}, + {"(*CMYK).CMYKAt", Method, 5}, + {"(*CMYK).ColorModel", Method, 5}, + {"(*CMYK).Opaque", Method, 5}, + {"(*CMYK).PixOffset", Method, 5}, + {"(*CMYK).RGBA64At", Method, 17}, + {"(*CMYK).Set", Method, 5}, + {"(*CMYK).SetCMYK", Method, 5}, + {"(*CMYK).SetRGBA64", Method, 17}, + {"(*CMYK).SubImage", Method, 5}, + {"(*Gray).At", Method, 0}, + {"(*Gray).Bounds", Method, 0}, + {"(*Gray).ColorModel", Method, 0}, + {"(*Gray).GrayAt", Method, 4}, + {"(*Gray).Opaque", Method, 0}, + {"(*Gray).PixOffset", Method, 0}, + {"(*Gray).RGBA64At", Method, 17}, + {"(*Gray).Set", Method, 0}, + {"(*Gray).SetGray", Method, 0}, + {"(*Gray).SetRGBA64", Method, 17}, + {"(*Gray).SubImage", Method, 0}, + {"(*Gray16).At", Method, 0}, + {"(*Gray16).Bounds", Method, 0}, + {"(*Gray16).ColorModel", Method, 0}, + {"(*Gray16).Gray16At", Method, 4}, + {"(*Gray16).Opaque", Method, 0}, + {"(*Gray16).PixOffset", Method, 0}, + {"(*Gray16).RGBA64At", Method, 17}, + {"(*Gray16).Set", Method, 0}, + {"(*Gray16).SetGray16", Method, 0}, + {"(*Gray16).SetRGBA64", Method, 17}, + {"(*Gray16).SubImage", Method, 0}, + {"(*NRGBA).At", Method, 0}, + {"(*NRGBA).Bounds", Method, 0}, + {"(*NRGBA).ColorModel", Method, 0}, + {"(*NRGBA).NRGBAAt", Method, 4}, + {"(*NRGBA).Opaque", Method, 0}, + {"(*NRGBA).PixOffset", Method, 0}, + {"(*NRGBA).RGBA64At", Method, 17}, + {"(*NRGBA).Set", Method, 0}, + {"(*NRGBA).SetNRGBA", Method, 0}, + {"(*NRGBA).SetRGBA64", Method, 17}, + {"(*NRGBA).SubImage", Method, 0}, + {"(*NRGBA64).At", Method, 0}, + {"(*NRGBA64).Bounds", Method, 0}, + {"(*NRGBA64).ColorModel", Method, 0}, + {"(*NRGBA64).NRGBA64At", Method, 4}, + {"(*NRGBA64).Opaque", Method, 0}, + {"(*NRGBA64).PixOffset", Method, 0}, + {"(*NRGBA64).RGBA64At", Method, 17}, + {"(*NRGBA64).Set", Method, 0}, + {"(*NRGBA64).SetNRGBA64", Method, 0}, + {"(*NRGBA64).SetRGBA64", Method, 17}, + {"(*NRGBA64).SubImage", Method, 0}, + {"(*NYCbCrA).AOffset", Method, 6}, + {"(*NYCbCrA).At", Method, 6}, + 
{"(*NYCbCrA).Bounds", Method, 6}, + {"(*NYCbCrA).COffset", Method, 6}, + {"(*NYCbCrA).ColorModel", Method, 6}, + {"(*NYCbCrA).NYCbCrAAt", Method, 6}, + {"(*NYCbCrA).Opaque", Method, 6}, + {"(*NYCbCrA).RGBA64At", Method, 17}, + {"(*NYCbCrA).SubImage", Method, 6}, + {"(*NYCbCrA).YCbCrAt", Method, 6}, + {"(*NYCbCrA).YOffset", Method, 6}, + {"(*Paletted).At", Method, 0}, + {"(*Paletted).Bounds", Method, 0}, + {"(*Paletted).ColorIndexAt", Method, 0}, + {"(*Paletted).ColorModel", Method, 0}, + {"(*Paletted).Opaque", Method, 0}, + {"(*Paletted).PixOffset", Method, 0}, + {"(*Paletted).RGBA64At", Method, 17}, + {"(*Paletted).Set", Method, 0}, + {"(*Paletted).SetColorIndex", Method, 0}, + {"(*Paletted).SetRGBA64", Method, 17}, + {"(*Paletted).SubImage", Method, 0}, + {"(*RGBA).At", Method, 0}, + {"(*RGBA).Bounds", Method, 0}, + {"(*RGBA).ColorModel", Method, 0}, + {"(*RGBA).Opaque", Method, 0}, + {"(*RGBA).PixOffset", Method, 0}, + {"(*RGBA).RGBA64At", Method, 17}, + {"(*RGBA).RGBAAt", Method, 4}, + {"(*RGBA).Set", Method, 0}, + {"(*RGBA).SetRGBA", Method, 0}, + {"(*RGBA).SetRGBA64", Method, 17}, + {"(*RGBA).SubImage", Method, 0}, + {"(*RGBA64).At", Method, 0}, + {"(*RGBA64).Bounds", Method, 0}, + {"(*RGBA64).ColorModel", Method, 0}, + {"(*RGBA64).Opaque", Method, 0}, + {"(*RGBA64).PixOffset", Method, 0}, + {"(*RGBA64).RGBA64At", Method, 4}, + {"(*RGBA64).Set", Method, 0}, + {"(*RGBA64).SetRGBA64", Method, 0}, + {"(*RGBA64).SubImage", Method, 0}, + {"(*Uniform).At", Method, 0}, + {"(*Uniform).Bounds", Method, 0}, + {"(*Uniform).ColorModel", Method, 0}, + {"(*Uniform).Convert", Method, 0}, + {"(*Uniform).Opaque", Method, 0}, + {"(*Uniform).RGBA", Method, 0}, + {"(*Uniform).RGBA64At", Method, 17}, + {"(*YCbCr).At", Method, 0}, + {"(*YCbCr).Bounds", Method, 0}, + {"(*YCbCr).COffset", Method, 0}, + {"(*YCbCr).ColorModel", Method, 0}, + {"(*YCbCr).Opaque", Method, 0}, + {"(*YCbCr).RGBA64At", Method, 17}, + {"(*YCbCr).SubImage", Method, 0}, + {"(*YCbCr).YCbCrAt", Method, 4}, + {"(*YCbCr).YOffset", Method, 0}, + {"(Point).Add", Method, 0}, + {"(Point).Div", Method, 0}, + {"(Point).Eq", Method, 0}, + {"(Point).In", Method, 0}, + {"(Point).Mod", Method, 0}, + {"(Point).Mul", Method, 0}, + {"(Point).String", Method, 0}, + {"(Point).Sub", Method, 0}, + {"(Rectangle).Add", Method, 0}, + {"(Rectangle).At", Method, 5}, + {"(Rectangle).Bounds", Method, 5}, + {"(Rectangle).Canon", Method, 0}, + {"(Rectangle).ColorModel", Method, 5}, + {"(Rectangle).Dx", Method, 0}, + {"(Rectangle).Dy", Method, 0}, + {"(Rectangle).Empty", Method, 0}, + {"(Rectangle).Eq", Method, 0}, + {"(Rectangle).In", Method, 0}, + {"(Rectangle).Inset", Method, 0}, + {"(Rectangle).Intersect", Method, 0}, + {"(Rectangle).Overlaps", Method, 0}, + {"(Rectangle).RGBA64At", Method, 17}, + {"(Rectangle).Size", Method, 0}, + {"(Rectangle).String", Method, 0}, + {"(Rectangle).Sub", Method, 0}, + {"(Rectangle).Union", Method, 0}, + {"(YCbCrSubsampleRatio).String", Method, 0}, + {"Alpha", Type, 0}, + {"Alpha.Pix", Field, 0}, + {"Alpha.Rect", Field, 0}, + {"Alpha.Stride", Field, 0}, + {"Alpha16", Type, 0}, + {"Alpha16.Pix", Field, 0}, + {"Alpha16.Rect", Field, 0}, + {"Alpha16.Stride", Field, 0}, + {"Black", Var, 0}, + {"CMYK", Type, 5}, + {"CMYK.Pix", Field, 5}, + {"CMYK.Rect", Field, 5}, + {"CMYK.Stride", Field, 5}, + {"Config", Type, 0}, + {"Config.ColorModel", Field, 0}, + {"Config.Height", Field, 0}, + {"Config.Width", Field, 0}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"ErrFormat", Var, 0}, + {"Gray", Type, 0}, + {"Gray.Pix", Field, 0}, + 
{"Gray.Rect", Field, 0}, + {"Gray.Stride", Field, 0}, + {"Gray16", Type, 0}, + {"Gray16.Pix", Field, 0}, + {"Gray16.Rect", Field, 0}, + {"Gray16.Stride", Field, 0}, + {"Image", Type, 0}, + {"NRGBA", Type, 0}, + {"NRGBA.Pix", Field, 0}, + {"NRGBA.Rect", Field, 0}, + {"NRGBA.Stride", Field, 0}, + {"NRGBA64", Type, 0}, + {"NRGBA64.Pix", Field, 0}, + {"NRGBA64.Rect", Field, 0}, + {"NRGBA64.Stride", Field, 0}, + {"NYCbCrA", Type, 6}, + {"NYCbCrA.A", Field, 6}, + {"NYCbCrA.AStride", Field, 6}, + {"NYCbCrA.YCbCr", Field, 6}, + {"NewAlpha", Func, 0}, + {"NewAlpha16", Func, 0}, + {"NewCMYK", Func, 5}, + {"NewGray", Func, 0}, + {"NewGray16", Func, 0}, + {"NewNRGBA", Func, 0}, + {"NewNRGBA64", Func, 0}, + {"NewNYCbCrA", Func, 6}, + {"NewPaletted", Func, 0}, + {"NewRGBA", Func, 0}, + {"NewRGBA64", Func, 0}, + {"NewUniform", Func, 0}, + {"NewYCbCr", Func, 0}, + {"Opaque", Var, 0}, + {"Paletted", Type, 0}, + {"Paletted.Palette", Field, 0}, + {"Paletted.Pix", Field, 0}, + {"Paletted.Rect", Field, 0}, + {"Paletted.Stride", Field, 0}, + {"PalettedImage", Type, 0}, + {"Point", Type, 0}, + {"Point.X", Field, 0}, + {"Point.Y", Field, 0}, + {"Pt", Func, 0}, + {"RGBA", Type, 0}, + {"RGBA.Pix", Field, 0}, + {"RGBA.Rect", Field, 0}, + {"RGBA.Stride", Field, 0}, + {"RGBA64", Type, 0}, + {"RGBA64.Pix", Field, 0}, + {"RGBA64.Rect", Field, 0}, + {"RGBA64.Stride", Field, 0}, + {"RGBA64Image", Type, 17}, + {"Rect", Func, 0}, + {"Rectangle", Type, 0}, + {"Rectangle.Max", Field, 0}, + {"Rectangle.Min", Field, 0}, + {"RegisterFormat", Func, 0}, + {"Transparent", Var, 0}, + {"Uniform", Type, 0}, + {"Uniform.C", Field, 0}, + {"White", Var, 0}, + {"YCbCr", Type, 0}, + {"YCbCr.CStride", Field, 0}, + {"YCbCr.Cb", Field, 0}, + {"YCbCr.Cr", Field, 0}, + {"YCbCr.Rect", Field, 0}, + {"YCbCr.SubsampleRatio", Field, 0}, + {"YCbCr.Y", Field, 0}, + {"YCbCr.YStride", Field, 0}, + {"YCbCrSubsampleRatio", Type, 0}, + {"YCbCrSubsampleRatio410", Const, 5}, + {"YCbCrSubsampleRatio411", Const, 5}, + {"YCbCrSubsampleRatio420", Const, 0}, + {"YCbCrSubsampleRatio422", Const, 0}, + {"YCbCrSubsampleRatio440", Const, 1}, + {"YCbCrSubsampleRatio444", Const, 0}, + {"ZP", Var, 0}, + {"ZR", Var, 0}, + }, + "image/color": { + {"(Alpha).RGBA", Method, 0}, + {"(Alpha16).RGBA", Method, 0}, + {"(CMYK).RGBA", Method, 5}, + {"(Gray).RGBA", Method, 0}, + {"(Gray16).RGBA", Method, 0}, + {"(NRGBA).RGBA", Method, 0}, + {"(NRGBA64).RGBA", Method, 0}, + {"(NYCbCrA).RGBA", Method, 6}, + {"(Palette).Convert", Method, 0}, + {"(Palette).Index", Method, 0}, + {"(RGBA).RGBA", Method, 0}, + {"(RGBA64).RGBA", Method, 0}, + {"(YCbCr).RGBA", Method, 0}, + {"Alpha", Type, 0}, + {"Alpha.A", Field, 0}, + {"Alpha16", Type, 0}, + {"Alpha16.A", Field, 0}, + {"Alpha16Model", Var, 0}, + {"AlphaModel", Var, 0}, + {"Black", Var, 0}, + {"CMYK", Type, 5}, + {"CMYK.C", Field, 5}, + {"CMYK.K", Field, 5}, + {"CMYK.M", Field, 5}, + {"CMYK.Y", Field, 5}, + {"CMYKModel", Var, 5}, + {"CMYKToRGB", Func, 5}, + {"Color", Type, 0}, + {"Gray", Type, 0}, + {"Gray.Y", Field, 0}, + {"Gray16", Type, 0}, + {"Gray16.Y", Field, 0}, + {"Gray16Model", Var, 0}, + {"GrayModel", Var, 0}, + {"Model", Type, 0}, + {"ModelFunc", Func, 0}, + {"NRGBA", Type, 0}, + {"NRGBA.A", Field, 0}, + {"NRGBA.B", Field, 0}, + {"NRGBA.G", Field, 0}, + {"NRGBA.R", Field, 0}, + {"NRGBA64", Type, 0}, + {"NRGBA64.A", Field, 0}, + {"NRGBA64.B", Field, 0}, + {"NRGBA64.G", Field, 0}, + {"NRGBA64.R", Field, 0}, + {"NRGBA64Model", Var, 0}, + {"NRGBAModel", Var, 0}, + {"NYCbCrA", Type, 6}, + {"NYCbCrA.A", Field, 6}, + {"NYCbCrA.YCbCr", 
Field, 6}, + {"NYCbCrAModel", Var, 6}, + {"Opaque", Var, 0}, + {"Palette", Type, 0}, + {"RGBA", Type, 0}, + {"RGBA.A", Field, 0}, + {"RGBA.B", Field, 0}, + {"RGBA.G", Field, 0}, + {"RGBA.R", Field, 0}, + {"RGBA64", Type, 0}, + {"RGBA64.A", Field, 0}, + {"RGBA64.B", Field, 0}, + {"RGBA64.G", Field, 0}, + {"RGBA64.R", Field, 0}, + {"RGBA64Model", Var, 0}, + {"RGBAModel", Var, 0}, + {"RGBToCMYK", Func, 5}, + {"RGBToYCbCr", Func, 0}, + {"Transparent", Var, 0}, + {"White", Var, 0}, + {"YCbCr", Type, 0}, + {"YCbCr.Cb", Field, 0}, + {"YCbCr.Cr", Field, 0}, + {"YCbCr.Y", Field, 0}, + {"YCbCrModel", Var, 0}, + {"YCbCrToRGB", Func, 0}, + }, + "image/color/palette": { + {"Plan9", Var, 2}, + {"WebSafe", Var, 2}, + }, + "image/draw": { + {"(Op).Draw", Method, 2}, + {"Draw", Func, 0}, + {"DrawMask", Func, 0}, + {"Drawer", Type, 2}, + {"FloydSteinberg", Var, 2}, + {"Image", Type, 0}, + {"Op", Type, 0}, + {"Over", Const, 0}, + {"Quantizer", Type, 2}, + {"RGBA64Image", Type, 17}, + {"Src", Const, 0}, + }, + "image/gif": { + {"Decode", Func, 0}, + {"DecodeAll", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DisposalBackground", Const, 5}, + {"DisposalNone", Const, 5}, + {"DisposalPrevious", Const, 5}, + {"Encode", Func, 2}, + {"EncodeAll", Func, 2}, + {"GIF", Type, 0}, + {"GIF.BackgroundIndex", Field, 5}, + {"GIF.Config", Field, 5}, + {"GIF.Delay", Field, 0}, + {"GIF.Disposal", Field, 5}, + {"GIF.Image", Field, 0}, + {"GIF.LoopCount", Field, 0}, + {"Options", Type, 2}, + {"Options.Drawer", Field, 2}, + {"Options.NumColors", Field, 2}, + {"Options.Quantizer", Field, 2}, + }, + "image/jpeg": { + {"(FormatError).Error", Method, 0}, + {"(UnsupportedError).Error", Method, 0}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DefaultQuality", Const, 0}, + {"Encode", Func, 0}, + {"FormatError", Type, 0}, + {"Options", Type, 0}, + {"Options.Quality", Field, 0}, + {"Reader", Type, 0}, + {"UnsupportedError", Type, 0}, + }, + "image/png": { + {"(*Encoder).Encode", Method, 4}, + {"(FormatError).Error", Method, 0}, + {"(UnsupportedError).Error", Method, 0}, + {"BestCompression", Const, 4}, + {"BestSpeed", Const, 4}, + {"CompressionLevel", Type, 4}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DefaultCompression", Const, 4}, + {"Encode", Func, 0}, + {"Encoder", Type, 4}, + {"Encoder.BufferPool", Field, 9}, + {"Encoder.CompressionLevel", Field, 4}, + {"EncoderBuffer", Type, 9}, + {"EncoderBufferPool", Type, 9}, + {"FormatError", Type, 0}, + {"NoCompression", Const, 4}, + {"UnsupportedError", Type, 0}, + }, + "index/suffixarray": { + {"(*Index).Bytes", Method, 0}, + {"(*Index).FindAllIndex", Method, 0}, + {"(*Index).Lookup", Method, 0}, + {"(*Index).Read", Method, 0}, + {"(*Index).Write", Method, 0}, + {"Index", Type, 0}, + {"New", Func, 0}, + }, + "io": { + {"(*LimitedReader).Read", Method, 0}, + {"(*OffsetWriter).Seek", Method, 20}, + {"(*OffsetWriter).Write", Method, 20}, + {"(*OffsetWriter).WriteAt", Method, 20}, + {"(*PipeReader).Close", Method, 0}, + {"(*PipeReader).CloseWithError", Method, 0}, + {"(*PipeReader).Read", Method, 0}, + {"(*PipeWriter).Close", Method, 0}, + {"(*PipeWriter).CloseWithError", Method, 0}, + {"(*PipeWriter).Write", Method, 0}, + {"(*SectionReader).Outer", Method, 22}, + {"(*SectionReader).Read", Method, 0}, + {"(*SectionReader).ReadAt", Method, 0}, + {"(*SectionReader).Seek", Method, 0}, + {"(*SectionReader).Size", Method, 0}, + {"ByteReader", Type, 0}, + {"ByteScanner", Type, 0}, + {"ByteWriter", Type, 1}, + {"Closer", Type, 0}, + {"Copy", Func, 0}, + {"CopyBuffer", Func, 5}, + 
{"CopyN", Func, 0}, + {"Discard", Var, 16}, + {"EOF", Var, 0}, + {"ErrClosedPipe", Var, 0}, + {"ErrNoProgress", Var, 1}, + {"ErrShortBuffer", Var, 0}, + {"ErrShortWrite", Var, 0}, + {"ErrUnexpectedEOF", Var, 0}, + {"LimitReader", Func, 0}, + {"LimitedReader", Type, 0}, + {"LimitedReader.N", Field, 0}, + {"LimitedReader.R", Field, 0}, + {"MultiReader", Func, 0}, + {"MultiWriter", Func, 0}, + {"NewOffsetWriter", Func, 20}, + {"NewSectionReader", Func, 0}, + {"NopCloser", Func, 16}, + {"OffsetWriter", Type, 20}, + {"Pipe", Func, 0}, + {"PipeReader", Type, 0}, + {"PipeWriter", Type, 0}, + {"ReadAll", Func, 16}, + {"ReadAtLeast", Func, 0}, + {"ReadCloser", Type, 0}, + {"ReadFull", Func, 0}, + {"ReadSeekCloser", Type, 16}, + {"ReadSeeker", Type, 0}, + {"ReadWriteCloser", Type, 0}, + {"ReadWriteSeeker", Type, 0}, + {"ReadWriter", Type, 0}, + {"Reader", Type, 0}, + {"ReaderAt", Type, 0}, + {"ReaderFrom", Type, 0}, + {"RuneReader", Type, 0}, + {"RuneScanner", Type, 0}, + {"SectionReader", Type, 0}, + {"SeekCurrent", Const, 7}, + {"SeekEnd", Const, 7}, + {"SeekStart", Const, 7}, + {"Seeker", Type, 0}, + {"StringWriter", Type, 12}, + {"TeeReader", Func, 0}, + {"WriteCloser", Type, 0}, + {"WriteSeeker", Type, 0}, + {"WriteString", Func, 0}, + {"Writer", Type, 0}, + {"WriterAt", Type, 0}, + {"WriterTo", Type, 0}, + }, + "io/fs": { + {"(*PathError).Error", Method, 16}, + {"(*PathError).Timeout", Method, 16}, + {"(*PathError).Unwrap", Method, 16}, + {"(FileMode).IsDir", Method, 16}, + {"(FileMode).IsRegular", Method, 16}, + {"(FileMode).Perm", Method, 16}, + {"(FileMode).String", Method, 16}, + {"(FileMode).Type", Method, 16}, + {"DirEntry", Type, 16}, + {"ErrClosed", Var, 16}, + {"ErrExist", Var, 16}, + {"ErrInvalid", Var, 16}, + {"ErrNotExist", Var, 16}, + {"ErrPermission", Var, 16}, + {"FS", Type, 16}, + {"File", Type, 16}, + {"FileInfo", Type, 16}, + {"FileInfoToDirEntry", Func, 17}, + {"FileMode", Type, 16}, + {"FormatDirEntry", Func, 21}, + {"FormatFileInfo", Func, 21}, + {"Glob", Func, 16}, + {"GlobFS", Type, 16}, + {"ModeAppend", Const, 16}, + {"ModeCharDevice", Const, 16}, + {"ModeDevice", Const, 16}, + {"ModeDir", Const, 16}, + {"ModeExclusive", Const, 16}, + {"ModeIrregular", Const, 16}, + {"ModeNamedPipe", Const, 16}, + {"ModePerm", Const, 16}, + {"ModeSetgid", Const, 16}, + {"ModeSetuid", Const, 16}, + {"ModeSocket", Const, 16}, + {"ModeSticky", Const, 16}, + {"ModeSymlink", Const, 16}, + {"ModeTemporary", Const, 16}, + {"ModeType", Const, 16}, + {"PathError", Type, 16}, + {"PathError.Err", Field, 16}, + {"PathError.Op", Field, 16}, + {"PathError.Path", Field, 16}, + {"ReadDir", Func, 16}, + {"ReadDirFS", Type, 16}, + {"ReadDirFile", Type, 16}, + {"ReadFile", Func, 16}, + {"ReadFileFS", Type, 16}, + {"SkipAll", Var, 20}, + {"SkipDir", Var, 16}, + {"Stat", Func, 16}, + {"StatFS", Type, 16}, + {"Sub", Func, 16}, + {"SubFS", Type, 16}, + {"ValidPath", Func, 16}, + {"WalkDir", Func, 16}, + {"WalkDirFunc", Type, 16}, + }, + "io/ioutil": { + {"Discard", Var, 0}, + {"NopCloser", Func, 0}, + {"ReadAll", Func, 0}, + {"ReadDir", Func, 0}, + {"ReadFile", Func, 0}, + {"TempDir", Func, 0}, + {"TempFile", Func, 0}, + {"WriteFile", Func, 0}, + }, + "log": { + {"(*Logger).Fatal", Method, 0}, + {"(*Logger).Fatalf", Method, 0}, + {"(*Logger).Fatalln", Method, 0}, + {"(*Logger).Flags", Method, 0}, + {"(*Logger).Output", Method, 0}, + {"(*Logger).Panic", Method, 0}, + {"(*Logger).Panicf", Method, 0}, + {"(*Logger).Panicln", Method, 0}, + {"(*Logger).Prefix", Method, 0}, + {"(*Logger).Print", Method, 0}, + 
{"(*Logger).Printf", Method, 0}, + {"(*Logger).Println", Method, 0}, + {"(*Logger).SetFlags", Method, 0}, + {"(*Logger).SetOutput", Method, 5}, + {"(*Logger).SetPrefix", Method, 0}, + {"(*Logger).Writer", Method, 12}, + {"Default", Func, 16}, + {"Fatal", Func, 0}, + {"Fatalf", Func, 0}, + {"Fatalln", Func, 0}, + {"Flags", Func, 0}, + {"LUTC", Const, 5}, + {"Ldate", Const, 0}, + {"Llongfile", Const, 0}, + {"Lmicroseconds", Const, 0}, + {"Lmsgprefix", Const, 14}, + {"Logger", Type, 0}, + {"Lshortfile", Const, 0}, + {"LstdFlags", Const, 0}, + {"Ltime", Const, 0}, + {"New", Func, 0}, + {"Output", Func, 5}, + {"Panic", Func, 0}, + {"Panicf", Func, 0}, + {"Panicln", Func, 0}, + {"Prefix", Func, 0}, + {"Print", Func, 0}, + {"Printf", Func, 0}, + {"Println", Func, 0}, + {"SetFlags", Func, 0}, + {"SetOutput", Func, 0}, + {"SetPrefix", Func, 0}, + {"Writer", Func, 13}, + }, + "log/slog": { + {"(*JSONHandler).Enabled", Method, 21}, + {"(*JSONHandler).Handle", Method, 21}, + {"(*JSONHandler).WithAttrs", Method, 21}, + {"(*JSONHandler).WithGroup", Method, 21}, + {"(*Level).UnmarshalJSON", Method, 21}, + {"(*Level).UnmarshalText", Method, 21}, + {"(*LevelVar).Level", Method, 21}, + {"(*LevelVar).MarshalText", Method, 21}, + {"(*LevelVar).Set", Method, 21}, + {"(*LevelVar).String", Method, 21}, + {"(*LevelVar).UnmarshalText", Method, 21}, + {"(*Logger).Debug", Method, 21}, + {"(*Logger).DebugContext", Method, 21}, + {"(*Logger).Enabled", Method, 21}, + {"(*Logger).Error", Method, 21}, + {"(*Logger).ErrorContext", Method, 21}, + {"(*Logger).Handler", Method, 21}, + {"(*Logger).Info", Method, 21}, + {"(*Logger).InfoContext", Method, 21}, + {"(*Logger).Log", Method, 21}, + {"(*Logger).LogAttrs", Method, 21}, + {"(*Logger).Warn", Method, 21}, + {"(*Logger).WarnContext", Method, 21}, + {"(*Logger).With", Method, 21}, + {"(*Logger).WithGroup", Method, 21}, + {"(*Record).Add", Method, 21}, + {"(*Record).AddAttrs", Method, 21}, + {"(*TextHandler).Enabled", Method, 21}, + {"(*TextHandler).Handle", Method, 21}, + {"(*TextHandler).WithAttrs", Method, 21}, + {"(*TextHandler).WithGroup", Method, 21}, + {"(Attr).Equal", Method, 21}, + {"(Attr).String", Method, 21}, + {"(Kind).String", Method, 21}, + {"(Level).Level", Method, 21}, + {"(Level).MarshalJSON", Method, 21}, + {"(Level).MarshalText", Method, 21}, + {"(Level).String", Method, 21}, + {"(Record).Attrs", Method, 21}, + {"(Record).Clone", Method, 21}, + {"(Record).NumAttrs", Method, 21}, + {"(Value).Any", Method, 21}, + {"(Value).Bool", Method, 21}, + {"(Value).Duration", Method, 21}, + {"(Value).Equal", Method, 21}, + {"(Value).Float64", Method, 21}, + {"(Value).Group", Method, 21}, + {"(Value).Int64", Method, 21}, + {"(Value).Kind", Method, 21}, + {"(Value).LogValuer", Method, 21}, + {"(Value).Resolve", Method, 21}, + {"(Value).String", Method, 21}, + {"(Value).Time", Method, 21}, + {"(Value).Uint64", Method, 21}, + {"Any", Func, 21}, + {"AnyValue", Func, 21}, + {"Attr", Type, 21}, + {"Attr.Key", Field, 21}, + {"Attr.Value", Field, 21}, + {"Bool", Func, 21}, + {"BoolValue", Func, 21}, + {"Debug", Func, 21}, + {"DebugContext", Func, 21}, + {"Default", Func, 21}, + {"Duration", Func, 21}, + {"DurationValue", Func, 21}, + {"Error", Func, 21}, + {"ErrorContext", Func, 21}, + {"Float64", Func, 21}, + {"Float64Value", Func, 21}, + {"Group", Func, 21}, + {"GroupValue", Func, 21}, + {"Handler", Type, 21}, + {"HandlerOptions", Type, 21}, + {"HandlerOptions.AddSource", Field, 21}, + {"HandlerOptions.Level", Field, 21}, + {"HandlerOptions.ReplaceAttr", Field, 21}, + 
{"Info", Func, 21}, + {"InfoContext", Func, 21}, + {"Int", Func, 21}, + {"Int64", Func, 21}, + {"Int64Value", Func, 21}, + {"IntValue", Func, 21}, + {"JSONHandler", Type, 21}, + {"Kind", Type, 21}, + {"KindAny", Const, 21}, + {"KindBool", Const, 21}, + {"KindDuration", Const, 21}, + {"KindFloat64", Const, 21}, + {"KindGroup", Const, 21}, + {"KindInt64", Const, 21}, + {"KindLogValuer", Const, 21}, + {"KindString", Const, 21}, + {"KindTime", Const, 21}, + {"KindUint64", Const, 21}, + {"Level", Type, 21}, + {"LevelDebug", Const, 21}, + {"LevelError", Const, 21}, + {"LevelInfo", Const, 21}, + {"LevelKey", Const, 21}, + {"LevelVar", Type, 21}, + {"LevelWarn", Const, 21}, + {"Leveler", Type, 21}, + {"Log", Func, 21}, + {"LogAttrs", Func, 21}, + {"LogValuer", Type, 21}, + {"Logger", Type, 21}, + {"MessageKey", Const, 21}, + {"New", Func, 21}, + {"NewJSONHandler", Func, 21}, + {"NewLogLogger", Func, 21}, + {"NewRecord", Func, 21}, + {"NewTextHandler", Func, 21}, + {"Record", Type, 21}, + {"Record.Level", Field, 21}, + {"Record.Message", Field, 21}, + {"Record.PC", Field, 21}, + {"Record.Time", Field, 21}, + {"SetDefault", Func, 21}, + {"SetLogLoggerLevel", Func, 22}, + {"Source", Type, 21}, + {"Source.File", Field, 21}, + {"Source.Function", Field, 21}, + {"Source.Line", Field, 21}, + {"SourceKey", Const, 21}, + {"String", Func, 21}, + {"StringValue", Func, 21}, + {"TextHandler", Type, 21}, + {"Time", Func, 21}, + {"TimeKey", Const, 21}, + {"TimeValue", Func, 21}, + {"Uint64", Func, 21}, + {"Uint64Value", Func, 21}, + {"Value", Type, 21}, + {"Warn", Func, 21}, + {"WarnContext", Func, 21}, + {"With", Func, 21}, + }, + "log/syslog": { + {"(*Writer).Alert", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Crit", Method, 0}, + {"(*Writer).Debug", Method, 0}, + {"(*Writer).Emerg", Method, 0}, + {"(*Writer).Err", Method, 0}, + {"(*Writer).Info", Method, 0}, + {"(*Writer).Notice", Method, 0}, + {"(*Writer).Warning", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"Dial", Func, 0}, + {"LOG_ALERT", Const, 0}, + {"LOG_AUTH", Const, 1}, + {"LOG_AUTHPRIV", Const, 1}, + {"LOG_CRIT", Const, 0}, + {"LOG_CRON", Const, 1}, + {"LOG_DAEMON", Const, 1}, + {"LOG_DEBUG", Const, 0}, + {"LOG_EMERG", Const, 0}, + {"LOG_ERR", Const, 0}, + {"LOG_FTP", Const, 1}, + {"LOG_INFO", Const, 0}, + {"LOG_KERN", Const, 1}, + {"LOG_LOCAL0", Const, 1}, + {"LOG_LOCAL1", Const, 1}, + {"LOG_LOCAL2", Const, 1}, + {"LOG_LOCAL3", Const, 1}, + {"LOG_LOCAL4", Const, 1}, + {"LOG_LOCAL5", Const, 1}, + {"LOG_LOCAL6", Const, 1}, + {"LOG_LOCAL7", Const, 1}, + {"LOG_LPR", Const, 1}, + {"LOG_MAIL", Const, 1}, + {"LOG_NEWS", Const, 1}, + {"LOG_NOTICE", Const, 0}, + {"LOG_SYSLOG", Const, 1}, + {"LOG_USER", Const, 1}, + {"LOG_UUCP", Const, 1}, + {"LOG_WARNING", Const, 0}, + {"New", Func, 0}, + {"NewLogger", Func, 0}, + {"Priority", Type, 0}, + {"Writer", Type, 0}, + }, + "maps": { + {"Clone", Func, 21}, + {"Copy", Func, 21}, + {"DeleteFunc", Func, 21}, + {"Equal", Func, 21}, + {"EqualFunc", Func, 21}, + }, + "math": { + {"Abs", Func, 0}, + {"Acos", Func, 0}, + {"Acosh", Func, 0}, + {"Asin", Func, 0}, + {"Asinh", Func, 0}, + {"Atan", Func, 0}, + {"Atan2", Func, 0}, + {"Atanh", Func, 0}, + {"Cbrt", Func, 0}, + {"Ceil", Func, 0}, + {"Copysign", Func, 0}, + {"Cos", Func, 0}, + {"Cosh", Func, 0}, + {"Dim", Func, 0}, + {"E", Const, 0}, + {"Erf", Func, 0}, + {"Erfc", Func, 0}, + {"Erfcinv", Func, 10}, + {"Erfinv", Func, 10}, + {"Exp", Func, 0}, + {"Exp2", Func, 0}, + {"Expm1", Func, 0}, + {"FMA", Func, 14}, + {"Float32bits", Func, 0}, + 
{"Float32frombits", Func, 0}, + {"Float64bits", Func, 0}, + {"Float64frombits", Func, 0}, + {"Floor", Func, 0}, + {"Frexp", Func, 0}, + {"Gamma", Func, 0}, + {"Hypot", Func, 0}, + {"Ilogb", Func, 0}, + {"Inf", Func, 0}, + {"IsInf", Func, 0}, + {"IsNaN", Func, 0}, + {"J0", Func, 0}, + {"J1", Func, 0}, + {"Jn", Func, 0}, + {"Ldexp", Func, 0}, + {"Lgamma", Func, 0}, + {"Ln10", Const, 0}, + {"Ln2", Const, 0}, + {"Log", Func, 0}, + {"Log10", Func, 0}, + {"Log10E", Const, 0}, + {"Log1p", Func, 0}, + {"Log2", Func, 0}, + {"Log2E", Const, 0}, + {"Logb", Func, 0}, + {"Max", Func, 0}, + {"MaxFloat32", Const, 0}, + {"MaxFloat64", Const, 0}, + {"MaxInt", Const, 17}, + {"MaxInt16", Const, 0}, + {"MaxInt32", Const, 0}, + {"MaxInt64", Const, 0}, + {"MaxInt8", Const, 0}, + {"MaxUint", Const, 17}, + {"MaxUint16", Const, 0}, + {"MaxUint32", Const, 0}, + {"MaxUint64", Const, 0}, + {"MaxUint8", Const, 0}, + {"Min", Func, 0}, + {"MinInt", Const, 17}, + {"MinInt16", Const, 0}, + {"MinInt32", Const, 0}, + {"MinInt64", Const, 0}, + {"MinInt8", Const, 0}, + {"Mod", Func, 0}, + {"Modf", Func, 0}, + {"NaN", Func, 0}, + {"Nextafter", Func, 0}, + {"Nextafter32", Func, 4}, + {"Phi", Const, 0}, + {"Pi", Const, 0}, + {"Pow", Func, 0}, + {"Pow10", Func, 0}, + {"Remainder", Func, 0}, + {"Round", Func, 10}, + {"RoundToEven", Func, 10}, + {"Signbit", Func, 0}, + {"Sin", Func, 0}, + {"Sincos", Func, 0}, + {"Sinh", Func, 0}, + {"SmallestNonzeroFloat32", Const, 0}, + {"SmallestNonzeroFloat64", Const, 0}, + {"Sqrt", Func, 0}, + {"Sqrt2", Const, 0}, + {"SqrtE", Const, 0}, + {"SqrtPhi", Const, 0}, + {"SqrtPi", Const, 0}, + {"Tan", Func, 0}, + {"Tanh", Func, 0}, + {"Trunc", Func, 0}, + {"Y0", Func, 0}, + {"Y1", Func, 0}, + {"Yn", Func, 0}, + }, + "math/big": { + {"(*Float).Abs", Method, 5}, + {"(*Float).Acc", Method, 5}, + {"(*Float).Add", Method, 5}, + {"(*Float).Append", Method, 5}, + {"(*Float).Cmp", Method, 5}, + {"(*Float).Copy", Method, 5}, + {"(*Float).Float32", Method, 5}, + {"(*Float).Float64", Method, 5}, + {"(*Float).Format", Method, 5}, + {"(*Float).GobDecode", Method, 7}, + {"(*Float).GobEncode", Method, 7}, + {"(*Float).Int", Method, 5}, + {"(*Float).Int64", Method, 5}, + {"(*Float).IsInf", Method, 5}, + {"(*Float).IsInt", Method, 5}, + {"(*Float).MantExp", Method, 5}, + {"(*Float).MarshalText", Method, 6}, + {"(*Float).MinPrec", Method, 5}, + {"(*Float).Mode", Method, 5}, + {"(*Float).Mul", Method, 5}, + {"(*Float).Neg", Method, 5}, + {"(*Float).Parse", Method, 5}, + {"(*Float).Prec", Method, 5}, + {"(*Float).Quo", Method, 5}, + {"(*Float).Rat", Method, 5}, + {"(*Float).Scan", Method, 8}, + {"(*Float).Set", Method, 5}, + {"(*Float).SetFloat64", Method, 5}, + {"(*Float).SetInf", Method, 5}, + {"(*Float).SetInt", Method, 5}, + {"(*Float).SetInt64", Method, 5}, + {"(*Float).SetMantExp", Method, 5}, + {"(*Float).SetMode", Method, 5}, + {"(*Float).SetPrec", Method, 5}, + {"(*Float).SetRat", Method, 5}, + {"(*Float).SetString", Method, 5}, + {"(*Float).SetUint64", Method, 5}, + {"(*Float).Sign", Method, 5}, + {"(*Float).Signbit", Method, 5}, + {"(*Float).Sqrt", Method, 10}, + {"(*Float).String", Method, 5}, + {"(*Float).Sub", Method, 5}, + {"(*Float).Text", Method, 5}, + {"(*Float).Uint64", Method, 5}, + {"(*Float).UnmarshalText", Method, 6}, + {"(*Int).Abs", Method, 0}, + {"(*Int).Add", Method, 0}, + {"(*Int).And", Method, 0}, + {"(*Int).AndNot", Method, 0}, + {"(*Int).Append", Method, 6}, + {"(*Int).Binomial", Method, 0}, + {"(*Int).Bit", Method, 0}, + {"(*Int).BitLen", Method, 0}, + {"(*Int).Bits", Method, 0}, + 
{"(*Int).Bytes", Method, 0}, + {"(*Int).Cmp", Method, 0}, + {"(*Int).CmpAbs", Method, 10}, + {"(*Int).Div", Method, 0}, + {"(*Int).DivMod", Method, 0}, + {"(*Int).Exp", Method, 0}, + {"(*Int).FillBytes", Method, 15}, + {"(*Int).Float64", Method, 21}, + {"(*Int).Format", Method, 0}, + {"(*Int).GCD", Method, 0}, + {"(*Int).GobDecode", Method, 0}, + {"(*Int).GobEncode", Method, 0}, + {"(*Int).Int64", Method, 0}, + {"(*Int).IsInt64", Method, 9}, + {"(*Int).IsUint64", Method, 9}, + {"(*Int).Lsh", Method, 0}, + {"(*Int).MarshalJSON", Method, 1}, + {"(*Int).MarshalText", Method, 3}, + {"(*Int).Mod", Method, 0}, + {"(*Int).ModInverse", Method, 0}, + {"(*Int).ModSqrt", Method, 5}, + {"(*Int).Mul", Method, 0}, + {"(*Int).MulRange", Method, 0}, + {"(*Int).Neg", Method, 0}, + {"(*Int).Not", Method, 0}, + {"(*Int).Or", Method, 0}, + {"(*Int).ProbablyPrime", Method, 0}, + {"(*Int).Quo", Method, 0}, + {"(*Int).QuoRem", Method, 0}, + {"(*Int).Rand", Method, 0}, + {"(*Int).Rem", Method, 0}, + {"(*Int).Rsh", Method, 0}, + {"(*Int).Scan", Method, 0}, + {"(*Int).Set", Method, 0}, + {"(*Int).SetBit", Method, 0}, + {"(*Int).SetBits", Method, 0}, + {"(*Int).SetBytes", Method, 0}, + {"(*Int).SetInt64", Method, 0}, + {"(*Int).SetString", Method, 0}, + {"(*Int).SetUint64", Method, 1}, + {"(*Int).Sign", Method, 0}, + {"(*Int).Sqrt", Method, 8}, + {"(*Int).String", Method, 0}, + {"(*Int).Sub", Method, 0}, + {"(*Int).Text", Method, 6}, + {"(*Int).TrailingZeroBits", Method, 13}, + {"(*Int).Uint64", Method, 1}, + {"(*Int).UnmarshalJSON", Method, 1}, + {"(*Int).UnmarshalText", Method, 3}, + {"(*Int).Xor", Method, 0}, + {"(*Rat).Abs", Method, 0}, + {"(*Rat).Add", Method, 0}, + {"(*Rat).Cmp", Method, 0}, + {"(*Rat).Denom", Method, 0}, + {"(*Rat).Float32", Method, 4}, + {"(*Rat).Float64", Method, 1}, + {"(*Rat).FloatPrec", Method, 22}, + {"(*Rat).FloatString", Method, 0}, + {"(*Rat).GobDecode", Method, 0}, + {"(*Rat).GobEncode", Method, 0}, + {"(*Rat).Inv", Method, 0}, + {"(*Rat).IsInt", Method, 0}, + {"(*Rat).MarshalText", Method, 3}, + {"(*Rat).Mul", Method, 0}, + {"(*Rat).Neg", Method, 0}, + {"(*Rat).Num", Method, 0}, + {"(*Rat).Quo", Method, 0}, + {"(*Rat).RatString", Method, 0}, + {"(*Rat).Scan", Method, 0}, + {"(*Rat).Set", Method, 0}, + {"(*Rat).SetFloat64", Method, 1}, + {"(*Rat).SetFrac", Method, 0}, + {"(*Rat).SetFrac64", Method, 0}, + {"(*Rat).SetInt", Method, 0}, + {"(*Rat).SetInt64", Method, 0}, + {"(*Rat).SetString", Method, 0}, + {"(*Rat).SetUint64", Method, 13}, + {"(*Rat).Sign", Method, 0}, + {"(*Rat).String", Method, 0}, + {"(*Rat).Sub", Method, 0}, + {"(*Rat).UnmarshalText", Method, 3}, + {"(Accuracy).String", Method, 5}, + {"(ErrNaN).Error", Method, 5}, + {"(RoundingMode).String", Method, 5}, + {"Above", Const, 5}, + {"Accuracy", Type, 5}, + {"AwayFromZero", Const, 5}, + {"Below", Const, 5}, + {"ErrNaN", Type, 5}, + {"Exact", Const, 5}, + {"Float", Type, 5}, + {"Int", Type, 0}, + {"Jacobi", Func, 5}, + {"MaxBase", Const, 0}, + {"MaxExp", Const, 5}, + {"MaxPrec", Const, 5}, + {"MinExp", Const, 5}, + {"NewFloat", Func, 5}, + {"NewInt", Func, 0}, + {"NewRat", Func, 0}, + {"ParseFloat", Func, 5}, + {"Rat", Type, 0}, + {"RoundingMode", Type, 5}, + {"ToNearestAway", Const, 5}, + {"ToNearestEven", Const, 5}, + {"ToNegativeInf", Const, 5}, + {"ToPositiveInf", Const, 5}, + {"ToZero", Const, 5}, + {"Word", Type, 0}, + }, + "math/bits": { + {"Add", Func, 12}, + {"Add32", Func, 12}, + {"Add64", Func, 12}, + {"Div", Func, 12}, + {"Div32", Func, 12}, + {"Div64", Func, 12}, + {"LeadingZeros", Func, 9}, + 
{"LeadingZeros16", Func, 9}, + {"LeadingZeros32", Func, 9}, + {"LeadingZeros64", Func, 9}, + {"LeadingZeros8", Func, 9}, + {"Len", Func, 9}, + {"Len16", Func, 9}, + {"Len32", Func, 9}, + {"Len64", Func, 9}, + {"Len8", Func, 9}, + {"Mul", Func, 12}, + {"Mul32", Func, 12}, + {"Mul64", Func, 12}, + {"OnesCount", Func, 9}, + {"OnesCount16", Func, 9}, + {"OnesCount32", Func, 9}, + {"OnesCount64", Func, 9}, + {"OnesCount8", Func, 9}, + {"Rem", Func, 14}, + {"Rem32", Func, 14}, + {"Rem64", Func, 14}, + {"Reverse", Func, 9}, + {"Reverse16", Func, 9}, + {"Reverse32", Func, 9}, + {"Reverse64", Func, 9}, + {"Reverse8", Func, 9}, + {"ReverseBytes", Func, 9}, + {"ReverseBytes16", Func, 9}, + {"ReverseBytes32", Func, 9}, + {"ReverseBytes64", Func, 9}, + {"RotateLeft", Func, 9}, + {"RotateLeft16", Func, 9}, + {"RotateLeft32", Func, 9}, + {"RotateLeft64", Func, 9}, + {"RotateLeft8", Func, 9}, + {"Sub", Func, 12}, + {"Sub32", Func, 12}, + {"Sub64", Func, 12}, + {"TrailingZeros", Func, 9}, + {"TrailingZeros16", Func, 9}, + {"TrailingZeros32", Func, 9}, + {"TrailingZeros64", Func, 9}, + {"TrailingZeros8", Func, 9}, + {"UintSize", Const, 9}, + }, + "math/cmplx": { + {"Abs", Func, 0}, + {"Acos", Func, 0}, + {"Acosh", Func, 0}, + {"Asin", Func, 0}, + {"Asinh", Func, 0}, + {"Atan", Func, 0}, + {"Atanh", Func, 0}, + {"Conj", Func, 0}, + {"Cos", Func, 0}, + {"Cosh", Func, 0}, + {"Cot", Func, 0}, + {"Exp", Func, 0}, + {"Inf", Func, 0}, + {"IsInf", Func, 0}, + {"IsNaN", Func, 0}, + {"Log", Func, 0}, + {"Log10", Func, 0}, + {"NaN", Func, 0}, + {"Phase", Func, 0}, + {"Polar", Func, 0}, + {"Pow", Func, 0}, + {"Rect", Func, 0}, + {"Sin", Func, 0}, + {"Sinh", Func, 0}, + {"Sqrt", Func, 0}, + {"Tan", Func, 0}, + {"Tanh", Func, 0}, + }, + "math/rand": { + {"(*Rand).ExpFloat64", Method, 0}, + {"(*Rand).Float32", Method, 0}, + {"(*Rand).Float64", Method, 0}, + {"(*Rand).Int", Method, 0}, + {"(*Rand).Int31", Method, 0}, + {"(*Rand).Int31n", Method, 0}, + {"(*Rand).Int63", Method, 0}, + {"(*Rand).Int63n", Method, 0}, + {"(*Rand).Intn", Method, 0}, + {"(*Rand).NormFloat64", Method, 0}, + {"(*Rand).Perm", Method, 0}, + {"(*Rand).Read", Method, 6}, + {"(*Rand).Seed", Method, 0}, + {"(*Rand).Shuffle", Method, 10}, + {"(*Rand).Uint32", Method, 0}, + {"(*Rand).Uint64", Method, 8}, + {"(*Zipf).Uint64", Method, 0}, + {"ExpFloat64", Func, 0}, + {"Float32", Func, 0}, + {"Float64", Func, 0}, + {"Int", Func, 0}, + {"Int31", Func, 0}, + {"Int31n", Func, 0}, + {"Int63", Func, 0}, + {"Int63n", Func, 0}, + {"Intn", Func, 0}, + {"New", Func, 0}, + {"NewSource", Func, 0}, + {"NewZipf", Func, 0}, + {"NormFloat64", Func, 0}, + {"Perm", Func, 0}, + {"Rand", Type, 0}, + {"Read", Func, 6}, + {"Seed", Func, 0}, + {"Shuffle", Func, 10}, + {"Source", Type, 0}, + {"Source64", Type, 8}, + {"Uint32", Func, 0}, + {"Uint64", Func, 8}, + {"Zipf", Type, 0}, + }, + "math/rand/v2": { + {"(*ChaCha8).MarshalBinary", Method, 22}, + {"(*ChaCha8).Seed", Method, 22}, + {"(*ChaCha8).Uint64", Method, 22}, + {"(*ChaCha8).UnmarshalBinary", Method, 22}, + {"(*PCG).MarshalBinary", Method, 22}, + {"(*PCG).Seed", Method, 22}, + {"(*PCG).Uint64", Method, 22}, + {"(*PCG).UnmarshalBinary", Method, 22}, + {"(*Rand).ExpFloat64", Method, 22}, + {"(*Rand).Float32", Method, 22}, + {"(*Rand).Float64", Method, 22}, + {"(*Rand).Int", Method, 22}, + {"(*Rand).Int32", Method, 22}, + {"(*Rand).Int32N", Method, 22}, + {"(*Rand).Int64", Method, 22}, + {"(*Rand).Int64N", Method, 22}, + {"(*Rand).IntN", Method, 22}, + {"(*Rand).NormFloat64", Method, 22}, + {"(*Rand).Perm", Method, 22}, + 
{"(*Rand).Shuffle", Method, 22}, + {"(*Rand).Uint32", Method, 22}, + {"(*Rand).Uint32N", Method, 22}, + {"(*Rand).Uint64", Method, 22}, + {"(*Rand).Uint64N", Method, 22}, + {"(*Rand).UintN", Method, 22}, + {"(*Zipf).Uint64", Method, 22}, + {"ChaCha8", Type, 22}, + {"ExpFloat64", Func, 22}, + {"Float32", Func, 22}, + {"Float64", Func, 22}, + {"Int", Func, 22}, + {"Int32", Func, 22}, + {"Int32N", Func, 22}, + {"Int64", Func, 22}, + {"Int64N", Func, 22}, + {"IntN", Func, 22}, + {"N", Func, 22}, + {"New", Func, 22}, + {"NewChaCha8", Func, 22}, + {"NewPCG", Func, 22}, + {"NewZipf", Func, 22}, + {"NormFloat64", Func, 22}, + {"PCG", Type, 22}, + {"Perm", Func, 22}, + {"Rand", Type, 22}, + {"Shuffle", Func, 22}, + {"Source", Type, 22}, + {"Uint32", Func, 22}, + {"Uint32N", Func, 22}, + {"Uint64", Func, 22}, + {"Uint64N", Func, 22}, + {"UintN", Func, 22}, + {"Zipf", Type, 22}, + }, + "mime": { + {"(*WordDecoder).Decode", Method, 5}, + {"(*WordDecoder).DecodeHeader", Method, 5}, + {"(WordEncoder).Encode", Method, 5}, + {"AddExtensionType", Func, 0}, + {"BEncoding", Const, 5}, + {"ErrInvalidMediaParameter", Var, 9}, + {"ExtensionsByType", Func, 5}, + {"FormatMediaType", Func, 0}, + {"ParseMediaType", Func, 0}, + {"QEncoding", Const, 5}, + {"TypeByExtension", Func, 0}, + {"WordDecoder", Type, 5}, + {"WordDecoder.CharsetReader", Field, 5}, + {"WordEncoder", Type, 5}, + }, + "mime/multipart": { + {"(*FileHeader).Open", Method, 0}, + {"(*Form).RemoveAll", Method, 0}, + {"(*Part).Close", Method, 0}, + {"(*Part).FileName", Method, 0}, + {"(*Part).FormName", Method, 0}, + {"(*Part).Read", Method, 0}, + {"(*Reader).NextPart", Method, 0}, + {"(*Reader).NextRawPart", Method, 14}, + {"(*Reader).ReadForm", Method, 0}, + {"(*Writer).Boundary", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).CreateFormField", Method, 0}, + {"(*Writer).CreateFormFile", Method, 0}, + {"(*Writer).CreatePart", Method, 0}, + {"(*Writer).FormDataContentType", Method, 0}, + {"(*Writer).SetBoundary", Method, 1}, + {"(*Writer).WriteField", Method, 0}, + {"ErrMessageTooLarge", Var, 9}, + {"File", Type, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.Filename", Field, 0}, + {"FileHeader.Header", Field, 0}, + {"FileHeader.Size", Field, 9}, + {"Form", Type, 0}, + {"Form.File", Field, 0}, + {"Form.Value", Field, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Part", Type, 0}, + {"Part.Header", Field, 0}, + {"Reader", Type, 0}, + {"Writer", Type, 0}, + }, + "mime/quotedprintable": { + {"(*Reader).Read", Method, 5}, + {"(*Writer).Close", Method, 5}, + {"(*Writer).Write", Method, 5}, + {"NewReader", Func, 5}, + {"NewWriter", Func, 5}, + {"Reader", Type, 5}, + {"Writer", Type, 5}, + {"Writer.Binary", Field, 5}, + }, + "net": { + {"(*AddrError).Error", Method, 0}, + {"(*AddrError).Temporary", Method, 0}, + {"(*AddrError).Timeout", Method, 0}, + {"(*Buffers).Read", Method, 8}, + {"(*Buffers).WriteTo", Method, 8}, + {"(*DNSConfigError).Error", Method, 0}, + {"(*DNSConfigError).Temporary", Method, 0}, + {"(*DNSConfigError).Timeout", Method, 0}, + {"(*DNSConfigError).Unwrap", Method, 13}, + {"(*DNSError).Error", Method, 0}, + {"(*DNSError).Temporary", Method, 0}, + {"(*DNSError).Timeout", Method, 0}, + {"(*Dialer).Dial", Method, 1}, + {"(*Dialer).DialContext", Method, 7}, + {"(*Dialer).MultipathTCP", Method, 21}, + {"(*Dialer).SetMultipathTCP", Method, 21}, + {"(*IP).UnmarshalText", Method, 2}, + {"(*IPAddr).Network", Method, 0}, + {"(*IPAddr).String", Method, 0}, + {"(*IPConn).Close", Method, 0}, + {"(*IPConn).File", Method, 0}, + 
{"(*IPConn).LocalAddr", Method, 0}, + {"(*IPConn).Read", Method, 0}, + {"(*IPConn).ReadFrom", Method, 0}, + {"(*IPConn).ReadFromIP", Method, 0}, + {"(*IPConn).ReadMsgIP", Method, 1}, + {"(*IPConn).RemoteAddr", Method, 0}, + {"(*IPConn).SetDeadline", Method, 0}, + {"(*IPConn).SetReadBuffer", Method, 0}, + {"(*IPConn).SetReadDeadline", Method, 0}, + {"(*IPConn).SetWriteBuffer", Method, 0}, + {"(*IPConn).SetWriteDeadline", Method, 0}, + {"(*IPConn).SyscallConn", Method, 9}, + {"(*IPConn).Write", Method, 0}, + {"(*IPConn).WriteMsgIP", Method, 1}, + {"(*IPConn).WriteTo", Method, 0}, + {"(*IPConn).WriteToIP", Method, 0}, + {"(*IPNet).Contains", Method, 0}, + {"(*IPNet).Network", Method, 0}, + {"(*IPNet).String", Method, 0}, + {"(*Interface).Addrs", Method, 0}, + {"(*Interface).MulticastAddrs", Method, 0}, + {"(*ListenConfig).Listen", Method, 11}, + {"(*ListenConfig).ListenPacket", Method, 11}, + {"(*ListenConfig).MultipathTCP", Method, 21}, + {"(*ListenConfig).SetMultipathTCP", Method, 21}, + {"(*OpError).Error", Method, 0}, + {"(*OpError).Temporary", Method, 0}, + {"(*OpError).Timeout", Method, 0}, + {"(*OpError).Unwrap", Method, 13}, + {"(*ParseError).Error", Method, 0}, + {"(*ParseError).Temporary", Method, 17}, + {"(*ParseError).Timeout", Method, 17}, + {"(*Resolver).LookupAddr", Method, 8}, + {"(*Resolver).LookupCNAME", Method, 8}, + {"(*Resolver).LookupHost", Method, 8}, + {"(*Resolver).LookupIP", Method, 15}, + {"(*Resolver).LookupIPAddr", Method, 8}, + {"(*Resolver).LookupMX", Method, 8}, + {"(*Resolver).LookupNS", Method, 8}, + {"(*Resolver).LookupNetIP", Method, 18}, + {"(*Resolver).LookupPort", Method, 8}, + {"(*Resolver).LookupSRV", Method, 8}, + {"(*Resolver).LookupTXT", Method, 8}, + {"(*TCPAddr).AddrPort", Method, 18}, + {"(*TCPAddr).Network", Method, 0}, + {"(*TCPAddr).String", Method, 0}, + {"(*TCPConn).Close", Method, 0}, + {"(*TCPConn).CloseRead", Method, 0}, + {"(*TCPConn).CloseWrite", Method, 0}, + {"(*TCPConn).File", Method, 0}, + {"(*TCPConn).LocalAddr", Method, 0}, + {"(*TCPConn).MultipathTCP", Method, 21}, + {"(*TCPConn).Read", Method, 0}, + {"(*TCPConn).ReadFrom", Method, 0}, + {"(*TCPConn).RemoteAddr", Method, 0}, + {"(*TCPConn).SetDeadline", Method, 0}, + {"(*TCPConn).SetKeepAlive", Method, 0}, + {"(*TCPConn).SetKeepAlivePeriod", Method, 2}, + {"(*TCPConn).SetLinger", Method, 0}, + {"(*TCPConn).SetNoDelay", Method, 0}, + {"(*TCPConn).SetReadBuffer", Method, 0}, + {"(*TCPConn).SetReadDeadline", Method, 0}, + {"(*TCPConn).SetWriteBuffer", Method, 0}, + {"(*TCPConn).SetWriteDeadline", Method, 0}, + {"(*TCPConn).SyscallConn", Method, 9}, + {"(*TCPConn).Write", Method, 0}, + {"(*TCPConn).WriteTo", Method, 22}, + {"(*TCPListener).Accept", Method, 0}, + {"(*TCPListener).AcceptTCP", Method, 0}, + {"(*TCPListener).Addr", Method, 0}, + {"(*TCPListener).Close", Method, 0}, + {"(*TCPListener).File", Method, 0}, + {"(*TCPListener).SetDeadline", Method, 0}, + {"(*TCPListener).SyscallConn", Method, 10}, + {"(*UDPAddr).AddrPort", Method, 18}, + {"(*UDPAddr).Network", Method, 0}, + {"(*UDPAddr).String", Method, 0}, + {"(*UDPConn).Close", Method, 0}, + {"(*UDPConn).File", Method, 0}, + {"(*UDPConn).LocalAddr", Method, 0}, + {"(*UDPConn).Read", Method, 0}, + {"(*UDPConn).ReadFrom", Method, 0}, + {"(*UDPConn).ReadFromUDP", Method, 0}, + {"(*UDPConn).ReadFromUDPAddrPort", Method, 18}, + {"(*UDPConn).ReadMsgUDP", Method, 1}, + {"(*UDPConn).ReadMsgUDPAddrPort", Method, 18}, + {"(*UDPConn).RemoteAddr", Method, 0}, + {"(*UDPConn).SetDeadline", Method, 0}, + {"(*UDPConn).SetReadBuffer", 
Method, 0}, + {"(*UDPConn).SetReadDeadline", Method, 0}, + {"(*UDPConn).SetWriteBuffer", Method, 0}, + {"(*UDPConn).SetWriteDeadline", Method, 0}, + {"(*UDPConn).SyscallConn", Method, 9}, + {"(*UDPConn).Write", Method, 0}, + {"(*UDPConn).WriteMsgUDP", Method, 1}, + {"(*UDPConn).WriteMsgUDPAddrPort", Method, 18}, + {"(*UDPConn).WriteTo", Method, 0}, + {"(*UDPConn).WriteToUDP", Method, 0}, + {"(*UDPConn).WriteToUDPAddrPort", Method, 18}, + {"(*UnixAddr).Network", Method, 0}, + {"(*UnixAddr).String", Method, 0}, + {"(*UnixConn).Close", Method, 0}, + {"(*UnixConn).CloseRead", Method, 1}, + {"(*UnixConn).CloseWrite", Method, 1}, + {"(*UnixConn).File", Method, 0}, + {"(*UnixConn).LocalAddr", Method, 0}, + {"(*UnixConn).Read", Method, 0}, + {"(*UnixConn).ReadFrom", Method, 0}, + {"(*UnixConn).ReadFromUnix", Method, 0}, + {"(*UnixConn).ReadMsgUnix", Method, 0}, + {"(*UnixConn).RemoteAddr", Method, 0}, + {"(*UnixConn).SetDeadline", Method, 0}, + {"(*UnixConn).SetReadBuffer", Method, 0}, + {"(*UnixConn).SetReadDeadline", Method, 0}, + {"(*UnixConn).SetWriteBuffer", Method, 0}, + {"(*UnixConn).SetWriteDeadline", Method, 0}, + {"(*UnixConn).SyscallConn", Method, 9}, + {"(*UnixConn).Write", Method, 0}, + {"(*UnixConn).WriteMsgUnix", Method, 0}, + {"(*UnixConn).WriteTo", Method, 0}, + {"(*UnixConn).WriteToUnix", Method, 0}, + {"(*UnixListener).Accept", Method, 0}, + {"(*UnixListener).AcceptUnix", Method, 0}, + {"(*UnixListener).Addr", Method, 0}, + {"(*UnixListener).Close", Method, 0}, + {"(*UnixListener).File", Method, 0}, + {"(*UnixListener).SetDeadline", Method, 0}, + {"(*UnixListener).SetUnlinkOnClose", Method, 8}, + {"(*UnixListener).SyscallConn", Method, 10}, + {"(Flags).String", Method, 0}, + {"(HardwareAddr).String", Method, 0}, + {"(IP).DefaultMask", Method, 0}, + {"(IP).Equal", Method, 0}, + {"(IP).IsGlobalUnicast", Method, 0}, + {"(IP).IsInterfaceLocalMulticast", Method, 0}, + {"(IP).IsLinkLocalMulticast", Method, 0}, + {"(IP).IsLinkLocalUnicast", Method, 0}, + {"(IP).IsLoopback", Method, 0}, + {"(IP).IsMulticast", Method, 0}, + {"(IP).IsPrivate", Method, 17}, + {"(IP).IsUnspecified", Method, 0}, + {"(IP).MarshalText", Method, 2}, + {"(IP).Mask", Method, 0}, + {"(IP).String", Method, 0}, + {"(IP).To16", Method, 0}, + {"(IP).To4", Method, 0}, + {"(IPMask).Size", Method, 0}, + {"(IPMask).String", Method, 0}, + {"(InvalidAddrError).Error", Method, 0}, + {"(InvalidAddrError).Temporary", Method, 0}, + {"(InvalidAddrError).Timeout", Method, 0}, + {"(UnknownNetworkError).Error", Method, 0}, + {"(UnknownNetworkError).Temporary", Method, 0}, + {"(UnknownNetworkError).Timeout", Method, 0}, + {"Addr", Type, 0}, + {"AddrError", Type, 0}, + {"AddrError.Addr", Field, 0}, + {"AddrError.Err", Field, 0}, + {"Buffers", Type, 8}, + {"CIDRMask", Func, 0}, + {"Conn", Type, 0}, + {"DNSConfigError", Type, 0}, + {"DNSConfigError.Err", Field, 0}, + {"DNSError", Type, 0}, + {"DNSError.Err", Field, 0}, + {"DNSError.IsNotFound", Field, 13}, + {"DNSError.IsTemporary", Field, 6}, + {"DNSError.IsTimeout", Field, 0}, + {"DNSError.Name", Field, 0}, + {"DNSError.Server", Field, 0}, + {"DefaultResolver", Var, 8}, + {"Dial", Func, 0}, + {"DialIP", Func, 0}, + {"DialTCP", Func, 0}, + {"DialTimeout", Func, 0}, + {"DialUDP", Func, 0}, + {"DialUnix", Func, 0}, + {"Dialer", Type, 1}, + {"Dialer.Cancel", Field, 6}, + {"Dialer.Control", Field, 11}, + {"Dialer.ControlContext", Field, 20}, + {"Dialer.Deadline", Field, 1}, + {"Dialer.DualStack", Field, 2}, + {"Dialer.FallbackDelay", Field, 5}, + {"Dialer.KeepAlive", Field, 3}, + 
{"Dialer.LocalAddr", Field, 1}, + {"Dialer.Resolver", Field, 8}, + {"Dialer.Timeout", Field, 1}, + {"ErrClosed", Var, 16}, + {"ErrWriteToConnected", Var, 0}, + {"Error", Type, 0}, + {"FileConn", Func, 0}, + {"FileListener", Func, 0}, + {"FilePacketConn", Func, 0}, + {"FlagBroadcast", Const, 0}, + {"FlagLoopback", Const, 0}, + {"FlagMulticast", Const, 0}, + {"FlagPointToPoint", Const, 0}, + {"FlagRunning", Const, 20}, + {"FlagUp", Const, 0}, + {"Flags", Type, 0}, + {"HardwareAddr", Type, 0}, + {"IP", Type, 0}, + {"IPAddr", Type, 0}, + {"IPAddr.IP", Field, 0}, + {"IPAddr.Zone", Field, 1}, + {"IPConn", Type, 0}, + {"IPMask", Type, 0}, + {"IPNet", Type, 0}, + {"IPNet.IP", Field, 0}, + {"IPNet.Mask", Field, 0}, + {"IPv4", Func, 0}, + {"IPv4Mask", Func, 0}, + {"IPv4allrouter", Var, 0}, + {"IPv4allsys", Var, 0}, + {"IPv4bcast", Var, 0}, + {"IPv4len", Const, 0}, + {"IPv4zero", Var, 0}, + {"IPv6interfacelocalallnodes", Var, 0}, + {"IPv6len", Const, 0}, + {"IPv6linklocalallnodes", Var, 0}, + {"IPv6linklocalallrouters", Var, 0}, + {"IPv6loopback", Var, 0}, + {"IPv6unspecified", Var, 0}, + {"IPv6zero", Var, 0}, + {"Interface", Type, 0}, + {"Interface.Flags", Field, 0}, + {"Interface.HardwareAddr", Field, 0}, + {"Interface.Index", Field, 0}, + {"Interface.MTU", Field, 0}, + {"Interface.Name", Field, 0}, + {"InterfaceAddrs", Func, 0}, + {"InterfaceByIndex", Func, 0}, + {"InterfaceByName", Func, 0}, + {"Interfaces", Func, 0}, + {"InvalidAddrError", Type, 0}, + {"JoinHostPort", Func, 0}, + {"Listen", Func, 0}, + {"ListenConfig", Type, 11}, + {"ListenConfig.Control", Field, 11}, + {"ListenConfig.KeepAlive", Field, 13}, + {"ListenIP", Func, 0}, + {"ListenMulticastUDP", Func, 0}, + {"ListenPacket", Func, 0}, + {"ListenTCP", Func, 0}, + {"ListenUDP", Func, 0}, + {"ListenUnix", Func, 0}, + {"ListenUnixgram", Func, 0}, + {"Listener", Type, 0}, + {"LookupAddr", Func, 0}, + {"LookupCNAME", Func, 0}, + {"LookupHost", Func, 0}, + {"LookupIP", Func, 0}, + {"LookupMX", Func, 0}, + {"LookupNS", Func, 1}, + {"LookupPort", Func, 0}, + {"LookupSRV", Func, 0}, + {"LookupTXT", Func, 0}, + {"MX", Type, 0}, + {"MX.Host", Field, 0}, + {"MX.Pref", Field, 0}, + {"NS", Type, 1}, + {"NS.Host", Field, 1}, + {"OpError", Type, 0}, + {"OpError.Addr", Field, 0}, + {"OpError.Err", Field, 0}, + {"OpError.Net", Field, 0}, + {"OpError.Op", Field, 0}, + {"OpError.Source", Field, 5}, + {"PacketConn", Type, 0}, + {"ParseCIDR", Func, 0}, + {"ParseError", Type, 0}, + {"ParseError.Text", Field, 0}, + {"ParseError.Type", Field, 0}, + {"ParseIP", Func, 0}, + {"ParseMAC", Func, 0}, + {"Pipe", Func, 0}, + {"ResolveIPAddr", Func, 0}, + {"ResolveTCPAddr", Func, 0}, + {"ResolveUDPAddr", Func, 0}, + {"ResolveUnixAddr", Func, 0}, + {"Resolver", Type, 8}, + {"Resolver.Dial", Field, 9}, + {"Resolver.PreferGo", Field, 8}, + {"Resolver.StrictErrors", Field, 9}, + {"SRV", Type, 0}, + {"SRV.Port", Field, 0}, + {"SRV.Priority", Field, 0}, + {"SRV.Target", Field, 0}, + {"SRV.Weight", Field, 0}, + {"SplitHostPort", Func, 0}, + {"TCPAddr", Type, 0}, + {"TCPAddr.IP", Field, 0}, + {"TCPAddr.Port", Field, 0}, + {"TCPAddr.Zone", Field, 1}, + {"TCPAddrFromAddrPort", Func, 18}, + {"TCPConn", Type, 0}, + {"TCPListener", Type, 0}, + {"UDPAddr", Type, 0}, + {"UDPAddr.IP", Field, 0}, + {"UDPAddr.Port", Field, 0}, + {"UDPAddr.Zone", Field, 1}, + {"UDPAddrFromAddrPort", Func, 18}, + {"UDPConn", Type, 0}, + {"UnixAddr", Type, 0}, + {"UnixAddr.Name", Field, 0}, + {"UnixAddr.Net", Field, 0}, + {"UnixConn", Type, 0}, + {"UnixListener", Type, 0}, + {"UnknownNetworkError", Type, 
0}, + }, + "net/http": { + {"(*Client).CloseIdleConnections", Method, 12}, + {"(*Client).Do", Method, 0}, + {"(*Client).Get", Method, 0}, + {"(*Client).Head", Method, 0}, + {"(*Client).Post", Method, 0}, + {"(*Client).PostForm", Method, 0}, + {"(*Cookie).String", Method, 0}, + {"(*Cookie).Valid", Method, 18}, + {"(*MaxBytesError).Error", Method, 19}, + {"(*ProtocolError).Error", Method, 0}, + {"(*ProtocolError).Is", Method, 21}, + {"(*Request).AddCookie", Method, 0}, + {"(*Request).BasicAuth", Method, 4}, + {"(*Request).Clone", Method, 13}, + {"(*Request).Context", Method, 7}, + {"(*Request).Cookie", Method, 0}, + {"(*Request).Cookies", Method, 0}, + {"(*Request).FormFile", Method, 0}, + {"(*Request).FormValue", Method, 0}, + {"(*Request).MultipartReader", Method, 0}, + {"(*Request).ParseForm", Method, 0}, + {"(*Request).ParseMultipartForm", Method, 0}, + {"(*Request).PathValue", Method, 22}, + {"(*Request).PostFormValue", Method, 1}, + {"(*Request).ProtoAtLeast", Method, 0}, + {"(*Request).Referer", Method, 0}, + {"(*Request).SetBasicAuth", Method, 0}, + {"(*Request).SetPathValue", Method, 22}, + {"(*Request).UserAgent", Method, 0}, + {"(*Request).WithContext", Method, 7}, + {"(*Request).Write", Method, 0}, + {"(*Request).WriteProxy", Method, 0}, + {"(*Response).Cookies", Method, 0}, + {"(*Response).Location", Method, 0}, + {"(*Response).ProtoAtLeast", Method, 0}, + {"(*Response).Write", Method, 0}, + {"(*ResponseController).EnableFullDuplex", Method, 21}, + {"(*ResponseController).Flush", Method, 20}, + {"(*ResponseController).Hijack", Method, 20}, + {"(*ResponseController).SetReadDeadline", Method, 20}, + {"(*ResponseController).SetWriteDeadline", Method, 20}, + {"(*ServeMux).Handle", Method, 0}, + {"(*ServeMux).HandleFunc", Method, 0}, + {"(*ServeMux).Handler", Method, 1}, + {"(*ServeMux).ServeHTTP", Method, 0}, + {"(*Server).Close", Method, 8}, + {"(*Server).ListenAndServe", Method, 0}, + {"(*Server).ListenAndServeTLS", Method, 0}, + {"(*Server).RegisterOnShutdown", Method, 9}, + {"(*Server).Serve", Method, 0}, + {"(*Server).ServeTLS", Method, 9}, + {"(*Server).SetKeepAlivesEnabled", Method, 3}, + {"(*Server).Shutdown", Method, 8}, + {"(*Transport).CancelRequest", Method, 1}, + {"(*Transport).Clone", Method, 13}, + {"(*Transport).CloseIdleConnections", Method, 0}, + {"(*Transport).RegisterProtocol", Method, 0}, + {"(*Transport).RoundTrip", Method, 0}, + {"(ConnState).String", Method, 3}, + {"(Dir).Open", Method, 0}, + {"(HandlerFunc).ServeHTTP", Method, 0}, + {"(Header).Add", Method, 0}, + {"(Header).Clone", Method, 13}, + {"(Header).Del", Method, 0}, + {"(Header).Get", Method, 0}, + {"(Header).Set", Method, 0}, + {"(Header).Values", Method, 14}, + {"(Header).Write", Method, 0}, + {"(Header).WriteSubset", Method, 0}, + {"AllowQuerySemicolons", Func, 17}, + {"CanonicalHeaderKey", Func, 0}, + {"Client", Type, 0}, + {"Client.CheckRedirect", Field, 0}, + {"Client.Jar", Field, 0}, + {"Client.Timeout", Field, 3}, + {"Client.Transport", Field, 0}, + {"CloseNotifier", Type, 1}, + {"ConnState", Type, 3}, + {"Cookie", Type, 0}, + {"Cookie.Domain", Field, 0}, + {"Cookie.Expires", Field, 0}, + {"Cookie.HttpOnly", Field, 0}, + {"Cookie.MaxAge", Field, 0}, + {"Cookie.Name", Field, 0}, + {"Cookie.Path", Field, 0}, + {"Cookie.Raw", Field, 0}, + {"Cookie.RawExpires", Field, 0}, + {"Cookie.SameSite", Field, 11}, + {"Cookie.Secure", Field, 0}, + {"Cookie.Unparsed", Field, 0}, + {"Cookie.Value", Field, 0}, + {"CookieJar", Type, 0}, + {"DefaultClient", Var, 0}, + {"DefaultMaxHeaderBytes", Const, 0}, + 
{"DefaultMaxIdleConnsPerHost", Const, 0}, + {"DefaultServeMux", Var, 0}, + {"DefaultTransport", Var, 0}, + {"DetectContentType", Func, 0}, + {"Dir", Type, 0}, + {"ErrAbortHandler", Var, 8}, + {"ErrBodyNotAllowed", Var, 0}, + {"ErrBodyReadAfterClose", Var, 0}, + {"ErrContentLength", Var, 0}, + {"ErrHandlerTimeout", Var, 0}, + {"ErrHeaderTooLong", Var, 0}, + {"ErrHijacked", Var, 0}, + {"ErrLineTooLong", Var, 0}, + {"ErrMissingBoundary", Var, 0}, + {"ErrMissingContentLength", Var, 0}, + {"ErrMissingFile", Var, 0}, + {"ErrNoCookie", Var, 0}, + {"ErrNoLocation", Var, 0}, + {"ErrNotMultipart", Var, 0}, + {"ErrNotSupported", Var, 0}, + {"ErrSchemeMismatch", Var, 21}, + {"ErrServerClosed", Var, 8}, + {"ErrShortBody", Var, 0}, + {"ErrSkipAltProtocol", Var, 6}, + {"ErrUnexpectedTrailer", Var, 0}, + {"ErrUseLastResponse", Var, 7}, + {"ErrWriteAfterFlush", Var, 0}, + {"Error", Func, 0}, + {"FS", Func, 16}, + {"File", Type, 0}, + {"FileServer", Func, 0}, + {"FileServerFS", Func, 22}, + {"FileSystem", Type, 0}, + {"Flusher", Type, 0}, + {"Get", Func, 0}, + {"Handle", Func, 0}, + {"HandleFunc", Func, 0}, + {"Handler", Type, 0}, + {"HandlerFunc", Type, 0}, + {"Head", Func, 0}, + {"Header", Type, 0}, + {"Hijacker", Type, 0}, + {"ListenAndServe", Func, 0}, + {"ListenAndServeTLS", Func, 0}, + {"LocalAddrContextKey", Var, 7}, + {"MaxBytesError", Type, 19}, + {"MaxBytesError.Limit", Field, 19}, + {"MaxBytesHandler", Func, 18}, + {"MaxBytesReader", Func, 0}, + {"MethodConnect", Const, 6}, + {"MethodDelete", Const, 6}, + {"MethodGet", Const, 6}, + {"MethodHead", Const, 6}, + {"MethodOptions", Const, 6}, + {"MethodPatch", Const, 6}, + {"MethodPost", Const, 6}, + {"MethodPut", Const, 6}, + {"MethodTrace", Const, 6}, + {"NewFileTransport", Func, 0}, + {"NewFileTransportFS", Func, 22}, + {"NewRequest", Func, 0}, + {"NewRequestWithContext", Func, 13}, + {"NewResponseController", Func, 20}, + {"NewServeMux", Func, 0}, + {"NoBody", Var, 8}, + {"NotFound", Func, 0}, + {"NotFoundHandler", Func, 0}, + {"ParseHTTPVersion", Func, 0}, + {"ParseTime", Func, 1}, + {"Post", Func, 0}, + {"PostForm", Func, 0}, + {"ProtocolError", Type, 0}, + {"ProtocolError.ErrorString", Field, 0}, + {"ProxyFromEnvironment", Func, 0}, + {"ProxyURL", Func, 0}, + {"PushOptions", Type, 8}, + {"PushOptions.Header", Field, 8}, + {"PushOptions.Method", Field, 8}, + {"Pusher", Type, 8}, + {"ReadRequest", Func, 0}, + {"ReadResponse", Func, 0}, + {"Redirect", Func, 0}, + {"RedirectHandler", Func, 0}, + {"Request", Type, 0}, + {"Request.Body", Field, 0}, + {"Request.Cancel", Field, 5}, + {"Request.Close", Field, 0}, + {"Request.ContentLength", Field, 0}, + {"Request.Form", Field, 0}, + {"Request.GetBody", Field, 8}, + {"Request.Header", Field, 0}, + {"Request.Host", Field, 0}, + {"Request.Method", Field, 0}, + {"Request.MultipartForm", Field, 0}, + {"Request.PostForm", Field, 1}, + {"Request.Proto", Field, 0}, + {"Request.ProtoMajor", Field, 0}, + {"Request.ProtoMinor", Field, 0}, + {"Request.RemoteAddr", Field, 0}, + {"Request.RequestURI", Field, 0}, + {"Request.Response", Field, 7}, + {"Request.TLS", Field, 0}, + {"Request.Trailer", Field, 0}, + {"Request.TransferEncoding", Field, 0}, + {"Request.URL", Field, 0}, + {"Response", Type, 0}, + {"Response.Body", Field, 0}, + {"Response.Close", Field, 0}, + {"Response.ContentLength", Field, 0}, + {"Response.Header", Field, 0}, + {"Response.Proto", Field, 0}, + {"Response.ProtoMajor", Field, 0}, + {"Response.ProtoMinor", Field, 0}, + {"Response.Request", Field, 0}, + {"Response.Status", Field, 0}, + 
{"Response.StatusCode", Field, 0}, + {"Response.TLS", Field, 3}, + {"Response.Trailer", Field, 0}, + {"Response.TransferEncoding", Field, 0}, + {"Response.Uncompressed", Field, 7}, + {"ResponseController", Type, 20}, + {"ResponseWriter", Type, 0}, + {"RoundTripper", Type, 0}, + {"SameSite", Type, 11}, + {"SameSiteDefaultMode", Const, 11}, + {"SameSiteLaxMode", Const, 11}, + {"SameSiteNoneMode", Const, 13}, + {"SameSiteStrictMode", Const, 11}, + {"Serve", Func, 0}, + {"ServeContent", Func, 0}, + {"ServeFile", Func, 0}, + {"ServeFileFS", Func, 22}, + {"ServeMux", Type, 0}, + {"ServeTLS", Func, 9}, + {"Server", Type, 0}, + {"Server.Addr", Field, 0}, + {"Server.BaseContext", Field, 13}, + {"Server.ConnContext", Field, 13}, + {"Server.ConnState", Field, 3}, + {"Server.DisableGeneralOptionsHandler", Field, 20}, + {"Server.ErrorLog", Field, 3}, + {"Server.Handler", Field, 0}, + {"Server.IdleTimeout", Field, 8}, + {"Server.MaxHeaderBytes", Field, 0}, + {"Server.ReadHeaderTimeout", Field, 8}, + {"Server.ReadTimeout", Field, 0}, + {"Server.TLSConfig", Field, 0}, + {"Server.TLSNextProto", Field, 1}, + {"Server.WriteTimeout", Field, 0}, + {"ServerContextKey", Var, 7}, + {"SetCookie", Func, 0}, + {"StateActive", Const, 3}, + {"StateClosed", Const, 3}, + {"StateHijacked", Const, 3}, + {"StateIdle", Const, 3}, + {"StateNew", Const, 3}, + {"StatusAccepted", Const, 0}, + {"StatusAlreadyReported", Const, 7}, + {"StatusBadGateway", Const, 0}, + {"StatusBadRequest", Const, 0}, + {"StatusConflict", Const, 0}, + {"StatusContinue", Const, 0}, + {"StatusCreated", Const, 0}, + {"StatusEarlyHints", Const, 13}, + {"StatusExpectationFailed", Const, 0}, + {"StatusFailedDependency", Const, 7}, + {"StatusForbidden", Const, 0}, + {"StatusFound", Const, 0}, + {"StatusGatewayTimeout", Const, 0}, + {"StatusGone", Const, 0}, + {"StatusHTTPVersionNotSupported", Const, 0}, + {"StatusIMUsed", Const, 7}, + {"StatusInsufficientStorage", Const, 7}, + {"StatusInternalServerError", Const, 0}, + {"StatusLengthRequired", Const, 0}, + {"StatusLocked", Const, 7}, + {"StatusLoopDetected", Const, 7}, + {"StatusMethodNotAllowed", Const, 0}, + {"StatusMisdirectedRequest", Const, 11}, + {"StatusMovedPermanently", Const, 0}, + {"StatusMultiStatus", Const, 7}, + {"StatusMultipleChoices", Const, 0}, + {"StatusNetworkAuthenticationRequired", Const, 6}, + {"StatusNoContent", Const, 0}, + {"StatusNonAuthoritativeInfo", Const, 0}, + {"StatusNotAcceptable", Const, 0}, + {"StatusNotExtended", Const, 7}, + {"StatusNotFound", Const, 0}, + {"StatusNotImplemented", Const, 0}, + {"StatusNotModified", Const, 0}, + {"StatusOK", Const, 0}, + {"StatusPartialContent", Const, 0}, + {"StatusPaymentRequired", Const, 0}, + {"StatusPermanentRedirect", Const, 7}, + {"StatusPreconditionFailed", Const, 0}, + {"StatusPreconditionRequired", Const, 6}, + {"StatusProcessing", Const, 7}, + {"StatusProxyAuthRequired", Const, 0}, + {"StatusRequestEntityTooLarge", Const, 0}, + {"StatusRequestHeaderFieldsTooLarge", Const, 6}, + {"StatusRequestTimeout", Const, 0}, + {"StatusRequestURITooLong", Const, 0}, + {"StatusRequestedRangeNotSatisfiable", Const, 0}, + {"StatusResetContent", Const, 0}, + {"StatusSeeOther", Const, 0}, + {"StatusServiceUnavailable", Const, 0}, + {"StatusSwitchingProtocols", Const, 0}, + {"StatusTeapot", Const, 0}, + {"StatusTemporaryRedirect", Const, 0}, + {"StatusText", Func, 0}, + {"StatusTooEarly", Const, 12}, + {"StatusTooManyRequests", Const, 6}, + {"StatusUnauthorized", Const, 0}, + {"StatusUnavailableForLegalReasons", Const, 6}, + 
{"StatusUnprocessableEntity", Const, 7}, + {"StatusUnsupportedMediaType", Const, 0}, + {"StatusUpgradeRequired", Const, 7}, + {"StatusUseProxy", Const, 0}, + {"StatusVariantAlsoNegotiates", Const, 7}, + {"StripPrefix", Func, 0}, + {"TimeFormat", Const, 0}, + {"TimeoutHandler", Func, 0}, + {"TrailerPrefix", Const, 8}, + {"Transport", Type, 0}, + {"Transport.Dial", Field, 0}, + {"Transport.DialContext", Field, 7}, + {"Transport.DialTLS", Field, 4}, + {"Transport.DialTLSContext", Field, 14}, + {"Transport.DisableCompression", Field, 0}, + {"Transport.DisableKeepAlives", Field, 0}, + {"Transport.ExpectContinueTimeout", Field, 6}, + {"Transport.ForceAttemptHTTP2", Field, 13}, + {"Transport.GetProxyConnectHeader", Field, 16}, + {"Transport.IdleConnTimeout", Field, 7}, + {"Transport.MaxConnsPerHost", Field, 11}, + {"Transport.MaxIdleConns", Field, 7}, + {"Transport.MaxIdleConnsPerHost", Field, 0}, + {"Transport.MaxResponseHeaderBytes", Field, 7}, + {"Transport.OnProxyConnectResponse", Field, 20}, + {"Transport.Proxy", Field, 0}, + {"Transport.ProxyConnectHeader", Field, 8}, + {"Transport.ReadBufferSize", Field, 13}, + {"Transport.ResponseHeaderTimeout", Field, 1}, + {"Transport.TLSClientConfig", Field, 0}, + {"Transport.TLSHandshakeTimeout", Field, 3}, + {"Transport.TLSNextProto", Field, 6}, + {"Transport.WriteBufferSize", Field, 13}, + }, + "net/http/cgi": { + {"(*Handler).ServeHTTP", Method, 0}, + {"Handler", Type, 0}, + {"Handler.Args", Field, 0}, + {"Handler.Dir", Field, 0}, + {"Handler.Env", Field, 0}, + {"Handler.InheritEnv", Field, 0}, + {"Handler.Logger", Field, 0}, + {"Handler.Path", Field, 0}, + {"Handler.PathLocationHandler", Field, 0}, + {"Handler.Root", Field, 0}, + {"Handler.Stderr", Field, 7}, + {"Request", Func, 0}, + {"RequestFromMap", Func, 0}, + {"Serve", Func, 0}, + }, + "net/http/cookiejar": { + {"(*Jar).Cookies", Method, 1}, + {"(*Jar).SetCookies", Method, 1}, + {"Jar", Type, 1}, + {"New", Func, 1}, + {"Options", Type, 1}, + {"Options.PublicSuffixList", Field, 1}, + {"PublicSuffixList", Type, 1}, + }, + "net/http/fcgi": { + {"ErrConnClosed", Var, 5}, + {"ErrRequestAborted", Var, 5}, + {"ProcessEnv", Func, 9}, + {"Serve", Func, 0}, + }, + "net/http/httptest": { + {"(*ResponseRecorder).Flush", Method, 0}, + {"(*ResponseRecorder).Header", Method, 0}, + {"(*ResponseRecorder).Result", Method, 7}, + {"(*ResponseRecorder).Write", Method, 0}, + {"(*ResponseRecorder).WriteHeader", Method, 0}, + {"(*ResponseRecorder).WriteString", Method, 6}, + {"(*Server).Certificate", Method, 9}, + {"(*Server).Client", Method, 9}, + {"(*Server).Close", Method, 0}, + {"(*Server).CloseClientConnections", Method, 0}, + {"(*Server).Start", Method, 0}, + {"(*Server).StartTLS", Method, 0}, + {"DefaultRemoteAddr", Const, 0}, + {"NewRecorder", Func, 0}, + {"NewRequest", Func, 7}, + {"NewServer", Func, 0}, + {"NewTLSServer", Func, 0}, + {"NewUnstartedServer", Func, 0}, + {"ResponseRecorder", Type, 0}, + {"ResponseRecorder.Body", Field, 0}, + {"ResponseRecorder.Code", Field, 0}, + {"ResponseRecorder.Flushed", Field, 0}, + {"ResponseRecorder.HeaderMap", Field, 0}, + {"Server", Type, 0}, + {"Server.Config", Field, 0}, + {"Server.EnableHTTP2", Field, 14}, + {"Server.Listener", Field, 0}, + {"Server.TLS", Field, 0}, + {"Server.URL", Field, 0}, + }, + "net/http/httptrace": { + {"ClientTrace", Type, 7}, + {"ClientTrace.ConnectDone", Field, 7}, + {"ClientTrace.ConnectStart", Field, 7}, + {"ClientTrace.DNSDone", Field, 7}, + {"ClientTrace.DNSStart", Field, 7}, + {"ClientTrace.GetConn", Field, 7}, + 
{"ClientTrace.Got100Continue", Field, 7}, + {"ClientTrace.Got1xxResponse", Field, 11}, + {"ClientTrace.GotConn", Field, 7}, + {"ClientTrace.GotFirstResponseByte", Field, 7}, + {"ClientTrace.PutIdleConn", Field, 7}, + {"ClientTrace.TLSHandshakeDone", Field, 8}, + {"ClientTrace.TLSHandshakeStart", Field, 8}, + {"ClientTrace.Wait100Continue", Field, 7}, + {"ClientTrace.WroteHeaderField", Field, 11}, + {"ClientTrace.WroteHeaders", Field, 7}, + {"ClientTrace.WroteRequest", Field, 7}, + {"ContextClientTrace", Func, 7}, + {"DNSDoneInfo", Type, 7}, + {"DNSDoneInfo.Addrs", Field, 7}, + {"DNSDoneInfo.Coalesced", Field, 7}, + {"DNSDoneInfo.Err", Field, 7}, + {"DNSStartInfo", Type, 7}, + {"DNSStartInfo.Host", Field, 7}, + {"GotConnInfo", Type, 7}, + {"GotConnInfo.Conn", Field, 7}, + {"GotConnInfo.IdleTime", Field, 7}, + {"GotConnInfo.Reused", Field, 7}, + {"GotConnInfo.WasIdle", Field, 7}, + {"WithClientTrace", Func, 7}, + {"WroteRequestInfo", Type, 7}, + {"WroteRequestInfo.Err", Field, 7}, + }, + "net/http/httputil": { + {"(*ClientConn).Close", Method, 0}, + {"(*ClientConn).Do", Method, 0}, + {"(*ClientConn).Hijack", Method, 0}, + {"(*ClientConn).Pending", Method, 0}, + {"(*ClientConn).Read", Method, 0}, + {"(*ClientConn).Write", Method, 0}, + {"(*ProxyRequest).SetURL", Method, 20}, + {"(*ProxyRequest).SetXForwarded", Method, 20}, + {"(*ReverseProxy).ServeHTTP", Method, 0}, + {"(*ServerConn).Close", Method, 0}, + {"(*ServerConn).Hijack", Method, 0}, + {"(*ServerConn).Pending", Method, 0}, + {"(*ServerConn).Read", Method, 0}, + {"(*ServerConn).Write", Method, 0}, + {"BufferPool", Type, 6}, + {"ClientConn", Type, 0}, + {"DumpRequest", Func, 0}, + {"DumpRequestOut", Func, 0}, + {"DumpResponse", Func, 0}, + {"ErrClosed", Var, 0}, + {"ErrLineTooLong", Var, 0}, + {"ErrPersistEOF", Var, 0}, + {"ErrPipeline", Var, 0}, + {"NewChunkedReader", Func, 0}, + {"NewChunkedWriter", Func, 0}, + {"NewClientConn", Func, 0}, + {"NewProxyClientConn", Func, 0}, + {"NewServerConn", Func, 0}, + {"NewSingleHostReverseProxy", Func, 0}, + {"ProxyRequest", Type, 20}, + {"ProxyRequest.In", Field, 20}, + {"ProxyRequest.Out", Field, 20}, + {"ReverseProxy", Type, 0}, + {"ReverseProxy.BufferPool", Field, 6}, + {"ReverseProxy.Director", Field, 0}, + {"ReverseProxy.ErrorHandler", Field, 11}, + {"ReverseProxy.ErrorLog", Field, 4}, + {"ReverseProxy.FlushInterval", Field, 0}, + {"ReverseProxy.ModifyResponse", Field, 8}, + {"ReverseProxy.Rewrite", Field, 20}, + {"ReverseProxy.Transport", Field, 0}, + {"ServerConn", Type, 0}, + }, + "net/http/pprof": { + {"Cmdline", Func, 0}, + {"Handler", Func, 0}, + {"Index", Func, 0}, + {"Profile", Func, 0}, + {"Symbol", Func, 0}, + {"Trace", Func, 5}, + }, + "net/mail": { + {"(*Address).String", Method, 0}, + {"(*AddressParser).Parse", Method, 5}, + {"(*AddressParser).ParseList", Method, 5}, + {"(Header).AddressList", Method, 0}, + {"(Header).Date", Method, 0}, + {"(Header).Get", Method, 0}, + {"Address", Type, 0}, + {"Address.Address", Field, 0}, + {"Address.Name", Field, 0}, + {"AddressParser", Type, 5}, + {"AddressParser.WordDecoder", Field, 5}, + {"ErrHeaderNotPresent", Var, 0}, + {"Header", Type, 0}, + {"Message", Type, 0}, + {"Message.Body", Field, 0}, + {"Message.Header", Field, 0}, + {"ParseAddress", Func, 1}, + {"ParseAddressList", Func, 1}, + {"ParseDate", Func, 8}, + {"ReadMessage", Func, 0}, + }, + "net/netip": { + {"(*Addr).UnmarshalBinary", Method, 18}, + {"(*Addr).UnmarshalText", Method, 18}, + {"(*AddrPort).UnmarshalBinary", Method, 18}, + {"(*AddrPort).UnmarshalText", Method, 18}, + 
{"(*Prefix).UnmarshalBinary", Method, 18}, + {"(*Prefix).UnmarshalText", Method, 18}, + {"(Addr).AppendTo", Method, 18}, + {"(Addr).As16", Method, 18}, + {"(Addr).As4", Method, 18}, + {"(Addr).AsSlice", Method, 18}, + {"(Addr).BitLen", Method, 18}, + {"(Addr).Compare", Method, 18}, + {"(Addr).Is4", Method, 18}, + {"(Addr).Is4In6", Method, 18}, + {"(Addr).Is6", Method, 18}, + {"(Addr).IsGlobalUnicast", Method, 18}, + {"(Addr).IsInterfaceLocalMulticast", Method, 18}, + {"(Addr).IsLinkLocalMulticast", Method, 18}, + {"(Addr).IsLinkLocalUnicast", Method, 18}, + {"(Addr).IsLoopback", Method, 18}, + {"(Addr).IsMulticast", Method, 18}, + {"(Addr).IsPrivate", Method, 18}, + {"(Addr).IsUnspecified", Method, 18}, + {"(Addr).IsValid", Method, 18}, + {"(Addr).Less", Method, 18}, + {"(Addr).MarshalBinary", Method, 18}, + {"(Addr).MarshalText", Method, 18}, + {"(Addr).Next", Method, 18}, + {"(Addr).Prefix", Method, 18}, + {"(Addr).Prev", Method, 18}, + {"(Addr).String", Method, 18}, + {"(Addr).StringExpanded", Method, 18}, + {"(Addr).Unmap", Method, 18}, + {"(Addr).WithZone", Method, 18}, + {"(Addr).Zone", Method, 18}, + {"(AddrPort).Addr", Method, 18}, + {"(AddrPort).AppendTo", Method, 18}, + {"(AddrPort).Compare", Method, 22}, + {"(AddrPort).IsValid", Method, 18}, + {"(AddrPort).MarshalBinary", Method, 18}, + {"(AddrPort).MarshalText", Method, 18}, + {"(AddrPort).Port", Method, 18}, + {"(AddrPort).String", Method, 18}, + {"(Prefix).Addr", Method, 18}, + {"(Prefix).AppendTo", Method, 18}, + {"(Prefix).Bits", Method, 18}, + {"(Prefix).Contains", Method, 18}, + {"(Prefix).IsSingleIP", Method, 18}, + {"(Prefix).IsValid", Method, 18}, + {"(Prefix).MarshalBinary", Method, 18}, + {"(Prefix).MarshalText", Method, 18}, + {"(Prefix).Masked", Method, 18}, + {"(Prefix).Overlaps", Method, 18}, + {"(Prefix).String", Method, 18}, + {"Addr", Type, 18}, + {"AddrFrom16", Func, 18}, + {"AddrFrom4", Func, 18}, + {"AddrFromSlice", Func, 18}, + {"AddrPort", Type, 18}, + {"AddrPortFrom", Func, 18}, + {"IPv4Unspecified", Func, 18}, + {"IPv6LinkLocalAllNodes", Func, 18}, + {"IPv6LinkLocalAllRouters", Func, 20}, + {"IPv6Loopback", Func, 20}, + {"IPv6Unspecified", Func, 18}, + {"MustParseAddr", Func, 18}, + {"MustParseAddrPort", Func, 18}, + {"MustParsePrefix", Func, 18}, + {"ParseAddr", Func, 18}, + {"ParseAddrPort", Func, 18}, + {"ParsePrefix", Func, 18}, + {"Prefix", Type, 18}, + {"PrefixFrom", Func, 18}, + }, + "net/rpc": { + {"(*Client).Call", Method, 0}, + {"(*Client).Close", Method, 0}, + {"(*Client).Go", Method, 0}, + {"(*Server).Accept", Method, 0}, + {"(*Server).HandleHTTP", Method, 0}, + {"(*Server).Register", Method, 0}, + {"(*Server).RegisterName", Method, 0}, + {"(*Server).ServeCodec", Method, 0}, + {"(*Server).ServeConn", Method, 0}, + {"(*Server).ServeHTTP", Method, 0}, + {"(*Server).ServeRequest", Method, 0}, + {"(ServerError).Error", Method, 0}, + {"Accept", Func, 0}, + {"Call", Type, 0}, + {"Call.Args", Field, 0}, + {"Call.Done", Field, 0}, + {"Call.Error", Field, 0}, + {"Call.Reply", Field, 0}, + {"Call.ServiceMethod", Field, 0}, + {"Client", Type, 0}, + {"ClientCodec", Type, 0}, + {"DefaultDebugPath", Const, 0}, + {"DefaultRPCPath", Const, 0}, + {"DefaultServer", Var, 0}, + {"Dial", Func, 0}, + {"DialHTTP", Func, 0}, + {"DialHTTPPath", Func, 0}, + {"ErrShutdown", Var, 0}, + {"HandleHTTP", Func, 0}, + {"NewClient", Func, 0}, + {"NewClientWithCodec", Func, 0}, + {"NewServer", Func, 0}, + {"Register", Func, 0}, + {"RegisterName", Func, 0}, + {"Request", Type, 0}, + {"Request.Seq", Field, 0}, + 
{"Request.ServiceMethod", Field, 0}, + {"Response", Type, 0}, + {"Response.Error", Field, 0}, + {"Response.Seq", Field, 0}, + {"Response.ServiceMethod", Field, 0}, + {"ServeCodec", Func, 0}, + {"ServeConn", Func, 0}, + {"ServeRequest", Func, 0}, + {"Server", Type, 0}, + {"ServerCodec", Type, 0}, + {"ServerError", Type, 0}, + }, + "net/rpc/jsonrpc": { + {"Dial", Func, 0}, + {"NewClient", Func, 0}, + {"NewClientCodec", Func, 0}, + {"NewServerCodec", Func, 0}, + {"ServeConn", Func, 0}, + }, + "net/smtp": { + {"(*Client).Auth", Method, 0}, + {"(*Client).Close", Method, 2}, + {"(*Client).Data", Method, 0}, + {"(*Client).Extension", Method, 0}, + {"(*Client).Hello", Method, 1}, + {"(*Client).Mail", Method, 0}, + {"(*Client).Noop", Method, 10}, + {"(*Client).Quit", Method, 0}, + {"(*Client).Rcpt", Method, 0}, + {"(*Client).Reset", Method, 0}, + {"(*Client).StartTLS", Method, 0}, + {"(*Client).TLSConnectionState", Method, 5}, + {"(*Client).Verify", Method, 0}, + {"Auth", Type, 0}, + {"CRAMMD5Auth", Func, 0}, + {"Client", Type, 0}, + {"Client.Text", Field, 0}, + {"Dial", Func, 0}, + {"NewClient", Func, 0}, + {"PlainAuth", Func, 0}, + {"SendMail", Func, 0}, + {"ServerInfo", Type, 0}, + {"ServerInfo.Auth", Field, 0}, + {"ServerInfo.Name", Field, 0}, + {"ServerInfo.TLS", Field, 0}, + }, + "net/textproto": { + {"(*Conn).Close", Method, 0}, + {"(*Conn).Cmd", Method, 0}, + {"(*Conn).DotReader", Method, 0}, + {"(*Conn).DotWriter", Method, 0}, + {"(*Conn).EndRequest", Method, 0}, + {"(*Conn).EndResponse", Method, 0}, + {"(*Conn).Next", Method, 0}, + {"(*Conn).PrintfLine", Method, 0}, + {"(*Conn).ReadCodeLine", Method, 0}, + {"(*Conn).ReadContinuedLine", Method, 0}, + {"(*Conn).ReadContinuedLineBytes", Method, 0}, + {"(*Conn).ReadDotBytes", Method, 0}, + {"(*Conn).ReadDotLines", Method, 0}, + {"(*Conn).ReadLine", Method, 0}, + {"(*Conn).ReadLineBytes", Method, 0}, + {"(*Conn).ReadMIMEHeader", Method, 0}, + {"(*Conn).ReadResponse", Method, 0}, + {"(*Conn).StartRequest", Method, 0}, + {"(*Conn).StartResponse", Method, 0}, + {"(*Error).Error", Method, 0}, + {"(*Pipeline).EndRequest", Method, 0}, + {"(*Pipeline).EndResponse", Method, 0}, + {"(*Pipeline).Next", Method, 0}, + {"(*Pipeline).StartRequest", Method, 0}, + {"(*Pipeline).StartResponse", Method, 0}, + {"(*Reader).DotReader", Method, 0}, + {"(*Reader).ReadCodeLine", Method, 0}, + {"(*Reader).ReadContinuedLine", Method, 0}, + {"(*Reader).ReadContinuedLineBytes", Method, 0}, + {"(*Reader).ReadDotBytes", Method, 0}, + {"(*Reader).ReadDotLines", Method, 0}, + {"(*Reader).ReadLine", Method, 0}, + {"(*Reader).ReadLineBytes", Method, 0}, + {"(*Reader).ReadMIMEHeader", Method, 0}, + {"(*Reader).ReadResponse", Method, 0}, + {"(*Writer).DotWriter", Method, 0}, + {"(*Writer).PrintfLine", Method, 0}, + {"(MIMEHeader).Add", Method, 0}, + {"(MIMEHeader).Del", Method, 0}, + {"(MIMEHeader).Get", Method, 0}, + {"(MIMEHeader).Set", Method, 0}, + {"(MIMEHeader).Values", Method, 14}, + {"(ProtocolError).Error", Method, 0}, + {"CanonicalMIMEHeaderKey", Func, 0}, + {"Conn", Type, 0}, + {"Conn.Pipeline", Field, 0}, + {"Conn.Reader", Field, 0}, + {"Conn.Writer", Field, 0}, + {"Dial", Func, 0}, + {"Error", Type, 0}, + {"Error.Code", Field, 0}, + {"Error.Msg", Field, 0}, + {"MIMEHeader", Type, 0}, + {"NewConn", Func, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Pipeline", Type, 0}, + {"ProtocolError", Type, 0}, + {"Reader", Type, 0}, + {"Reader.R", Field, 0}, + {"TrimBytes", Func, 1}, + {"TrimString", Func, 1}, + {"Writer", Type, 0}, + {"Writer.W", Field, 0}, + }, 
+ "net/url": { + {"(*Error).Error", Method, 0}, + {"(*Error).Temporary", Method, 6}, + {"(*Error).Timeout", Method, 6}, + {"(*Error).Unwrap", Method, 13}, + {"(*URL).EscapedFragment", Method, 15}, + {"(*URL).EscapedPath", Method, 5}, + {"(*URL).Hostname", Method, 8}, + {"(*URL).IsAbs", Method, 0}, + {"(*URL).JoinPath", Method, 19}, + {"(*URL).MarshalBinary", Method, 8}, + {"(*URL).Parse", Method, 0}, + {"(*URL).Port", Method, 8}, + {"(*URL).Query", Method, 0}, + {"(*URL).Redacted", Method, 15}, + {"(*URL).RequestURI", Method, 0}, + {"(*URL).ResolveReference", Method, 0}, + {"(*URL).String", Method, 0}, + {"(*URL).UnmarshalBinary", Method, 8}, + {"(*Userinfo).Password", Method, 0}, + {"(*Userinfo).String", Method, 0}, + {"(*Userinfo).Username", Method, 0}, + {"(EscapeError).Error", Method, 0}, + {"(InvalidHostError).Error", Method, 6}, + {"(Values).Add", Method, 0}, + {"(Values).Del", Method, 0}, + {"(Values).Encode", Method, 0}, + {"(Values).Get", Method, 0}, + {"(Values).Has", Method, 17}, + {"(Values).Set", Method, 0}, + {"Error", Type, 0}, + {"Error.Err", Field, 0}, + {"Error.Op", Field, 0}, + {"Error.URL", Field, 0}, + {"EscapeError", Type, 0}, + {"InvalidHostError", Type, 6}, + {"JoinPath", Func, 19}, + {"Parse", Func, 0}, + {"ParseQuery", Func, 0}, + {"ParseRequestURI", Func, 0}, + {"PathEscape", Func, 8}, + {"PathUnescape", Func, 8}, + {"QueryEscape", Func, 0}, + {"QueryUnescape", Func, 0}, + {"URL", Type, 0}, + {"URL.ForceQuery", Field, 7}, + {"URL.Fragment", Field, 0}, + {"URL.Host", Field, 0}, + {"URL.OmitHost", Field, 19}, + {"URL.Opaque", Field, 0}, + {"URL.Path", Field, 0}, + {"URL.RawFragment", Field, 15}, + {"URL.RawPath", Field, 5}, + {"URL.RawQuery", Field, 0}, + {"URL.Scheme", Field, 0}, + {"URL.User", Field, 0}, + {"User", Func, 0}, + {"UserPassword", Func, 0}, + {"Userinfo", Type, 0}, + {"Values", Type, 0}, + }, + "os": { + {"(*File).Chdir", Method, 0}, + {"(*File).Chmod", Method, 0}, + {"(*File).Chown", Method, 0}, + {"(*File).Close", Method, 0}, + {"(*File).Fd", Method, 0}, + {"(*File).Name", Method, 0}, + {"(*File).Read", Method, 0}, + {"(*File).ReadAt", Method, 0}, + {"(*File).ReadDir", Method, 16}, + {"(*File).ReadFrom", Method, 15}, + {"(*File).Readdir", Method, 0}, + {"(*File).Readdirnames", Method, 0}, + {"(*File).Seek", Method, 0}, + {"(*File).SetDeadline", Method, 10}, + {"(*File).SetReadDeadline", Method, 10}, + {"(*File).SetWriteDeadline", Method, 10}, + {"(*File).Stat", Method, 0}, + {"(*File).Sync", Method, 0}, + {"(*File).SyscallConn", Method, 12}, + {"(*File).Truncate", Method, 0}, + {"(*File).Write", Method, 0}, + {"(*File).WriteAt", Method, 0}, + {"(*File).WriteString", Method, 0}, + {"(*File).WriteTo", Method, 22}, + {"(*LinkError).Error", Method, 0}, + {"(*LinkError).Unwrap", Method, 13}, + {"(*PathError).Error", Method, 0}, + {"(*PathError).Timeout", Method, 10}, + {"(*PathError).Unwrap", Method, 13}, + {"(*Process).Kill", Method, 0}, + {"(*Process).Release", Method, 0}, + {"(*Process).Signal", Method, 0}, + {"(*Process).Wait", Method, 0}, + {"(*ProcessState).ExitCode", Method, 12}, + {"(*ProcessState).Exited", Method, 0}, + {"(*ProcessState).Pid", Method, 0}, + {"(*ProcessState).String", Method, 0}, + {"(*ProcessState).Success", Method, 0}, + {"(*ProcessState).Sys", Method, 0}, + {"(*ProcessState).SysUsage", Method, 0}, + {"(*ProcessState).SystemTime", Method, 0}, + {"(*ProcessState).UserTime", Method, 0}, + {"(*SyscallError).Error", Method, 0}, + {"(*SyscallError).Timeout", Method, 10}, + {"(*SyscallError).Unwrap", Method, 13}, + 
{"(FileMode).IsDir", Method, 0}, + {"(FileMode).IsRegular", Method, 1}, + {"(FileMode).Perm", Method, 0}, + {"(FileMode).String", Method, 0}, + {"Args", Var, 0}, + {"Chdir", Func, 0}, + {"Chmod", Func, 0}, + {"Chown", Func, 0}, + {"Chtimes", Func, 0}, + {"Clearenv", Func, 0}, + {"Create", Func, 0}, + {"CreateTemp", Func, 16}, + {"DevNull", Const, 0}, + {"DirEntry", Type, 16}, + {"DirFS", Func, 16}, + {"Environ", Func, 0}, + {"ErrClosed", Var, 8}, + {"ErrDeadlineExceeded", Var, 15}, + {"ErrExist", Var, 0}, + {"ErrInvalid", Var, 0}, + {"ErrNoDeadline", Var, 10}, + {"ErrNotExist", Var, 0}, + {"ErrPermission", Var, 0}, + {"ErrProcessDone", Var, 16}, + {"Executable", Func, 8}, + {"Exit", Func, 0}, + {"Expand", Func, 0}, + {"ExpandEnv", Func, 0}, + {"File", Type, 0}, + {"FileInfo", Type, 0}, + {"FileMode", Type, 0}, + {"FindProcess", Func, 0}, + {"Getegid", Func, 0}, + {"Getenv", Func, 0}, + {"Geteuid", Func, 0}, + {"Getgid", Func, 0}, + {"Getgroups", Func, 0}, + {"Getpagesize", Func, 0}, + {"Getpid", Func, 0}, + {"Getppid", Func, 0}, + {"Getuid", Func, 0}, + {"Getwd", Func, 0}, + {"Hostname", Func, 0}, + {"Interrupt", Var, 0}, + {"IsExist", Func, 0}, + {"IsNotExist", Func, 0}, + {"IsPathSeparator", Func, 0}, + {"IsPermission", Func, 0}, + {"IsTimeout", Func, 10}, + {"Kill", Var, 0}, + {"Lchown", Func, 0}, + {"Link", Func, 0}, + {"LinkError", Type, 0}, + {"LinkError.Err", Field, 0}, + {"LinkError.New", Field, 0}, + {"LinkError.Old", Field, 0}, + {"LinkError.Op", Field, 0}, + {"LookupEnv", Func, 5}, + {"Lstat", Func, 0}, + {"Mkdir", Func, 0}, + {"MkdirAll", Func, 0}, + {"MkdirTemp", Func, 16}, + {"ModeAppend", Const, 0}, + {"ModeCharDevice", Const, 0}, + {"ModeDevice", Const, 0}, + {"ModeDir", Const, 0}, + {"ModeExclusive", Const, 0}, + {"ModeIrregular", Const, 11}, + {"ModeNamedPipe", Const, 0}, + {"ModePerm", Const, 0}, + {"ModeSetgid", Const, 0}, + {"ModeSetuid", Const, 0}, + {"ModeSocket", Const, 0}, + {"ModeSticky", Const, 0}, + {"ModeSymlink", Const, 0}, + {"ModeTemporary", Const, 0}, + {"ModeType", Const, 0}, + {"NewFile", Func, 0}, + {"NewSyscallError", Func, 0}, + {"O_APPEND", Const, 0}, + {"O_CREATE", Const, 0}, + {"O_EXCL", Const, 0}, + {"O_RDONLY", Const, 0}, + {"O_RDWR", Const, 0}, + {"O_SYNC", Const, 0}, + {"O_TRUNC", Const, 0}, + {"O_WRONLY", Const, 0}, + {"Open", Func, 0}, + {"OpenFile", Func, 0}, + {"PathError", Type, 0}, + {"PathError.Err", Field, 0}, + {"PathError.Op", Field, 0}, + {"PathError.Path", Field, 0}, + {"PathListSeparator", Const, 0}, + {"PathSeparator", Const, 0}, + {"Pipe", Func, 0}, + {"ProcAttr", Type, 0}, + {"ProcAttr.Dir", Field, 0}, + {"ProcAttr.Env", Field, 0}, + {"ProcAttr.Files", Field, 0}, + {"ProcAttr.Sys", Field, 0}, + {"Process", Type, 0}, + {"Process.Pid", Field, 0}, + {"ProcessState", Type, 0}, + {"ReadDir", Func, 16}, + {"ReadFile", Func, 16}, + {"Readlink", Func, 0}, + {"Remove", Func, 0}, + {"RemoveAll", Func, 0}, + {"Rename", Func, 0}, + {"SEEK_CUR", Const, 0}, + {"SEEK_END", Const, 0}, + {"SEEK_SET", Const, 0}, + {"SameFile", Func, 0}, + {"Setenv", Func, 0}, + {"Signal", Type, 0}, + {"StartProcess", Func, 0}, + {"Stat", Func, 0}, + {"Stderr", Var, 0}, + {"Stdin", Var, 0}, + {"Stdout", Var, 0}, + {"Symlink", Func, 0}, + {"SyscallError", Type, 0}, + {"SyscallError.Err", Field, 0}, + {"SyscallError.Syscall", Field, 0}, + {"TempDir", Func, 0}, + {"Truncate", Func, 0}, + {"Unsetenv", Func, 4}, + {"UserCacheDir", Func, 11}, + {"UserConfigDir", Func, 13}, + {"UserHomeDir", Func, 12}, + {"WriteFile", Func, 16}, + }, + "os/exec": { + 
{"(*Cmd).CombinedOutput", Method, 0}, + {"(*Cmd).Environ", Method, 19}, + {"(*Cmd).Output", Method, 0}, + {"(*Cmd).Run", Method, 0}, + {"(*Cmd).Start", Method, 0}, + {"(*Cmd).StderrPipe", Method, 0}, + {"(*Cmd).StdinPipe", Method, 0}, + {"(*Cmd).StdoutPipe", Method, 0}, + {"(*Cmd).String", Method, 13}, + {"(*Cmd).Wait", Method, 0}, + {"(*Error).Error", Method, 0}, + {"(*Error).Unwrap", Method, 13}, + {"(*ExitError).Error", Method, 0}, + {"(ExitError).ExitCode", Method, 12}, + {"(ExitError).Exited", Method, 0}, + {"(ExitError).Pid", Method, 0}, + {"(ExitError).String", Method, 0}, + {"(ExitError).Success", Method, 0}, + {"(ExitError).Sys", Method, 0}, + {"(ExitError).SysUsage", Method, 0}, + {"(ExitError).SystemTime", Method, 0}, + {"(ExitError).UserTime", Method, 0}, + {"Cmd", Type, 0}, + {"Cmd.Args", Field, 0}, + {"Cmd.Cancel", Field, 20}, + {"Cmd.Dir", Field, 0}, + {"Cmd.Env", Field, 0}, + {"Cmd.Err", Field, 19}, + {"Cmd.ExtraFiles", Field, 0}, + {"Cmd.Path", Field, 0}, + {"Cmd.Process", Field, 0}, + {"Cmd.ProcessState", Field, 0}, + {"Cmd.Stderr", Field, 0}, + {"Cmd.Stdin", Field, 0}, + {"Cmd.Stdout", Field, 0}, + {"Cmd.SysProcAttr", Field, 0}, + {"Cmd.WaitDelay", Field, 20}, + {"Command", Func, 0}, + {"CommandContext", Func, 7}, + {"ErrDot", Var, 19}, + {"ErrNotFound", Var, 0}, + {"ErrWaitDelay", Var, 20}, + {"Error", Type, 0}, + {"Error.Err", Field, 0}, + {"Error.Name", Field, 0}, + {"ExitError", Type, 0}, + {"ExitError.ProcessState", Field, 0}, + {"ExitError.Stderr", Field, 6}, + {"LookPath", Func, 0}, + }, + "os/signal": { + {"Ignore", Func, 5}, + {"Ignored", Func, 11}, + {"Notify", Func, 0}, + {"NotifyContext", Func, 16}, + {"Reset", Func, 5}, + {"Stop", Func, 1}, + }, + "os/user": { + {"(*User).GroupIds", Method, 7}, + {"(UnknownGroupError).Error", Method, 7}, + {"(UnknownGroupIdError).Error", Method, 7}, + {"(UnknownUserError).Error", Method, 0}, + {"(UnknownUserIdError).Error", Method, 0}, + {"Current", Func, 0}, + {"Group", Type, 7}, + {"Group.Gid", Field, 7}, + {"Group.Name", Field, 7}, + {"Lookup", Func, 0}, + {"LookupGroup", Func, 7}, + {"LookupGroupId", Func, 7}, + {"LookupId", Func, 0}, + {"UnknownGroupError", Type, 7}, + {"UnknownGroupIdError", Type, 7}, + {"UnknownUserError", Type, 0}, + {"UnknownUserIdError", Type, 0}, + {"User", Type, 0}, + {"User.Gid", Field, 0}, + {"User.HomeDir", Field, 0}, + {"User.Name", Field, 0}, + {"User.Uid", Field, 0}, + {"User.Username", Field, 0}, + }, + "path": { + {"Base", Func, 0}, + {"Clean", Func, 0}, + {"Dir", Func, 0}, + {"ErrBadPattern", Var, 0}, + {"Ext", Func, 0}, + {"IsAbs", Func, 0}, + {"Join", Func, 0}, + {"Match", Func, 0}, + {"Split", Func, 0}, + }, + "path/filepath": { + {"Abs", Func, 0}, + {"Base", Func, 0}, + {"Clean", Func, 0}, + {"Dir", Func, 0}, + {"ErrBadPattern", Var, 0}, + {"EvalSymlinks", Func, 0}, + {"Ext", Func, 0}, + {"FromSlash", Func, 0}, + {"Glob", Func, 0}, + {"HasPrefix", Func, 0}, + {"IsAbs", Func, 0}, + {"IsLocal", Func, 20}, + {"Join", Func, 0}, + {"ListSeparator", Const, 0}, + {"Match", Func, 0}, + {"Rel", Func, 0}, + {"Separator", Const, 0}, + {"SkipAll", Var, 20}, + {"SkipDir", Var, 0}, + {"Split", Func, 0}, + {"SplitList", Func, 0}, + {"ToSlash", Func, 0}, + {"VolumeName", Func, 0}, + {"Walk", Func, 0}, + {"WalkDir", Func, 16}, + {"WalkFunc", Type, 0}, + }, + "plugin": { + {"(*Plugin).Lookup", Method, 8}, + {"Open", Func, 8}, + {"Plugin", Type, 8}, + {"Symbol", Type, 8}, + }, + "reflect": { + {"(*MapIter).Key", Method, 12}, + {"(*MapIter).Next", Method, 12}, + {"(*MapIter).Reset", Method, 18}, + 
{"(*MapIter).Value", Method, 12}, + {"(*ValueError).Error", Method, 0}, + {"(ChanDir).String", Method, 0}, + {"(Kind).String", Method, 0}, + {"(Method).IsExported", Method, 17}, + {"(StructField).IsExported", Method, 17}, + {"(StructTag).Get", Method, 0}, + {"(StructTag).Lookup", Method, 7}, + {"(Value).Addr", Method, 0}, + {"(Value).Bool", Method, 0}, + {"(Value).Bytes", Method, 0}, + {"(Value).Call", Method, 0}, + {"(Value).CallSlice", Method, 0}, + {"(Value).CanAddr", Method, 0}, + {"(Value).CanComplex", Method, 18}, + {"(Value).CanConvert", Method, 17}, + {"(Value).CanFloat", Method, 18}, + {"(Value).CanInt", Method, 18}, + {"(Value).CanInterface", Method, 0}, + {"(Value).CanSet", Method, 0}, + {"(Value).CanUint", Method, 18}, + {"(Value).Cap", Method, 0}, + {"(Value).Clear", Method, 21}, + {"(Value).Close", Method, 0}, + {"(Value).Comparable", Method, 20}, + {"(Value).Complex", Method, 0}, + {"(Value).Convert", Method, 1}, + {"(Value).Elem", Method, 0}, + {"(Value).Equal", Method, 20}, + {"(Value).Field", Method, 0}, + {"(Value).FieldByIndex", Method, 0}, + {"(Value).FieldByIndexErr", Method, 18}, + {"(Value).FieldByName", Method, 0}, + {"(Value).FieldByNameFunc", Method, 0}, + {"(Value).Float", Method, 0}, + {"(Value).Grow", Method, 20}, + {"(Value).Index", Method, 0}, + {"(Value).Int", Method, 0}, + {"(Value).Interface", Method, 0}, + {"(Value).InterfaceData", Method, 0}, + {"(Value).IsNil", Method, 0}, + {"(Value).IsValid", Method, 0}, + {"(Value).IsZero", Method, 13}, + {"(Value).Kind", Method, 0}, + {"(Value).Len", Method, 0}, + {"(Value).MapIndex", Method, 0}, + {"(Value).MapKeys", Method, 0}, + {"(Value).MapRange", Method, 12}, + {"(Value).Method", Method, 0}, + {"(Value).MethodByName", Method, 0}, + {"(Value).NumField", Method, 0}, + {"(Value).NumMethod", Method, 0}, + {"(Value).OverflowComplex", Method, 0}, + {"(Value).OverflowFloat", Method, 0}, + {"(Value).OverflowInt", Method, 0}, + {"(Value).OverflowUint", Method, 0}, + {"(Value).Pointer", Method, 0}, + {"(Value).Recv", Method, 0}, + {"(Value).Send", Method, 0}, + {"(Value).Set", Method, 0}, + {"(Value).SetBool", Method, 0}, + {"(Value).SetBytes", Method, 0}, + {"(Value).SetCap", Method, 2}, + {"(Value).SetComplex", Method, 0}, + {"(Value).SetFloat", Method, 0}, + {"(Value).SetInt", Method, 0}, + {"(Value).SetIterKey", Method, 18}, + {"(Value).SetIterValue", Method, 18}, + {"(Value).SetLen", Method, 0}, + {"(Value).SetMapIndex", Method, 0}, + {"(Value).SetPointer", Method, 0}, + {"(Value).SetString", Method, 0}, + {"(Value).SetUint", Method, 0}, + {"(Value).SetZero", Method, 20}, + {"(Value).Slice", Method, 0}, + {"(Value).Slice3", Method, 2}, + {"(Value).String", Method, 0}, + {"(Value).TryRecv", Method, 0}, + {"(Value).TrySend", Method, 0}, + {"(Value).Type", Method, 0}, + {"(Value).Uint", Method, 0}, + {"(Value).UnsafeAddr", Method, 0}, + {"(Value).UnsafePointer", Method, 18}, + {"Append", Func, 0}, + {"AppendSlice", Func, 0}, + {"Array", Const, 0}, + {"ArrayOf", Func, 5}, + {"Bool", Const, 0}, + {"BothDir", Const, 0}, + {"Chan", Const, 0}, + {"ChanDir", Type, 0}, + {"ChanOf", Func, 1}, + {"Complex128", Const, 0}, + {"Complex64", Const, 0}, + {"Copy", Func, 0}, + {"DeepEqual", Func, 0}, + {"Float32", Const, 0}, + {"Float64", Const, 0}, + {"Func", Const, 0}, + {"FuncOf", Func, 5}, + {"Indirect", Func, 0}, + {"Int", Const, 0}, + {"Int16", Const, 0}, + {"Int32", Const, 0}, + {"Int64", Const, 0}, + {"Int8", Const, 0}, + {"Interface", Const, 0}, + {"Invalid", Const, 0}, + {"Kind", Type, 0}, + {"MakeChan", Func, 0}, + 
{"MakeFunc", Func, 1}, + {"MakeMap", Func, 0}, + {"MakeMapWithSize", Func, 9}, + {"MakeSlice", Func, 0}, + {"Map", Const, 0}, + {"MapIter", Type, 12}, + {"MapOf", Func, 1}, + {"Method", Type, 0}, + {"Method.Func", Field, 0}, + {"Method.Index", Field, 0}, + {"Method.Name", Field, 0}, + {"Method.PkgPath", Field, 0}, + {"Method.Type", Field, 0}, + {"New", Func, 0}, + {"NewAt", Func, 0}, + {"Pointer", Const, 18}, + {"PointerTo", Func, 18}, + {"Ptr", Const, 0}, + {"PtrTo", Func, 0}, + {"RecvDir", Const, 0}, + {"Select", Func, 1}, + {"SelectCase", Type, 1}, + {"SelectCase.Chan", Field, 1}, + {"SelectCase.Dir", Field, 1}, + {"SelectCase.Send", Field, 1}, + {"SelectDefault", Const, 1}, + {"SelectDir", Type, 1}, + {"SelectRecv", Const, 1}, + {"SelectSend", Const, 1}, + {"SendDir", Const, 0}, + {"Slice", Const, 0}, + {"SliceHeader", Type, 0}, + {"SliceHeader.Cap", Field, 0}, + {"SliceHeader.Data", Field, 0}, + {"SliceHeader.Len", Field, 0}, + {"SliceOf", Func, 1}, + {"String", Const, 0}, + {"StringHeader", Type, 0}, + {"StringHeader.Data", Field, 0}, + {"StringHeader.Len", Field, 0}, + {"Struct", Const, 0}, + {"StructField", Type, 0}, + {"StructField.Anonymous", Field, 0}, + {"StructField.Index", Field, 0}, + {"StructField.Name", Field, 0}, + {"StructField.Offset", Field, 0}, + {"StructField.PkgPath", Field, 0}, + {"StructField.Tag", Field, 0}, + {"StructField.Type", Field, 0}, + {"StructOf", Func, 7}, + {"StructTag", Type, 0}, + {"Swapper", Func, 8}, + {"Type", Type, 0}, + {"TypeFor", Func, 22}, + {"TypeOf", Func, 0}, + {"Uint", Const, 0}, + {"Uint16", Const, 0}, + {"Uint32", Const, 0}, + {"Uint64", Const, 0}, + {"Uint8", Const, 0}, + {"Uintptr", Const, 0}, + {"UnsafePointer", Const, 0}, + {"Value", Type, 0}, + {"ValueError", Type, 0}, + {"ValueError.Kind", Field, 0}, + {"ValueError.Method", Field, 0}, + {"ValueOf", Func, 0}, + {"VisibleFields", Func, 17}, + {"Zero", Func, 0}, + }, + "regexp": { + {"(*Regexp).Copy", Method, 6}, + {"(*Regexp).Expand", Method, 0}, + {"(*Regexp).ExpandString", Method, 0}, + {"(*Regexp).Find", Method, 0}, + {"(*Regexp).FindAll", Method, 0}, + {"(*Regexp).FindAllIndex", Method, 0}, + {"(*Regexp).FindAllString", Method, 0}, + {"(*Regexp).FindAllStringIndex", Method, 0}, + {"(*Regexp).FindAllStringSubmatch", Method, 0}, + {"(*Regexp).FindAllStringSubmatchIndex", Method, 0}, + {"(*Regexp).FindAllSubmatch", Method, 0}, + {"(*Regexp).FindAllSubmatchIndex", Method, 0}, + {"(*Regexp).FindIndex", Method, 0}, + {"(*Regexp).FindReaderIndex", Method, 0}, + {"(*Regexp).FindReaderSubmatchIndex", Method, 0}, + {"(*Regexp).FindString", Method, 0}, + {"(*Regexp).FindStringIndex", Method, 0}, + {"(*Regexp).FindStringSubmatch", Method, 0}, + {"(*Regexp).FindStringSubmatchIndex", Method, 0}, + {"(*Regexp).FindSubmatch", Method, 0}, + {"(*Regexp).FindSubmatchIndex", Method, 0}, + {"(*Regexp).LiteralPrefix", Method, 0}, + {"(*Regexp).Longest", Method, 1}, + {"(*Regexp).MarshalText", Method, 21}, + {"(*Regexp).Match", Method, 0}, + {"(*Regexp).MatchReader", Method, 0}, + {"(*Regexp).MatchString", Method, 0}, + {"(*Regexp).NumSubexp", Method, 0}, + {"(*Regexp).ReplaceAll", Method, 0}, + {"(*Regexp).ReplaceAllFunc", Method, 0}, + {"(*Regexp).ReplaceAllLiteral", Method, 0}, + {"(*Regexp).ReplaceAllLiteralString", Method, 0}, + {"(*Regexp).ReplaceAllString", Method, 0}, + {"(*Regexp).ReplaceAllStringFunc", Method, 0}, + {"(*Regexp).Split", Method, 1}, + {"(*Regexp).String", Method, 0}, + {"(*Regexp).SubexpIndex", Method, 15}, + {"(*Regexp).SubexpNames", Method, 0}, + {"(*Regexp).UnmarshalText", 
Method, 21}, + {"Compile", Func, 0}, + {"CompilePOSIX", Func, 0}, + {"Match", Func, 0}, + {"MatchReader", Func, 0}, + {"MatchString", Func, 0}, + {"MustCompile", Func, 0}, + {"MustCompilePOSIX", Func, 0}, + {"QuoteMeta", Func, 0}, + {"Regexp", Type, 0}, + }, + "regexp/syntax": { + {"(*Error).Error", Method, 0}, + {"(*Inst).MatchEmptyWidth", Method, 0}, + {"(*Inst).MatchRune", Method, 0}, + {"(*Inst).MatchRunePos", Method, 3}, + {"(*Inst).String", Method, 0}, + {"(*Prog).Prefix", Method, 0}, + {"(*Prog).StartCond", Method, 0}, + {"(*Prog).String", Method, 0}, + {"(*Regexp).CapNames", Method, 0}, + {"(*Regexp).Equal", Method, 0}, + {"(*Regexp).MaxCap", Method, 0}, + {"(*Regexp).Simplify", Method, 0}, + {"(*Regexp).String", Method, 0}, + {"(ErrorCode).String", Method, 0}, + {"(InstOp).String", Method, 3}, + {"(Op).String", Method, 11}, + {"ClassNL", Const, 0}, + {"Compile", Func, 0}, + {"DotNL", Const, 0}, + {"EmptyBeginLine", Const, 0}, + {"EmptyBeginText", Const, 0}, + {"EmptyEndLine", Const, 0}, + {"EmptyEndText", Const, 0}, + {"EmptyNoWordBoundary", Const, 0}, + {"EmptyOp", Type, 0}, + {"EmptyOpContext", Func, 0}, + {"EmptyWordBoundary", Const, 0}, + {"ErrInternalError", Const, 0}, + {"ErrInvalidCharClass", Const, 0}, + {"ErrInvalidCharRange", Const, 0}, + {"ErrInvalidEscape", Const, 0}, + {"ErrInvalidNamedCapture", Const, 0}, + {"ErrInvalidPerlOp", Const, 0}, + {"ErrInvalidRepeatOp", Const, 0}, + {"ErrInvalidRepeatSize", Const, 0}, + {"ErrInvalidUTF8", Const, 0}, + {"ErrLarge", Const, 20}, + {"ErrMissingBracket", Const, 0}, + {"ErrMissingParen", Const, 0}, + {"ErrMissingRepeatArgument", Const, 0}, + {"ErrNestingDepth", Const, 19}, + {"ErrTrailingBackslash", Const, 0}, + {"ErrUnexpectedParen", Const, 1}, + {"Error", Type, 0}, + {"Error.Code", Field, 0}, + {"Error.Expr", Field, 0}, + {"ErrorCode", Type, 0}, + {"Flags", Type, 0}, + {"FoldCase", Const, 0}, + {"Inst", Type, 0}, + {"Inst.Arg", Field, 0}, + {"Inst.Op", Field, 0}, + {"Inst.Out", Field, 0}, + {"Inst.Rune", Field, 0}, + {"InstAlt", Const, 0}, + {"InstAltMatch", Const, 0}, + {"InstCapture", Const, 0}, + {"InstEmptyWidth", Const, 0}, + {"InstFail", Const, 0}, + {"InstMatch", Const, 0}, + {"InstNop", Const, 0}, + {"InstOp", Type, 0}, + {"InstRune", Const, 0}, + {"InstRune1", Const, 0}, + {"InstRuneAny", Const, 0}, + {"InstRuneAnyNotNL", Const, 0}, + {"IsWordChar", Func, 0}, + {"Literal", Const, 0}, + {"MatchNL", Const, 0}, + {"NonGreedy", Const, 0}, + {"OneLine", Const, 0}, + {"Op", Type, 0}, + {"OpAlternate", Const, 0}, + {"OpAnyChar", Const, 0}, + {"OpAnyCharNotNL", Const, 0}, + {"OpBeginLine", Const, 0}, + {"OpBeginText", Const, 0}, + {"OpCapture", Const, 0}, + {"OpCharClass", Const, 0}, + {"OpConcat", Const, 0}, + {"OpEmptyMatch", Const, 0}, + {"OpEndLine", Const, 0}, + {"OpEndText", Const, 0}, + {"OpLiteral", Const, 0}, + {"OpNoMatch", Const, 0}, + {"OpNoWordBoundary", Const, 0}, + {"OpPlus", Const, 0}, + {"OpQuest", Const, 0}, + {"OpRepeat", Const, 0}, + {"OpStar", Const, 0}, + {"OpWordBoundary", Const, 0}, + {"POSIX", Const, 0}, + {"Parse", Func, 0}, + {"Perl", Const, 0}, + {"PerlX", Const, 0}, + {"Prog", Type, 0}, + {"Prog.Inst", Field, 0}, + {"Prog.NumCap", Field, 0}, + {"Prog.Start", Field, 0}, + {"Regexp", Type, 0}, + {"Regexp.Cap", Field, 0}, + {"Regexp.Flags", Field, 0}, + {"Regexp.Max", Field, 0}, + {"Regexp.Min", Field, 0}, + {"Regexp.Name", Field, 0}, + {"Regexp.Op", Field, 0}, + {"Regexp.Rune", Field, 0}, + {"Regexp.Rune0", Field, 0}, + {"Regexp.Sub", Field, 0}, + {"Regexp.Sub0", Field, 0}, + {"Simple", Const, 0}, + 
{"UnicodeGroups", Const, 0}, + {"WasDollar", Const, 0}, + }, + "runtime": { + {"(*BlockProfileRecord).Stack", Method, 1}, + {"(*Frames).Next", Method, 7}, + {"(*Func).Entry", Method, 0}, + {"(*Func).FileLine", Method, 0}, + {"(*Func).Name", Method, 0}, + {"(*MemProfileRecord).InUseBytes", Method, 0}, + {"(*MemProfileRecord).InUseObjects", Method, 0}, + {"(*MemProfileRecord).Stack", Method, 0}, + {"(*PanicNilError).Error", Method, 21}, + {"(*PanicNilError).RuntimeError", Method, 21}, + {"(*Pinner).Pin", Method, 21}, + {"(*Pinner).Unpin", Method, 21}, + {"(*StackRecord).Stack", Method, 0}, + {"(*TypeAssertionError).Error", Method, 0}, + {"(*TypeAssertionError).RuntimeError", Method, 0}, + {"BlockProfile", Func, 1}, + {"BlockProfileRecord", Type, 1}, + {"BlockProfileRecord.Count", Field, 1}, + {"BlockProfileRecord.Cycles", Field, 1}, + {"BlockProfileRecord.StackRecord", Field, 1}, + {"Breakpoint", Func, 0}, + {"CPUProfile", Func, 0}, + {"Caller", Func, 0}, + {"Callers", Func, 0}, + {"CallersFrames", Func, 7}, + {"Compiler", Const, 0}, + {"Error", Type, 0}, + {"Frame", Type, 7}, + {"Frame.Entry", Field, 7}, + {"Frame.File", Field, 7}, + {"Frame.Func", Field, 7}, + {"Frame.Function", Field, 7}, + {"Frame.Line", Field, 7}, + {"Frame.PC", Field, 7}, + {"Frames", Type, 7}, + {"Func", Type, 0}, + {"FuncForPC", Func, 0}, + {"GC", Func, 0}, + {"GOARCH", Const, 0}, + {"GOMAXPROCS", Func, 0}, + {"GOOS", Const, 0}, + {"GOROOT", Func, 0}, + {"Goexit", Func, 0}, + {"GoroutineProfile", Func, 0}, + {"Gosched", Func, 0}, + {"KeepAlive", Func, 7}, + {"LockOSThread", Func, 0}, + {"MemProfile", Func, 0}, + {"MemProfileRate", Var, 0}, + {"MemProfileRecord", Type, 0}, + {"MemProfileRecord.AllocBytes", Field, 0}, + {"MemProfileRecord.AllocObjects", Field, 0}, + {"MemProfileRecord.FreeBytes", Field, 0}, + {"MemProfileRecord.FreeObjects", Field, 0}, + {"MemProfileRecord.Stack0", Field, 0}, + {"MemStats", Type, 0}, + {"MemStats.Alloc", Field, 0}, + {"MemStats.BuckHashSys", Field, 0}, + {"MemStats.BySize", Field, 0}, + {"MemStats.DebugGC", Field, 0}, + {"MemStats.EnableGC", Field, 0}, + {"MemStats.Frees", Field, 0}, + {"MemStats.GCCPUFraction", Field, 5}, + {"MemStats.GCSys", Field, 2}, + {"MemStats.HeapAlloc", Field, 0}, + {"MemStats.HeapIdle", Field, 0}, + {"MemStats.HeapInuse", Field, 0}, + {"MemStats.HeapObjects", Field, 0}, + {"MemStats.HeapReleased", Field, 0}, + {"MemStats.HeapSys", Field, 0}, + {"MemStats.LastGC", Field, 0}, + {"MemStats.Lookups", Field, 0}, + {"MemStats.MCacheInuse", Field, 0}, + {"MemStats.MCacheSys", Field, 0}, + {"MemStats.MSpanInuse", Field, 0}, + {"MemStats.MSpanSys", Field, 0}, + {"MemStats.Mallocs", Field, 0}, + {"MemStats.NextGC", Field, 0}, + {"MemStats.NumForcedGC", Field, 8}, + {"MemStats.NumGC", Field, 0}, + {"MemStats.OtherSys", Field, 2}, + {"MemStats.PauseEnd", Field, 4}, + {"MemStats.PauseNs", Field, 0}, + {"MemStats.PauseTotalNs", Field, 0}, + {"MemStats.StackInuse", Field, 0}, + {"MemStats.StackSys", Field, 0}, + {"MemStats.Sys", Field, 0}, + {"MemStats.TotalAlloc", Field, 0}, + {"MutexProfile", Func, 8}, + {"NumCPU", Func, 0}, + {"NumCgoCall", Func, 0}, + {"NumGoroutine", Func, 0}, + {"PanicNilError", Type, 21}, + {"Pinner", Type, 21}, + {"ReadMemStats", Func, 0}, + {"ReadTrace", Func, 5}, + {"SetBlockProfileRate", Func, 1}, + {"SetCPUProfileRate", Func, 0}, + {"SetCgoTraceback", Func, 7}, + {"SetFinalizer", Func, 0}, + {"SetMutexProfileFraction", Func, 8}, + {"Stack", Func, 0}, + {"StackRecord", Type, 0}, + {"StackRecord.Stack0", Field, 0}, + {"StartTrace", Func, 5}, + 
{"StopTrace", Func, 5}, + {"ThreadCreateProfile", Func, 0}, + {"TypeAssertionError", Type, 0}, + {"UnlockOSThread", Func, 0}, + {"Version", Func, 0}, + }, + "runtime/cgo": { + {"(Handle).Delete", Method, 17}, + {"(Handle).Value", Method, 17}, + {"Handle", Type, 17}, + {"Incomplete", Type, 20}, + {"NewHandle", Func, 17}, + }, + "runtime/coverage": { + {"ClearCounters", Func, 20}, + {"WriteCounters", Func, 20}, + {"WriteCountersDir", Func, 20}, + {"WriteMeta", Func, 20}, + {"WriteMetaDir", Func, 20}, + }, + "runtime/debug": { + {"(*BuildInfo).String", Method, 18}, + {"BuildInfo", Type, 12}, + {"BuildInfo.Deps", Field, 12}, + {"BuildInfo.GoVersion", Field, 18}, + {"BuildInfo.Main", Field, 12}, + {"BuildInfo.Path", Field, 12}, + {"BuildInfo.Settings", Field, 18}, + {"BuildSetting", Type, 18}, + {"BuildSetting.Key", Field, 18}, + {"BuildSetting.Value", Field, 18}, + {"FreeOSMemory", Func, 1}, + {"GCStats", Type, 1}, + {"GCStats.LastGC", Field, 1}, + {"GCStats.NumGC", Field, 1}, + {"GCStats.Pause", Field, 1}, + {"GCStats.PauseEnd", Field, 4}, + {"GCStats.PauseQuantiles", Field, 1}, + {"GCStats.PauseTotal", Field, 1}, + {"Module", Type, 12}, + {"Module.Path", Field, 12}, + {"Module.Replace", Field, 12}, + {"Module.Sum", Field, 12}, + {"Module.Version", Field, 12}, + {"ParseBuildInfo", Func, 18}, + {"PrintStack", Func, 0}, + {"ReadBuildInfo", Func, 12}, + {"ReadGCStats", Func, 1}, + {"SetGCPercent", Func, 1}, + {"SetMaxStack", Func, 2}, + {"SetMaxThreads", Func, 2}, + {"SetMemoryLimit", Func, 19}, + {"SetPanicOnFault", Func, 3}, + {"SetTraceback", Func, 6}, + {"Stack", Func, 0}, + {"WriteHeapDump", Func, 3}, + }, + "runtime/metrics": { + {"(Value).Float64", Method, 16}, + {"(Value).Float64Histogram", Method, 16}, + {"(Value).Kind", Method, 16}, + {"(Value).Uint64", Method, 16}, + {"All", Func, 16}, + {"Description", Type, 16}, + {"Description.Cumulative", Field, 16}, + {"Description.Description", Field, 16}, + {"Description.Kind", Field, 16}, + {"Description.Name", Field, 16}, + {"Float64Histogram", Type, 16}, + {"Float64Histogram.Buckets", Field, 16}, + {"Float64Histogram.Counts", Field, 16}, + {"KindBad", Const, 16}, + {"KindFloat64", Const, 16}, + {"KindFloat64Histogram", Const, 16}, + {"KindUint64", Const, 16}, + {"Read", Func, 16}, + {"Sample", Type, 16}, + {"Sample.Name", Field, 16}, + {"Sample.Value", Field, 16}, + {"Value", Type, 16}, + {"ValueKind", Type, 16}, + }, + "runtime/pprof": { + {"(*Profile).Add", Method, 0}, + {"(*Profile).Count", Method, 0}, + {"(*Profile).Name", Method, 0}, + {"(*Profile).Remove", Method, 0}, + {"(*Profile).WriteTo", Method, 0}, + {"Do", Func, 9}, + {"ForLabels", Func, 9}, + {"Label", Func, 9}, + {"LabelSet", Type, 9}, + {"Labels", Func, 9}, + {"Lookup", Func, 0}, + {"NewProfile", Func, 0}, + {"Profile", Type, 0}, + {"Profiles", Func, 0}, + {"SetGoroutineLabels", Func, 9}, + {"StartCPUProfile", Func, 0}, + {"StopCPUProfile", Func, 0}, + {"WithLabels", Func, 9}, + {"WriteHeapProfile", Func, 0}, + }, + "runtime/trace": { + {"(*Region).End", Method, 11}, + {"(*Task).End", Method, 11}, + {"IsEnabled", Func, 11}, + {"Log", Func, 11}, + {"Logf", Func, 11}, + {"NewTask", Func, 11}, + {"Region", Type, 11}, + {"Start", Func, 5}, + {"StartRegion", Func, 11}, + {"Stop", Func, 5}, + {"Task", Type, 11}, + {"WithRegion", Func, 11}, + }, + "slices": { + {"BinarySearch", Func, 21}, + {"BinarySearchFunc", Func, 21}, + {"Clip", Func, 21}, + {"Clone", Func, 21}, + {"Compact", Func, 21}, + {"CompactFunc", Func, 21}, + {"Compare", Func, 21}, + {"CompareFunc", Func, 21}, + 
{"Concat", Func, 22}, + {"Contains", Func, 21}, + {"ContainsFunc", Func, 21}, + {"Delete", Func, 21}, + {"DeleteFunc", Func, 21}, + {"Equal", Func, 21}, + {"EqualFunc", Func, 21}, + {"Grow", Func, 21}, + {"Index", Func, 21}, + {"IndexFunc", Func, 21}, + {"Insert", Func, 21}, + {"IsSorted", Func, 21}, + {"IsSortedFunc", Func, 21}, + {"Max", Func, 21}, + {"MaxFunc", Func, 21}, + {"Min", Func, 21}, + {"MinFunc", Func, 21}, + {"Replace", Func, 21}, + {"Reverse", Func, 21}, + {"Sort", Func, 21}, + {"SortFunc", Func, 21}, + {"SortStableFunc", Func, 21}, + }, + "sort": { + {"(Float64Slice).Len", Method, 0}, + {"(Float64Slice).Less", Method, 0}, + {"(Float64Slice).Search", Method, 0}, + {"(Float64Slice).Sort", Method, 0}, + {"(Float64Slice).Swap", Method, 0}, + {"(IntSlice).Len", Method, 0}, + {"(IntSlice).Less", Method, 0}, + {"(IntSlice).Search", Method, 0}, + {"(IntSlice).Sort", Method, 0}, + {"(IntSlice).Swap", Method, 0}, + {"(StringSlice).Len", Method, 0}, + {"(StringSlice).Less", Method, 0}, + {"(StringSlice).Search", Method, 0}, + {"(StringSlice).Sort", Method, 0}, + {"(StringSlice).Swap", Method, 0}, + {"Find", Func, 19}, + {"Float64Slice", Type, 0}, + {"Float64s", Func, 0}, + {"Float64sAreSorted", Func, 0}, + {"IntSlice", Type, 0}, + {"Interface", Type, 0}, + {"Ints", Func, 0}, + {"IntsAreSorted", Func, 0}, + {"IsSorted", Func, 0}, + {"Reverse", Func, 1}, + {"Search", Func, 0}, + {"SearchFloat64s", Func, 0}, + {"SearchInts", Func, 0}, + {"SearchStrings", Func, 0}, + {"Slice", Func, 8}, + {"SliceIsSorted", Func, 8}, + {"SliceStable", Func, 8}, + {"Sort", Func, 0}, + {"Stable", Func, 2}, + {"StringSlice", Type, 0}, + {"Strings", Func, 0}, + {"StringsAreSorted", Func, 0}, + }, + "strconv": { + {"(*NumError).Error", Method, 0}, + {"(*NumError).Unwrap", Method, 14}, + {"AppendBool", Func, 0}, + {"AppendFloat", Func, 0}, + {"AppendInt", Func, 0}, + {"AppendQuote", Func, 0}, + {"AppendQuoteRune", Func, 0}, + {"AppendQuoteRuneToASCII", Func, 0}, + {"AppendQuoteRuneToGraphic", Func, 6}, + {"AppendQuoteToASCII", Func, 0}, + {"AppendQuoteToGraphic", Func, 6}, + {"AppendUint", Func, 0}, + {"Atoi", Func, 0}, + {"CanBackquote", Func, 0}, + {"ErrRange", Var, 0}, + {"ErrSyntax", Var, 0}, + {"FormatBool", Func, 0}, + {"FormatComplex", Func, 15}, + {"FormatFloat", Func, 0}, + {"FormatInt", Func, 0}, + {"FormatUint", Func, 0}, + {"IntSize", Const, 0}, + {"IsGraphic", Func, 6}, + {"IsPrint", Func, 0}, + {"Itoa", Func, 0}, + {"NumError", Type, 0}, + {"NumError.Err", Field, 0}, + {"NumError.Func", Field, 0}, + {"NumError.Num", Field, 0}, + {"ParseBool", Func, 0}, + {"ParseComplex", Func, 15}, + {"ParseFloat", Func, 0}, + {"ParseInt", Func, 0}, + {"ParseUint", Func, 0}, + {"Quote", Func, 0}, + {"QuoteRune", Func, 0}, + {"QuoteRuneToASCII", Func, 0}, + {"QuoteRuneToGraphic", Func, 6}, + {"QuoteToASCII", Func, 0}, + {"QuoteToGraphic", Func, 6}, + {"QuotedPrefix", Func, 17}, + {"Unquote", Func, 0}, + {"UnquoteChar", Func, 0}, + }, + "strings": { + {"(*Builder).Cap", Method, 12}, + {"(*Builder).Grow", Method, 10}, + {"(*Builder).Len", Method, 10}, + {"(*Builder).Reset", Method, 10}, + {"(*Builder).String", Method, 10}, + {"(*Builder).Write", Method, 10}, + {"(*Builder).WriteByte", Method, 10}, + {"(*Builder).WriteRune", Method, 10}, + {"(*Builder).WriteString", Method, 10}, + {"(*Reader).Len", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadAt", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).Reset", Method, 7}, + {"(*Reader).Seek", Method, 0}, + 
{"(*Reader).Size", Method, 5}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"(*Replacer).Replace", Method, 0}, + {"(*Replacer).WriteString", Method, 0}, + {"Builder", Type, 10}, + {"Clone", Func, 18}, + {"Compare", Func, 5}, + {"Contains", Func, 0}, + {"ContainsAny", Func, 0}, + {"ContainsFunc", Func, 21}, + {"ContainsRune", Func, 0}, + {"Count", Func, 0}, + {"Cut", Func, 18}, + {"CutPrefix", Func, 20}, + {"CutSuffix", Func, 20}, + {"EqualFold", Func, 0}, + {"Fields", Func, 0}, + {"FieldsFunc", Func, 0}, + {"HasPrefix", Func, 0}, + {"HasSuffix", Func, 0}, + {"Index", Func, 0}, + {"IndexAny", Func, 0}, + {"IndexByte", Func, 2}, + {"IndexFunc", Func, 0}, + {"IndexRune", Func, 0}, + {"Join", Func, 0}, + {"LastIndex", Func, 0}, + {"LastIndexAny", Func, 0}, + {"LastIndexByte", Func, 5}, + {"LastIndexFunc", Func, 0}, + {"Map", Func, 0}, + {"NewReader", Func, 0}, + {"NewReplacer", Func, 0}, + {"Reader", Type, 0}, + {"Repeat", Func, 0}, + {"Replace", Func, 0}, + {"ReplaceAll", Func, 12}, + {"Replacer", Type, 0}, + {"Split", Func, 0}, + {"SplitAfter", Func, 0}, + {"SplitAfterN", Func, 0}, + {"SplitN", Func, 0}, + {"Title", Func, 0}, + {"ToLower", Func, 0}, + {"ToLowerSpecial", Func, 0}, + {"ToTitle", Func, 0}, + {"ToTitleSpecial", Func, 0}, + {"ToUpper", Func, 0}, + {"ToUpperSpecial", Func, 0}, + {"ToValidUTF8", Func, 13}, + {"Trim", Func, 0}, + {"TrimFunc", Func, 0}, + {"TrimLeft", Func, 0}, + {"TrimLeftFunc", Func, 0}, + {"TrimPrefix", Func, 1}, + {"TrimRight", Func, 0}, + {"TrimRightFunc", Func, 0}, + {"TrimSpace", Func, 0}, + {"TrimSuffix", Func, 1}, + }, + "sync": { + {"(*Cond).Broadcast", Method, 0}, + {"(*Cond).Signal", Method, 0}, + {"(*Cond).Wait", Method, 0}, + {"(*Map).CompareAndDelete", Method, 20}, + {"(*Map).CompareAndSwap", Method, 20}, + {"(*Map).Delete", Method, 9}, + {"(*Map).Load", Method, 9}, + {"(*Map).LoadAndDelete", Method, 15}, + {"(*Map).LoadOrStore", Method, 9}, + {"(*Map).Range", Method, 9}, + {"(*Map).Store", Method, 9}, + {"(*Map).Swap", Method, 20}, + {"(*Mutex).Lock", Method, 0}, + {"(*Mutex).TryLock", Method, 18}, + {"(*Mutex).Unlock", Method, 0}, + {"(*Once).Do", Method, 0}, + {"(*Pool).Get", Method, 3}, + {"(*Pool).Put", Method, 3}, + {"(*RWMutex).Lock", Method, 0}, + {"(*RWMutex).RLock", Method, 0}, + {"(*RWMutex).RLocker", Method, 0}, + {"(*RWMutex).RUnlock", Method, 0}, + {"(*RWMutex).TryLock", Method, 18}, + {"(*RWMutex).TryRLock", Method, 18}, + {"(*RWMutex).Unlock", Method, 0}, + {"(*WaitGroup).Add", Method, 0}, + {"(*WaitGroup).Done", Method, 0}, + {"(*WaitGroup).Wait", Method, 0}, + {"Cond", Type, 0}, + {"Cond.L", Field, 0}, + {"Locker", Type, 0}, + {"Map", Type, 9}, + {"Mutex", Type, 0}, + {"NewCond", Func, 0}, + {"Once", Type, 0}, + {"OnceFunc", Func, 21}, + {"OnceValue", Func, 21}, + {"OnceValues", Func, 21}, + {"Pool", Type, 3}, + {"Pool.New", Field, 3}, + {"RWMutex", Type, 0}, + {"WaitGroup", Type, 0}, + }, + "sync/atomic": { + {"(*Bool).CompareAndSwap", Method, 19}, + {"(*Bool).Load", Method, 19}, + {"(*Bool).Store", Method, 19}, + {"(*Bool).Swap", Method, 19}, + {"(*Int32).Add", Method, 19}, + {"(*Int32).CompareAndSwap", Method, 19}, + {"(*Int32).Load", Method, 19}, + {"(*Int32).Store", Method, 19}, + {"(*Int32).Swap", Method, 19}, + {"(*Int64).Add", Method, 19}, + {"(*Int64).CompareAndSwap", Method, 19}, + {"(*Int64).Load", Method, 19}, + {"(*Int64).Store", Method, 19}, + {"(*Int64).Swap", Method, 19}, + {"(*Pointer).CompareAndSwap", Method, 19}, + {"(*Pointer).Load", Method, 
19}, + {"(*Pointer).Store", Method, 19}, + {"(*Pointer).Swap", Method, 19}, + {"(*Uint32).Add", Method, 19}, + {"(*Uint32).CompareAndSwap", Method, 19}, + {"(*Uint32).Load", Method, 19}, + {"(*Uint32).Store", Method, 19}, + {"(*Uint32).Swap", Method, 19}, + {"(*Uint64).Add", Method, 19}, + {"(*Uint64).CompareAndSwap", Method, 19}, + {"(*Uint64).Load", Method, 19}, + {"(*Uint64).Store", Method, 19}, + {"(*Uint64).Swap", Method, 19}, + {"(*Uintptr).Add", Method, 19}, + {"(*Uintptr).CompareAndSwap", Method, 19}, + {"(*Uintptr).Load", Method, 19}, + {"(*Uintptr).Store", Method, 19}, + {"(*Uintptr).Swap", Method, 19}, + {"(*Value).CompareAndSwap", Method, 17}, + {"(*Value).Load", Method, 4}, + {"(*Value).Store", Method, 4}, + {"(*Value).Swap", Method, 17}, + {"AddInt32", Func, 0}, + {"AddInt64", Func, 0}, + {"AddUint32", Func, 0}, + {"AddUint64", Func, 0}, + {"AddUintptr", Func, 0}, + {"Bool", Type, 19}, + {"CompareAndSwapInt32", Func, 0}, + {"CompareAndSwapInt64", Func, 0}, + {"CompareAndSwapPointer", Func, 0}, + {"CompareAndSwapUint32", Func, 0}, + {"CompareAndSwapUint64", Func, 0}, + {"CompareAndSwapUintptr", Func, 0}, + {"Int32", Type, 19}, + {"Int64", Type, 19}, + {"LoadInt32", Func, 0}, + {"LoadInt64", Func, 0}, + {"LoadPointer", Func, 0}, + {"LoadUint32", Func, 0}, + {"LoadUint64", Func, 0}, + {"LoadUintptr", Func, 0}, + {"Pointer", Type, 19}, + {"StoreInt32", Func, 0}, + {"StoreInt64", Func, 0}, + {"StorePointer", Func, 0}, + {"StoreUint32", Func, 0}, + {"StoreUint64", Func, 0}, + {"StoreUintptr", Func, 0}, + {"SwapInt32", Func, 2}, + {"SwapInt64", Func, 2}, + {"SwapPointer", Func, 2}, + {"SwapUint32", Func, 2}, + {"SwapUint64", Func, 2}, + {"SwapUintptr", Func, 2}, + {"Uint32", Type, 19}, + {"Uint64", Type, 19}, + {"Uintptr", Type, 19}, + {"Value", Type, 4}, + }, + "syscall": { + {"(*Cmsghdr).SetLen", Method, 0}, + {"(*DLL).FindProc", Method, 0}, + {"(*DLL).MustFindProc", Method, 0}, + {"(*DLL).Release", Method, 0}, + {"(*DLLError).Error", Method, 0}, + {"(*DLLError).Unwrap", Method, 16}, + {"(*Filetime).Nanoseconds", Method, 0}, + {"(*Iovec).SetLen", Method, 0}, + {"(*LazyDLL).Handle", Method, 0}, + {"(*LazyDLL).Load", Method, 0}, + {"(*LazyDLL).NewProc", Method, 0}, + {"(*LazyProc).Addr", Method, 0}, + {"(*LazyProc).Call", Method, 0}, + {"(*LazyProc).Find", Method, 0}, + {"(*Msghdr).SetControllen", Method, 0}, + {"(*Proc).Addr", Method, 0}, + {"(*Proc).Call", Method, 0}, + {"(*PtraceRegs).PC", Method, 0}, + {"(*PtraceRegs).SetPC", Method, 0}, + {"(*RawSockaddrAny).Sockaddr", Method, 0}, + {"(*SID).Copy", Method, 0}, + {"(*SID).Len", Method, 0}, + {"(*SID).LookupAccount", Method, 0}, + {"(*SID).String", Method, 0}, + {"(*Timespec).Nano", Method, 0}, + {"(*Timespec).Unix", Method, 0}, + {"(*Timeval).Nano", Method, 0}, + {"(*Timeval).Nanoseconds", Method, 0}, + {"(*Timeval).Unix", Method, 0}, + {"(Errno).Error", Method, 0}, + {"(Errno).Is", Method, 13}, + {"(Errno).Temporary", Method, 0}, + {"(Errno).Timeout", Method, 0}, + {"(Signal).Signal", Method, 0}, + {"(Signal).String", Method, 0}, + {"(Token).Close", Method, 0}, + {"(Token).GetTokenPrimaryGroup", Method, 0}, + {"(Token).GetTokenUser", Method, 0}, + {"(Token).GetUserProfileDirectory", Method, 0}, + {"(WaitStatus).Continued", Method, 0}, + {"(WaitStatus).CoreDump", Method, 0}, + {"(WaitStatus).ExitStatus", Method, 0}, + {"(WaitStatus).Exited", Method, 0}, + {"(WaitStatus).Signal", Method, 0}, + {"(WaitStatus).Signaled", Method, 0}, + {"(WaitStatus).StopSignal", Method, 0}, + {"(WaitStatus).Stopped", Method, 0}, + 
{"(WaitStatus).TrapCause", Method, 0}, + {"AF_ALG", Const, 0}, + {"AF_APPLETALK", Const, 0}, + {"AF_ARP", Const, 0}, + {"AF_ASH", Const, 0}, + {"AF_ATM", Const, 0}, + {"AF_ATMPVC", Const, 0}, + {"AF_ATMSVC", Const, 0}, + {"AF_AX25", Const, 0}, + {"AF_BLUETOOTH", Const, 0}, + {"AF_BRIDGE", Const, 0}, + {"AF_CAIF", Const, 0}, + {"AF_CAN", Const, 0}, + {"AF_CCITT", Const, 0}, + {"AF_CHAOS", Const, 0}, + {"AF_CNT", Const, 0}, + {"AF_COIP", Const, 0}, + {"AF_DATAKIT", Const, 0}, + {"AF_DECnet", Const, 0}, + {"AF_DLI", Const, 0}, + {"AF_E164", Const, 0}, + {"AF_ECMA", Const, 0}, + {"AF_ECONET", Const, 0}, + {"AF_ENCAP", Const, 1}, + {"AF_FILE", Const, 0}, + {"AF_HYLINK", Const, 0}, + {"AF_IEEE80211", Const, 0}, + {"AF_IEEE802154", Const, 0}, + {"AF_IMPLINK", Const, 0}, + {"AF_INET", Const, 0}, + {"AF_INET6", Const, 0}, + {"AF_INET6_SDP", Const, 3}, + {"AF_INET_SDP", Const, 3}, + {"AF_IPX", Const, 0}, + {"AF_IRDA", Const, 0}, + {"AF_ISDN", Const, 0}, + {"AF_ISO", Const, 0}, + {"AF_IUCV", Const, 0}, + {"AF_KEY", Const, 0}, + {"AF_LAT", Const, 0}, + {"AF_LINK", Const, 0}, + {"AF_LLC", Const, 0}, + {"AF_LOCAL", Const, 0}, + {"AF_MAX", Const, 0}, + {"AF_MPLS", Const, 1}, + {"AF_NATM", Const, 0}, + {"AF_NDRV", Const, 0}, + {"AF_NETBEUI", Const, 0}, + {"AF_NETBIOS", Const, 0}, + {"AF_NETGRAPH", Const, 0}, + {"AF_NETLINK", Const, 0}, + {"AF_NETROM", Const, 0}, + {"AF_NS", Const, 0}, + {"AF_OROUTE", Const, 1}, + {"AF_OSI", Const, 0}, + {"AF_PACKET", Const, 0}, + {"AF_PHONET", Const, 0}, + {"AF_PPP", Const, 0}, + {"AF_PPPOX", Const, 0}, + {"AF_PUP", Const, 0}, + {"AF_RDS", Const, 0}, + {"AF_RESERVED_36", Const, 0}, + {"AF_ROSE", Const, 0}, + {"AF_ROUTE", Const, 0}, + {"AF_RXRPC", Const, 0}, + {"AF_SCLUSTER", Const, 0}, + {"AF_SECURITY", Const, 0}, + {"AF_SIP", Const, 0}, + {"AF_SLOW", Const, 0}, + {"AF_SNA", Const, 0}, + {"AF_SYSTEM", Const, 0}, + {"AF_TIPC", Const, 0}, + {"AF_UNIX", Const, 0}, + {"AF_UNSPEC", Const, 0}, + {"AF_UTUN", Const, 16}, + {"AF_VENDOR00", Const, 0}, + {"AF_VENDOR01", Const, 0}, + {"AF_VENDOR02", Const, 0}, + {"AF_VENDOR03", Const, 0}, + {"AF_VENDOR04", Const, 0}, + {"AF_VENDOR05", Const, 0}, + {"AF_VENDOR06", Const, 0}, + {"AF_VENDOR07", Const, 0}, + {"AF_VENDOR08", Const, 0}, + {"AF_VENDOR09", Const, 0}, + {"AF_VENDOR10", Const, 0}, + {"AF_VENDOR11", Const, 0}, + {"AF_VENDOR12", Const, 0}, + {"AF_VENDOR13", Const, 0}, + {"AF_VENDOR14", Const, 0}, + {"AF_VENDOR15", Const, 0}, + {"AF_VENDOR16", Const, 0}, + {"AF_VENDOR17", Const, 0}, + {"AF_VENDOR18", Const, 0}, + {"AF_VENDOR19", Const, 0}, + {"AF_VENDOR20", Const, 0}, + {"AF_VENDOR21", Const, 0}, + {"AF_VENDOR22", Const, 0}, + {"AF_VENDOR23", Const, 0}, + {"AF_VENDOR24", Const, 0}, + {"AF_VENDOR25", Const, 0}, + {"AF_VENDOR26", Const, 0}, + {"AF_VENDOR27", Const, 0}, + {"AF_VENDOR28", Const, 0}, + {"AF_VENDOR29", Const, 0}, + {"AF_VENDOR30", Const, 0}, + {"AF_VENDOR31", Const, 0}, + {"AF_VENDOR32", Const, 0}, + {"AF_VENDOR33", Const, 0}, + {"AF_VENDOR34", Const, 0}, + {"AF_VENDOR35", Const, 0}, + {"AF_VENDOR36", Const, 0}, + {"AF_VENDOR37", Const, 0}, + {"AF_VENDOR38", Const, 0}, + {"AF_VENDOR39", Const, 0}, + {"AF_VENDOR40", Const, 0}, + {"AF_VENDOR41", Const, 0}, + {"AF_VENDOR42", Const, 0}, + {"AF_VENDOR43", Const, 0}, + {"AF_VENDOR44", Const, 0}, + {"AF_VENDOR45", Const, 0}, + {"AF_VENDOR46", Const, 0}, + {"AF_VENDOR47", Const, 0}, + {"AF_WANPIPE", Const, 0}, + {"AF_X25", Const, 0}, + {"AI_CANONNAME", Const, 1}, + {"AI_NUMERICHOST", Const, 1}, + {"AI_PASSIVE", Const, 1}, + {"APPLICATION_ERROR", Const, 0}, + {"ARPHRD_ADAPT", 
Const, 0}, + {"ARPHRD_APPLETLK", Const, 0}, + {"ARPHRD_ARCNET", Const, 0}, + {"ARPHRD_ASH", Const, 0}, + {"ARPHRD_ATM", Const, 0}, + {"ARPHRD_AX25", Const, 0}, + {"ARPHRD_BIF", Const, 0}, + {"ARPHRD_CHAOS", Const, 0}, + {"ARPHRD_CISCO", Const, 0}, + {"ARPHRD_CSLIP", Const, 0}, + {"ARPHRD_CSLIP6", Const, 0}, + {"ARPHRD_DDCMP", Const, 0}, + {"ARPHRD_DLCI", Const, 0}, + {"ARPHRD_ECONET", Const, 0}, + {"ARPHRD_EETHER", Const, 0}, + {"ARPHRD_ETHER", Const, 0}, + {"ARPHRD_EUI64", Const, 0}, + {"ARPHRD_FCAL", Const, 0}, + {"ARPHRD_FCFABRIC", Const, 0}, + {"ARPHRD_FCPL", Const, 0}, + {"ARPHRD_FCPP", Const, 0}, + {"ARPHRD_FDDI", Const, 0}, + {"ARPHRD_FRAD", Const, 0}, + {"ARPHRD_FRELAY", Const, 1}, + {"ARPHRD_HDLC", Const, 0}, + {"ARPHRD_HIPPI", Const, 0}, + {"ARPHRD_HWX25", Const, 0}, + {"ARPHRD_IEEE1394", Const, 0}, + {"ARPHRD_IEEE802", Const, 0}, + {"ARPHRD_IEEE80211", Const, 0}, + {"ARPHRD_IEEE80211_PRISM", Const, 0}, + {"ARPHRD_IEEE80211_RADIOTAP", Const, 0}, + {"ARPHRD_IEEE802154", Const, 0}, + {"ARPHRD_IEEE802154_PHY", Const, 0}, + {"ARPHRD_IEEE802_TR", Const, 0}, + {"ARPHRD_INFINIBAND", Const, 0}, + {"ARPHRD_IPDDP", Const, 0}, + {"ARPHRD_IPGRE", Const, 0}, + {"ARPHRD_IRDA", Const, 0}, + {"ARPHRD_LAPB", Const, 0}, + {"ARPHRD_LOCALTLK", Const, 0}, + {"ARPHRD_LOOPBACK", Const, 0}, + {"ARPHRD_METRICOM", Const, 0}, + {"ARPHRD_NETROM", Const, 0}, + {"ARPHRD_NONE", Const, 0}, + {"ARPHRD_PIMREG", Const, 0}, + {"ARPHRD_PPP", Const, 0}, + {"ARPHRD_PRONET", Const, 0}, + {"ARPHRD_RAWHDLC", Const, 0}, + {"ARPHRD_ROSE", Const, 0}, + {"ARPHRD_RSRVD", Const, 0}, + {"ARPHRD_SIT", Const, 0}, + {"ARPHRD_SKIP", Const, 0}, + {"ARPHRD_SLIP", Const, 0}, + {"ARPHRD_SLIP6", Const, 0}, + {"ARPHRD_STRIP", Const, 1}, + {"ARPHRD_TUNNEL", Const, 0}, + {"ARPHRD_TUNNEL6", Const, 0}, + {"ARPHRD_VOID", Const, 0}, + {"ARPHRD_X25", Const, 0}, + {"AUTHTYPE_CLIENT", Const, 0}, + {"AUTHTYPE_SERVER", Const, 0}, + {"Accept", Func, 0}, + {"Accept4", Func, 1}, + {"AcceptEx", Func, 0}, + {"Access", Func, 0}, + {"Acct", Func, 0}, + {"AddrinfoW", Type, 1}, + {"AddrinfoW.Addr", Field, 1}, + {"AddrinfoW.Addrlen", Field, 1}, + {"AddrinfoW.Canonname", Field, 1}, + {"AddrinfoW.Family", Field, 1}, + {"AddrinfoW.Flags", Field, 1}, + {"AddrinfoW.Next", Field, 1}, + {"AddrinfoW.Protocol", Field, 1}, + {"AddrinfoW.Socktype", Field, 1}, + {"Adjtime", Func, 0}, + {"Adjtimex", Func, 0}, + {"AllThreadsSyscall", Func, 16}, + {"AllThreadsSyscall6", Func, 16}, + {"AttachLsf", Func, 0}, + {"B0", Const, 0}, + {"B1000000", Const, 0}, + {"B110", Const, 0}, + {"B115200", Const, 0}, + {"B1152000", Const, 0}, + {"B1200", Const, 0}, + {"B134", Const, 0}, + {"B14400", Const, 1}, + {"B150", Const, 0}, + {"B1500000", Const, 0}, + {"B1800", Const, 0}, + {"B19200", Const, 0}, + {"B200", Const, 0}, + {"B2000000", Const, 0}, + {"B230400", Const, 0}, + {"B2400", Const, 0}, + {"B2500000", Const, 0}, + {"B28800", Const, 1}, + {"B300", Const, 0}, + {"B3000000", Const, 0}, + {"B3500000", Const, 0}, + {"B38400", Const, 0}, + {"B4000000", Const, 0}, + {"B460800", Const, 0}, + {"B4800", Const, 0}, + {"B50", Const, 0}, + {"B500000", Const, 0}, + {"B57600", Const, 0}, + {"B576000", Const, 0}, + {"B600", Const, 0}, + {"B7200", Const, 1}, + {"B75", Const, 0}, + {"B76800", Const, 1}, + {"B921600", Const, 0}, + {"B9600", Const, 0}, + {"BASE_PROTOCOL", Const, 2}, + {"BIOCFEEDBACK", Const, 0}, + {"BIOCFLUSH", Const, 0}, + {"BIOCGBLEN", Const, 0}, + {"BIOCGDIRECTION", Const, 0}, + {"BIOCGDIRFILT", Const, 1}, + {"BIOCGDLT", Const, 0}, + {"BIOCGDLTLIST", Const, 0}, + {"BIOCGETBUFMODE", 
Const, 0}, + {"BIOCGETIF", Const, 0}, + {"BIOCGETZMAX", Const, 0}, + {"BIOCGFEEDBACK", Const, 1}, + {"BIOCGFILDROP", Const, 1}, + {"BIOCGHDRCMPLT", Const, 0}, + {"BIOCGRSIG", Const, 0}, + {"BIOCGRTIMEOUT", Const, 0}, + {"BIOCGSEESENT", Const, 0}, + {"BIOCGSTATS", Const, 0}, + {"BIOCGSTATSOLD", Const, 1}, + {"BIOCGTSTAMP", Const, 1}, + {"BIOCIMMEDIATE", Const, 0}, + {"BIOCLOCK", Const, 0}, + {"BIOCPROMISC", Const, 0}, + {"BIOCROTZBUF", Const, 0}, + {"BIOCSBLEN", Const, 0}, + {"BIOCSDIRECTION", Const, 0}, + {"BIOCSDIRFILT", Const, 1}, + {"BIOCSDLT", Const, 0}, + {"BIOCSETBUFMODE", Const, 0}, + {"BIOCSETF", Const, 0}, + {"BIOCSETFNR", Const, 0}, + {"BIOCSETIF", Const, 0}, + {"BIOCSETWF", Const, 0}, + {"BIOCSETZBUF", Const, 0}, + {"BIOCSFEEDBACK", Const, 1}, + {"BIOCSFILDROP", Const, 1}, + {"BIOCSHDRCMPLT", Const, 0}, + {"BIOCSRSIG", Const, 0}, + {"BIOCSRTIMEOUT", Const, 0}, + {"BIOCSSEESENT", Const, 0}, + {"BIOCSTCPF", Const, 1}, + {"BIOCSTSTAMP", Const, 1}, + {"BIOCSUDPF", Const, 1}, + {"BIOCVERSION", Const, 0}, + {"BPF_A", Const, 0}, + {"BPF_ABS", Const, 0}, + {"BPF_ADD", Const, 0}, + {"BPF_ALIGNMENT", Const, 0}, + {"BPF_ALIGNMENT32", Const, 1}, + {"BPF_ALU", Const, 0}, + {"BPF_AND", Const, 0}, + {"BPF_B", Const, 0}, + {"BPF_BUFMODE_BUFFER", Const, 0}, + {"BPF_BUFMODE_ZBUF", Const, 0}, + {"BPF_DFLTBUFSIZE", Const, 1}, + {"BPF_DIRECTION_IN", Const, 1}, + {"BPF_DIRECTION_OUT", Const, 1}, + {"BPF_DIV", Const, 0}, + {"BPF_H", Const, 0}, + {"BPF_IMM", Const, 0}, + {"BPF_IND", Const, 0}, + {"BPF_JA", Const, 0}, + {"BPF_JEQ", Const, 0}, + {"BPF_JGE", Const, 0}, + {"BPF_JGT", Const, 0}, + {"BPF_JMP", Const, 0}, + {"BPF_JSET", Const, 0}, + {"BPF_K", Const, 0}, + {"BPF_LD", Const, 0}, + {"BPF_LDX", Const, 0}, + {"BPF_LEN", Const, 0}, + {"BPF_LSH", Const, 0}, + {"BPF_MAJOR_VERSION", Const, 0}, + {"BPF_MAXBUFSIZE", Const, 0}, + {"BPF_MAXINSNS", Const, 0}, + {"BPF_MEM", Const, 0}, + {"BPF_MEMWORDS", Const, 0}, + {"BPF_MINBUFSIZE", Const, 0}, + {"BPF_MINOR_VERSION", Const, 0}, + {"BPF_MISC", Const, 0}, + {"BPF_MSH", Const, 0}, + {"BPF_MUL", Const, 0}, + {"BPF_NEG", Const, 0}, + {"BPF_OR", Const, 0}, + {"BPF_RELEASE", Const, 0}, + {"BPF_RET", Const, 0}, + {"BPF_RSH", Const, 0}, + {"BPF_ST", Const, 0}, + {"BPF_STX", Const, 0}, + {"BPF_SUB", Const, 0}, + {"BPF_TAX", Const, 0}, + {"BPF_TXA", Const, 0}, + {"BPF_T_BINTIME", Const, 1}, + {"BPF_T_BINTIME_FAST", Const, 1}, + {"BPF_T_BINTIME_MONOTONIC", Const, 1}, + {"BPF_T_BINTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_FAST", Const, 1}, + {"BPF_T_FLAG_MASK", Const, 1}, + {"BPF_T_FORMAT_MASK", Const, 1}, + {"BPF_T_MICROTIME", Const, 1}, + {"BPF_T_MICROTIME_FAST", Const, 1}, + {"BPF_T_MICROTIME_MONOTONIC", Const, 1}, + {"BPF_T_MICROTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_MONOTONIC", Const, 1}, + {"BPF_T_MONOTONIC_FAST", Const, 1}, + {"BPF_T_NANOTIME", Const, 1}, + {"BPF_T_NANOTIME_FAST", Const, 1}, + {"BPF_T_NANOTIME_MONOTONIC", Const, 1}, + {"BPF_T_NANOTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_NONE", Const, 1}, + {"BPF_T_NORMAL", Const, 1}, + {"BPF_W", Const, 0}, + {"BPF_X", Const, 0}, + {"BRKINT", Const, 0}, + {"Bind", Func, 0}, + {"BindToDevice", Func, 0}, + {"BpfBuflen", Func, 0}, + {"BpfDatalink", Func, 0}, + {"BpfHdr", Type, 0}, + {"BpfHdr.Caplen", Field, 0}, + {"BpfHdr.Datalen", Field, 0}, + {"BpfHdr.Hdrlen", Field, 0}, + {"BpfHdr.Pad_cgo_0", Field, 0}, + {"BpfHdr.Tstamp", Field, 0}, + {"BpfHeadercmpl", Func, 0}, + {"BpfInsn", Type, 0}, + {"BpfInsn.Code", Field, 0}, + {"BpfInsn.Jf", Field, 0}, + {"BpfInsn.Jt", Field, 0}, + {"BpfInsn.K", Field, 0}, + 
{"BpfInterface", Func, 0}, + {"BpfJump", Func, 0}, + {"BpfProgram", Type, 0}, + {"BpfProgram.Insns", Field, 0}, + {"BpfProgram.Len", Field, 0}, + {"BpfProgram.Pad_cgo_0", Field, 0}, + {"BpfStat", Type, 0}, + {"BpfStat.Capt", Field, 2}, + {"BpfStat.Drop", Field, 0}, + {"BpfStat.Padding", Field, 2}, + {"BpfStat.Recv", Field, 0}, + {"BpfStats", Func, 0}, + {"BpfStmt", Func, 0}, + {"BpfTimeout", Func, 0}, + {"BpfTimeval", Type, 2}, + {"BpfTimeval.Sec", Field, 2}, + {"BpfTimeval.Usec", Field, 2}, + {"BpfVersion", Type, 0}, + {"BpfVersion.Major", Field, 0}, + {"BpfVersion.Minor", Field, 0}, + {"BpfZbuf", Type, 0}, + {"BpfZbuf.Bufa", Field, 0}, + {"BpfZbuf.Bufb", Field, 0}, + {"BpfZbuf.Buflen", Field, 0}, + {"BpfZbufHeader", Type, 0}, + {"BpfZbufHeader.Kernel_gen", Field, 0}, + {"BpfZbufHeader.Kernel_len", Field, 0}, + {"BpfZbufHeader.User_gen", Field, 0}, + {"BpfZbufHeader.X_bzh_pad", Field, 0}, + {"ByHandleFileInformation", Type, 0}, + {"ByHandleFileInformation.CreationTime", Field, 0}, + {"ByHandleFileInformation.FileAttributes", Field, 0}, + {"ByHandleFileInformation.FileIndexHigh", Field, 0}, + {"ByHandleFileInformation.FileIndexLow", Field, 0}, + {"ByHandleFileInformation.FileSizeHigh", Field, 0}, + {"ByHandleFileInformation.FileSizeLow", Field, 0}, + {"ByHandleFileInformation.LastAccessTime", Field, 0}, + {"ByHandleFileInformation.LastWriteTime", Field, 0}, + {"ByHandleFileInformation.NumberOfLinks", Field, 0}, + {"ByHandleFileInformation.VolumeSerialNumber", Field, 0}, + {"BytePtrFromString", Func, 1}, + {"ByteSliceFromString", Func, 1}, + {"CCR0_FLUSH", Const, 1}, + {"CERT_CHAIN_POLICY_AUTHENTICODE", Const, 0}, + {"CERT_CHAIN_POLICY_AUTHENTICODE_TS", Const, 0}, + {"CERT_CHAIN_POLICY_BASE", Const, 0}, + {"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", Const, 0}, + {"CERT_CHAIN_POLICY_EV", Const, 0}, + {"CERT_CHAIN_POLICY_MICROSOFT_ROOT", Const, 0}, + {"CERT_CHAIN_POLICY_NT_AUTH", Const, 0}, + {"CERT_CHAIN_POLICY_SSL", Const, 0}, + {"CERT_E_CN_NO_MATCH", Const, 0}, + {"CERT_E_EXPIRED", Const, 0}, + {"CERT_E_PURPOSE", Const, 0}, + {"CERT_E_ROLE", Const, 0}, + {"CERT_E_UNTRUSTEDROOT", Const, 0}, + {"CERT_STORE_ADD_ALWAYS", Const, 0}, + {"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", Const, 0}, + {"CERT_STORE_PROV_MEMORY", Const, 0}, + {"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", Const, 0}, + {"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_INVALID_BASIC_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_INVALID_EXTENSION", Const, 0}, + {"CERT_TRUST_INVALID_NAME_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_INVALID_POLICY_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_IS_CYCLIC", Const, 0}, + {"CERT_TRUST_IS_EXPLICIT_DISTRUST", Const, 0}, + {"CERT_TRUST_IS_NOT_SIGNATURE_VALID", Const, 0}, + {"CERT_TRUST_IS_NOT_TIME_VALID", Const, 0}, + {"CERT_TRUST_IS_NOT_VALID_FOR_USAGE", Const, 0}, + {"CERT_TRUST_IS_OFFLINE_REVOCATION", Const, 0}, + {"CERT_TRUST_IS_REVOKED", Const, 0}, + {"CERT_TRUST_IS_UNTRUSTED_ROOT", Const, 0}, + {"CERT_TRUST_NO_ERROR", Const, 0}, + {"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", Const, 0}, + {"CERT_TRUST_REVOCATION_STATUS_UNKNOWN", Const, 0}, + {"CFLUSH", Const, 1}, + {"CLOCAL", Const, 0}, + {"CLONE_CHILD_CLEARTID", Const, 2}, + {"CLONE_CHILD_SETTID", Const, 2}, + {"CLONE_CLEAR_SIGHAND", Const, 20}, + {"CLONE_CSIGNAL", Const, 3}, + {"CLONE_DETACHED", Const, 2}, + {"CLONE_FILES", Const, 2}, + {"CLONE_FS", 
Const, 2}, + {"CLONE_INTO_CGROUP", Const, 20}, + {"CLONE_IO", Const, 2}, + {"CLONE_NEWCGROUP", Const, 20}, + {"CLONE_NEWIPC", Const, 2}, + {"CLONE_NEWNET", Const, 2}, + {"CLONE_NEWNS", Const, 2}, + {"CLONE_NEWPID", Const, 2}, + {"CLONE_NEWTIME", Const, 20}, + {"CLONE_NEWUSER", Const, 2}, + {"CLONE_NEWUTS", Const, 2}, + {"CLONE_PARENT", Const, 2}, + {"CLONE_PARENT_SETTID", Const, 2}, + {"CLONE_PID", Const, 3}, + {"CLONE_PIDFD", Const, 20}, + {"CLONE_PTRACE", Const, 2}, + {"CLONE_SETTLS", Const, 2}, + {"CLONE_SIGHAND", Const, 2}, + {"CLONE_SYSVSEM", Const, 2}, + {"CLONE_THREAD", Const, 2}, + {"CLONE_UNTRACED", Const, 2}, + {"CLONE_VFORK", Const, 2}, + {"CLONE_VM", Const, 2}, + {"CPUID_CFLUSH", Const, 1}, + {"CREAD", Const, 0}, + {"CREATE_ALWAYS", Const, 0}, + {"CREATE_NEW", Const, 0}, + {"CREATE_NEW_PROCESS_GROUP", Const, 1}, + {"CREATE_UNICODE_ENVIRONMENT", Const, 0}, + {"CRYPT_DEFAULT_CONTAINER_OPTIONAL", Const, 0}, + {"CRYPT_DELETEKEYSET", Const, 0}, + {"CRYPT_MACHINE_KEYSET", Const, 0}, + {"CRYPT_NEWKEYSET", Const, 0}, + {"CRYPT_SILENT", Const, 0}, + {"CRYPT_VERIFYCONTEXT", Const, 0}, + {"CS5", Const, 0}, + {"CS6", Const, 0}, + {"CS7", Const, 0}, + {"CS8", Const, 0}, + {"CSIZE", Const, 0}, + {"CSTART", Const, 1}, + {"CSTATUS", Const, 1}, + {"CSTOP", Const, 1}, + {"CSTOPB", Const, 0}, + {"CSUSP", Const, 1}, + {"CTL_MAXNAME", Const, 0}, + {"CTL_NET", Const, 0}, + {"CTL_QUERY", Const, 1}, + {"CTRL_BREAK_EVENT", Const, 1}, + {"CTRL_CLOSE_EVENT", Const, 14}, + {"CTRL_C_EVENT", Const, 1}, + {"CTRL_LOGOFF_EVENT", Const, 14}, + {"CTRL_SHUTDOWN_EVENT", Const, 14}, + {"CancelIo", Func, 0}, + {"CancelIoEx", Func, 1}, + {"CertAddCertificateContextToStore", Func, 0}, + {"CertChainContext", Type, 0}, + {"CertChainContext.ChainCount", Field, 0}, + {"CertChainContext.Chains", Field, 0}, + {"CertChainContext.HasRevocationFreshnessTime", Field, 0}, + {"CertChainContext.LowerQualityChainCount", Field, 0}, + {"CertChainContext.LowerQualityChains", Field, 0}, + {"CertChainContext.RevocationFreshnessTime", Field, 0}, + {"CertChainContext.Size", Field, 0}, + {"CertChainContext.TrustStatus", Field, 0}, + {"CertChainElement", Type, 0}, + {"CertChainElement.ApplicationUsage", Field, 0}, + {"CertChainElement.CertContext", Field, 0}, + {"CertChainElement.ExtendedErrorInfo", Field, 0}, + {"CertChainElement.IssuanceUsage", Field, 0}, + {"CertChainElement.RevocationInfo", Field, 0}, + {"CertChainElement.Size", Field, 0}, + {"CertChainElement.TrustStatus", Field, 0}, + {"CertChainPara", Type, 0}, + {"CertChainPara.CacheResync", Field, 0}, + {"CertChainPara.CheckRevocationFreshnessTime", Field, 0}, + {"CertChainPara.RequestedUsage", Field, 0}, + {"CertChainPara.RequstedIssuancePolicy", Field, 0}, + {"CertChainPara.RevocationFreshnessTime", Field, 0}, + {"CertChainPara.Size", Field, 0}, + {"CertChainPara.URLRetrievalTimeout", Field, 0}, + {"CertChainPolicyPara", Type, 0}, + {"CertChainPolicyPara.ExtraPolicyPara", Field, 0}, + {"CertChainPolicyPara.Flags", Field, 0}, + {"CertChainPolicyPara.Size", Field, 0}, + {"CertChainPolicyStatus", Type, 0}, + {"CertChainPolicyStatus.ChainIndex", Field, 0}, + {"CertChainPolicyStatus.ElementIndex", Field, 0}, + {"CertChainPolicyStatus.Error", Field, 0}, + {"CertChainPolicyStatus.ExtraPolicyStatus", Field, 0}, + {"CertChainPolicyStatus.Size", Field, 0}, + {"CertCloseStore", Func, 0}, + {"CertContext", Type, 0}, + {"CertContext.CertInfo", Field, 0}, + {"CertContext.EncodedCert", Field, 0}, + {"CertContext.EncodingType", Field, 0}, + {"CertContext.Length", Field, 0}, + 
{"CertContext.Store", Field, 0}, + {"CertCreateCertificateContext", Func, 0}, + {"CertEnhKeyUsage", Type, 0}, + {"CertEnhKeyUsage.Length", Field, 0}, + {"CertEnhKeyUsage.UsageIdentifiers", Field, 0}, + {"CertEnumCertificatesInStore", Func, 0}, + {"CertFreeCertificateChain", Func, 0}, + {"CertFreeCertificateContext", Func, 0}, + {"CertGetCertificateChain", Func, 0}, + {"CertInfo", Type, 11}, + {"CertOpenStore", Func, 0}, + {"CertOpenSystemStore", Func, 0}, + {"CertRevocationCrlInfo", Type, 11}, + {"CertRevocationInfo", Type, 0}, + {"CertRevocationInfo.CrlInfo", Field, 0}, + {"CertRevocationInfo.FreshnessTime", Field, 0}, + {"CertRevocationInfo.HasFreshnessTime", Field, 0}, + {"CertRevocationInfo.OidSpecificInfo", Field, 0}, + {"CertRevocationInfo.RevocationOid", Field, 0}, + {"CertRevocationInfo.RevocationResult", Field, 0}, + {"CertRevocationInfo.Size", Field, 0}, + {"CertSimpleChain", Type, 0}, + {"CertSimpleChain.Elements", Field, 0}, + {"CertSimpleChain.HasRevocationFreshnessTime", Field, 0}, + {"CertSimpleChain.NumElements", Field, 0}, + {"CertSimpleChain.RevocationFreshnessTime", Field, 0}, + {"CertSimpleChain.Size", Field, 0}, + {"CertSimpleChain.TrustListInfo", Field, 0}, + {"CertSimpleChain.TrustStatus", Field, 0}, + {"CertTrustListInfo", Type, 11}, + {"CertTrustStatus", Type, 0}, + {"CertTrustStatus.ErrorStatus", Field, 0}, + {"CertTrustStatus.InfoStatus", Field, 0}, + {"CertUsageMatch", Type, 0}, + {"CertUsageMatch.Type", Field, 0}, + {"CertUsageMatch.Usage", Field, 0}, + {"CertVerifyCertificateChainPolicy", Func, 0}, + {"Chdir", Func, 0}, + {"CheckBpfVersion", Func, 0}, + {"Chflags", Func, 0}, + {"Chmod", Func, 0}, + {"Chown", Func, 0}, + {"Chroot", Func, 0}, + {"Clearenv", Func, 0}, + {"Close", Func, 0}, + {"CloseHandle", Func, 0}, + {"CloseOnExec", Func, 0}, + {"Closesocket", Func, 0}, + {"CmsgLen", Func, 0}, + {"CmsgSpace", Func, 0}, + {"Cmsghdr", Type, 0}, + {"Cmsghdr.Len", Field, 0}, + {"Cmsghdr.Level", Field, 0}, + {"Cmsghdr.Type", Field, 0}, + {"Cmsghdr.X__cmsg_data", Field, 0}, + {"CommandLineToArgv", Func, 0}, + {"ComputerName", Func, 0}, + {"Conn", Type, 9}, + {"Connect", Func, 0}, + {"ConnectEx", Func, 1}, + {"ConvertSidToStringSid", Func, 0}, + {"ConvertStringSidToSid", Func, 0}, + {"CopySid", Func, 0}, + {"Creat", Func, 0}, + {"CreateDirectory", Func, 0}, + {"CreateFile", Func, 0}, + {"CreateFileMapping", Func, 0}, + {"CreateHardLink", Func, 4}, + {"CreateIoCompletionPort", Func, 0}, + {"CreatePipe", Func, 0}, + {"CreateProcess", Func, 0}, + {"CreateProcessAsUser", Func, 10}, + {"CreateSymbolicLink", Func, 4}, + {"CreateToolhelp32Snapshot", Func, 4}, + {"Credential", Type, 0}, + {"Credential.Gid", Field, 0}, + {"Credential.Groups", Field, 0}, + {"Credential.NoSetGroups", Field, 9}, + {"Credential.Uid", Field, 0}, + {"CryptAcquireContext", Func, 0}, + {"CryptGenRandom", Func, 0}, + {"CryptReleaseContext", Func, 0}, + {"DIOCBSFLUSH", Const, 1}, + {"DIOCOSFPFLUSH", Const, 1}, + {"DLL", Type, 0}, + {"DLL.Handle", Field, 0}, + {"DLL.Name", Field, 0}, + {"DLLError", Type, 0}, + {"DLLError.Err", Field, 0}, + {"DLLError.Msg", Field, 0}, + {"DLLError.ObjName", Field, 0}, + {"DLT_A429", Const, 0}, + {"DLT_A653_ICM", Const, 0}, + {"DLT_AIRONET_HEADER", Const, 0}, + {"DLT_AOS", Const, 1}, + {"DLT_APPLE_IP_OVER_IEEE1394", Const, 0}, + {"DLT_ARCNET", Const, 0}, + {"DLT_ARCNET_LINUX", Const, 0}, + {"DLT_ATM_CLIP", Const, 0}, + {"DLT_ATM_RFC1483", Const, 0}, + {"DLT_AURORA", Const, 0}, + {"DLT_AX25", Const, 0}, + {"DLT_AX25_KISS", Const, 0}, + {"DLT_BACNET_MS_TP", Const, 0}, + 
{"DLT_BLUETOOTH_HCI_H4", Const, 0}, + {"DLT_BLUETOOTH_HCI_H4_WITH_PHDR", Const, 0}, + {"DLT_CAN20B", Const, 0}, + {"DLT_CAN_SOCKETCAN", Const, 1}, + {"DLT_CHAOS", Const, 0}, + {"DLT_CHDLC", Const, 0}, + {"DLT_CISCO_IOS", Const, 0}, + {"DLT_C_HDLC", Const, 0}, + {"DLT_C_HDLC_WITH_DIR", Const, 0}, + {"DLT_DBUS", Const, 1}, + {"DLT_DECT", Const, 1}, + {"DLT_DOCSIS", Const, 0}, + {"DLT_DVB_CI", Const, 1}, + {"DLT_ECONET", Const, 0}, + {"DLT_EN10MB", Const, 0}, + {"DLT_EN3MB", Const, 0}, + {"DLT_ENC", Const, 0}, + {"DLT_ERF", Const, 0}, + {"DLT_ERF_ETH", Const, 0}, + {"DLT_ERF_POS", Const, 0}, + {"DLT_FC_2", Const, 1}, + {"DLT_FC_2_WITH_FRAME_DELIMS", Const, 1}, + {"DLT_FDDI", Const, 0}, + {"DLT_FLEXRAY", Const, 0}, + {"DLT_FRELAY", Const, 0}, + {"DLT_FRELAY_WITH_DIR", Const, 0}, + {"DLT_GCOM_SERIAL", Const, 0}, + {"DLT_GCOM_T1E1", Const, 0}, + {"DLT_GPF_F", Const, 0}, + {"DLT_GPF_T", Const, 0}, + {"DLT_GPRS_LLC", Const, 0}, + {"DLT_GSMTAP_ABIS", Const, 1}, + {"DLT_GSMTAP_UM", Const, 1}, + {"DLT_HDLC", Const, 1}, + {"DLT_HHDLC", Const, 0}, + {"DLT_HIPPI", Const, 1}, + {"DLT_IBM_SN", Const, 0}, + {"DLT_IBM_SP", Const, 0}, + {"DLT_IEEE802", Const, 0}, + {"DLT_IEEE802_11", Const, 0}, + {"DLT_IEEE802_11_RADIO", Const, 0}, + {"DLT_IEEE802_11_RADIO_AVS", Const, 0}, + {"DLT_IEEE802_15_4", Const, 0}, + {"DLT_IEEE802_15_4_LINUX", Const, 0}, + {"DLT_IEEE802_15_4_NOFCS", Const, 1}, + {"DLT_IEEE802_15_4_NONASK_PHY", Const, 0}, + {"DLT_IEEE802_16_MAC_CPS", Const, 0}, + {"DLT_IEEE802_16_MAC_CPS_RADIO", Const, 0}, + {"DLT_IPFILTER", Const, 0}, + {"DLT_IPMB", Const, 0}, + {"DLT_IPMB_LINUX", Const, 0}, + {"DLT_IPNET", Const, 1}, + {"DLT_IPOIB", Const, 1}, + {"DLT_IPV4", Const, 1}, + {"DLT_IPV6", Const, 1}, + {"DLT_IP_OVER_FC", Const, 0}, + {"DLT_JUNIPER_ATM1", Const, 0}, + {"DLT_JUNIPER_ATM2", Const, 0}, + {"DLT_JUNIPER_ATM_CEMIC", Const, 1}, + {"DLT_JUNIPER_CHDLC", Const, 0}, + {"DLT_JUNIPER_ES", Const, 0}, + {"DLT_JUNIPER_ETHER", Const, 0}, + {"DLT_JUNIPER_FIBRECHANNEL", Const, 1}, + {"DLT_JUNIPER_FRELAY", Const, 0}, + {"DLT_JUNIPER_GGSN", Const, 0}, + {"DLT_JUNIPER_ISM", Const, 0}, + {"DLT_JUNIPER_MFR", Const, 0}, + {"DLT_JUNIPER_MLFR", Const, 0}, + {"DLT_JUNIPER_MLPPP", Const, 0}, + {"DLT_JUNIPER_MONITOR", Const, 0}, + {"DLT_JUNIPER_PIC_PEER", Const, 0}, + {"DLT_JUNIPER_PPP", Const, 0}, + {"DLT_JUNIPER_PPPOE", Const, 0}, + {"DLT_JUNIPER_PPPOE_ATM", Const, 0}, + {"DLT_JUNIPER_SERVICES", Const, 0}, + {"DLT_JUNIPER_SRX_E2E", Const, 1}, + {"DLT_JUNIPER_ST", Const, 0}, + {"DLT_JUNIPER_VP", Const, 0}, + {"DLT_JUNIPER_VS", Const, 1}, + {"DLT_LAPB_WITH_DIR", Const, 0}, + {"DLT_LAPD", Const, 0}, + {"DLT_LIN", Const, 0}, + {"DLT_LINUX_EVDEV", Const, 1}, + {"DLT_LINUX_IRDA", Const, 0}, + {"DLT_LINUX_LAPD", Const, 0}, + {"DLT_LINUX_PPP_WITHDIRECTION", Const, 0}, + {"DLT_LINUX_SLL", Const, 0}, + {"DLT_LOOP", Const, 0}, + {"DLT_LTALK", Const, 0}, + {"DLT_MATCHING_MAX", Const, 1}, + {"DLT_MATCHING_MIN", Const, 1}, + {"DLT_MFR", Const, 0}, + {"DLT_MOST", Const, 0}, + {"DLT_MPEG_2_TS", Const, 1}, + {"DLT_MPLS", Const, 1}, + {"DLT_MTP2", Const, 0}, + {"DLT_MTP2_WITH_PHDR", Const, 0}, + {"DLT_MTP3", Const, 0}, + {"DLT_MUX27010", Const, 1}, + {"DLT_NETANALYZER", Const, 1}, + {"DLT_NETANALYZER_TRANSPARENT", Const, 1}, + {"DLT_NFC_LLCP", Const, 1}, + {"DLT_NFLOG", Const, 1}, + {"DLT_NG40", Const, 1}, + {"DLT_NULL", Const, 0}, + {"DLT_PCI_EXP", Const, 0}, + {"DLT_PFLOG", Const, 0}, + {"DLT_PFSYNC", Const, 0}, + {"DLT_PPI", Const, 0}, + {"DLT_PPP", Const, 0}, + {"DLT_PPP_BSDOS", Const, 0}, + {"DLT_PPP_ETHER", Const, 0}, + 
{"DLT_PPP_PPPD", Const, 0}, + {"DLT_PPP_SERIAL", Const, 0}, + {"DLT_PPP_WITH_DIR", Const, 0}, + {"DLT_PPP_WITH_DIRECTION", Const, 0}, + {"DLT_PRISM_HEADER", Const, 0}, + {"DLT_PRONET", Const, 0}, + {"DLT_RAIF1", Const, 0}, + {"DLT_RAW", Const, 0}, + {"DLT_RAWAF_MASK", Const, 1}, + {"DLT_RIO", Const, 0}, + {"DLT_SCCP", Const, 0}, + {"DLT_SITA", Const, 0}, + {"DLT_SLIP", Const, 0}, + {"DLT_SLIP_BSDOS", Const, 0}, + {"DLT_STANAG_5066_D_PDU", Const, 1}, + {"DLT_SUNATM", Const, 0}, + {"DLT_SYMANTEC_FIREWALL", Const, 0}, + {"DLT_TZSP", Const, 0}, + {"DLT_USB", Const, 0}, + {"DLT_USB_LINUX", Const, 0}, + {"DLT_USB_LINUX_MMAPPED", Const, 1}, + {"DLT_USER0", Const, 0}, + {"DLT_USER1", Const, 0}, + {"DLT_USER10", Const, 0}, + {"DLT_USER11", Const, 0}, + {"DLT_USER12", Const, 0}, + {"DLT_USER13", Const, 0}, + {"DLT_USER14", Const, 0}, + {"DLT_USER15", Const, 0}, + {"DLT_USER2", Const, 0}, + {"DLT_USER3", Const, 0}, + {"DLT_USER4", Const, 0}, + {"DLT_USER5", Const, 0}, + {"DLT_USER6", Const, 0}, + {"DLT_USER7", Const, 0}, + {"DLT_USER8", Const, 0}, + {"DLT_USER9", Const, 0}, + {"DLT_WIHART", Const, 1}, + {"DLT_X2E_SERIAL", Const, 0}, + {"DLT_X2E_XORAYA", Const, 0}, + {"DNSMXData", Type, 0}, + {"DNSMXData.NameExchange", Field, 0}, + {"DNSMXData.Pad", Field, 0}, + {"DNSMXData.Preference", Field, 0}, + {"DNSPTRData", Type, 0}, + {"DNSPTRData.Host", Field, 0}, + {"DNSRecord", Type, 0}, + {"DNSRecord.Data", Field, 0}, + {"DNSRecord.Dw", Field, 0}, + {"DNSRecord.Length", Field, 0}, + {"DNSRecord.Name", Field, 0}, + {"DNSRecord.Next", Field, 0}, + {"DNSRecord.Reserved", Field, 0}, + {"DNSRecord.Ttl", Field, 0}, + {"DNSRecord.Type", Field, 0}, + {"DNSSRVData", Type, 0}, + {"DNSSRVData.Pad", Field, 0}, + {"DNSSRVData.Port", Field, 0}, + {"DNSSRVData.Priority", Field, 0}, + {"DNSSRVData.Target", Field, 0}, + {"DNSSRVData.Weight", Field, 0}, + {"DNSTXTData", Type, 0}, + {"DNSTXTData.StringArray", Field, 0}, + {"DNSTXTData.StringCount", Field, 0}, + {"DNS_INFO_NO_RECORDS", Const, 4}, + {"DNS_TYPE_A", Const, 0}, + {"DNS_TYPE_A6", Const, 0}, + {"DNS_TYPE_AAAA", Const, 0}, + {"DNS_TYPE_ADDRS", Const, 0}, + {"DNS_TYPE_AFSDB", Const, 0}, + {"DNS_TYPE_ALL", Const, 0}, + {"DNS_TYPE_ANY", Const, 0}, + {"DNS_TYPE_ATMA", Const, 0}, + {"DNS_TYPE_AXFR", Const, 0}, + {"DNS_TYPE_CERT", Const, 0}, + {"DNS_TYPE_CNAME", Const, 0}, + {"DNS_TYPE_DHCID", Const, 0}, + {"DNS_TYPE_DNAME", Const, 0}, + {"DNS_TYPE_DNSKEY", Const, 0}, + {"DNS_TYPE_DS", Const, 0}, + {"DNS_TYPE_EID", Const, 0}, + {"DNS_TYPE_GID", Const, 0}, + {"DNS_TYPE_GPOS", Const, 0}, + {"DNS_TYPE_HINFO", Const, 0}, + {"DNS_TYPE_ISDN", Const, 0}, + {"DNS_TYPE_IXFR", Const, 0}, + {"DNS_TYPE_KEY", Const, 0}, + {"DNS_TYPE_KX", Const, 0}, + {"DNS_TYPE_LOC", Const, 0}, + {"DNS_TYPE_MAILA", Const, 0}, + {"DNS_TYPE_MAILB", Const, 0}, + {"DNS_TYPE_MB", Const, 0}, + {"DNS_TYPE_MD", Const, 0}, + {"DNS_TYPE_MF", Const, 0}, + {"DNS_TYPE_MG", Const, 0}, + {"DNS_TYPE_MINFO", Const, 0}, + {"DNS_TYPE_MR", Const, 0}, + {"DNS_TYPE_MX", Const, 0}, + {"DNS_TYPE_NAPTR", Const, 0}, + {"DNS_TYPE_NBSTAT", Const, 0}, + {"DNS_TYPE_NIMLOC", Const, 0}, + {"DNS_TYPE_NS", Const, 0}, + {"DNS_TYPE_NSAP", Const, 0}, + {"DNS_TYPE_NSAPPTR", Const, 0}, + {"DNS_TYPE_NSEC", Const, 0}, + {"DNS_TYPE_NULL", Const, 0}, + {"DNS_TYPE_NXT", Const, 0}, + {"DNS_TYPE_OPT", Const, 0}, + {"DNS_TYPE_PTR", Const, 0}, + {"DNS_TYPE_PX", Const, 0}, + {"DNS_TYPE_RP", Const, 0}, + {"DNS_TYPE_RRSIG", Const, 0}, + {"DNS_TYPE_RT", Const, 0}, + {"DNS_TYPE_SIG", Const, 0}, + {"DNS_TYPE_SINK", Const, 0}, + {"DNS_TYPE_SOA", Const, 
0}, + {"DNS_TYPE_SRV", Const, 0}, + {"DNS_TYPE_TEXT", Const, 0}, + {"DNS_TYPE_TKEY", Const, 0}, + {"DNS_TYPE_TSIG", Const, 0}, + {"DNS_TYPE_UID", Const, 0}, + {"DNS_TYPE_UINFO", Const, 0}, + {"DNS_TYPE_UNSPEC", Const, 0}, + {"DNS_TYPE_WINS", Const, 0}, + {"DNS_TYPE_WINSR", Const, 0}, + {"DNS_TYPE_WKS", Const, 0}, + {"DNS_TYPE_X25", Const, 0}, + {"DT_BLK", Const, 0}, + {"DT_CHR", Const, 0}, + {"DT_DIR", Const, 0}, + {"DT_FIFO", Const, 0}, + {"DT_LNK", Const, 0}, + {"DT_REG", Const, 0}, + {"DT_SOCK", Const, 0}, + {"DT_UNKNOWN", Const, 0}, + {"DT_WHT", Const, 0}, + {"DUPLICATE_CLOSE_SOURCE", Const, 0}, + {"DUPLICATE_SAME_ACCESS", Const, 0}, + {"DeleteFile", Func, 0}, + {"DetachLsf", Func, 0}, + {"DeviceIoControl", Func, 4}, + {"Dirent", Type, 0}, + {"Dirent.Fileno", Field, 0}, + {"Dirent.Ino", Field, 0}, + {"Dirent.Name", Field, 0}, + {"Dirent.Namlen", Field, 0}, + {"Dirent.Off", Field, 0}, + {"Dirent.Pad0", Field, 12}, + {"Dirent.Pad1", Field, 12}, + {"Dirent.Pad_cgo_0", Field, 0}, + {"Dirent.Reclen", Field, 0}, + {"Dirent.Seekoff", Field, 0}, + {"Dirent.Type", Field, 0}, + {"Dirent.X__d_padding", Field, 3}, + {"DnsNameCompare", Func, 4}, + {"DnsQuery", Func, 0}, + {"DnsRecordListFree", Func, 0}, + {"DnsSectionAdditional", Const, 4}, + {"DnsSectionAnswer", Const, 4}, + {"DnsSectionAuthority", Const, 4}, + {"DnsSectionQuestion", Const, 4}, + {"Dup", Func, 0}, + {"Dup2", Func, 0}, + {"Dup3", Func, 2}, + {"DuplicateHandle", Func, 0}, + {"E2BIG", Const, 0}, + {"EACCES", Const, 0}, + {"EADDRINUSE", Const, 0}, + {"EADDRNOTAVAIL", Const, 0}, + {"EADV", Const, 0}, + {"EAFNOSUPPORT", Const, 0}, + {"EAGAIN", Const, 0}, + {"EALREADY", Const, 0}, + {"EAUTH", Const, 0}, + {"EBADARCH", Const, 0}, + {"EBADE", Const, 0}, + {"EBADEXEC", Const, 0}, + {"EBADF", Const, 0}, + {"EBADFD", Const, 0}, + {"EBADMACHO", Const, 0}, + {"EBADMSG", Const, 0}, + {"EBADR", Const, 0}, + {"EBADRPC", Const, 0}, + {"EBADRQC", Const, 0}, + {"EBADSLT", Const, 0}, + {"EBFONT", Const, 0}, + {"EBUSY", Const, 0}, + {"ECANCELED", Const, 0}, + {"ECAPMODE", Const, 1}, + {"ECHILD", Const, 0}, + {"ECHO", Const, 0}, + {"ECHOCTL", Const, 0}, + {"ECHOE", Const, 0}, + {"ECHOK", Const, 0}, + {"ECHOKE", Const, 0}, + {"ECHONL", Const, 0}, + {"ECHOPRT", Const, 0}, + {"ECHRNG", Const, 0}, + {"ECOMM", Const, 0}, + {"ECONNABORTED", Const, 0}, + {"ECONNREFUSED", Const, 0}, + {"ECONNRESET", Const, 0}, + {"EDEADLK", Const, 0}, + {"EDEADLOCK", Const, 0}, + {"EDESTADDRREQ", Const, 0}, + {"EDEVERR", Const, 0}, + {"EDOM", Const, 0}, + {"EDOOFUS", Const, 0}, + {"EDOTDOT", Const, 0}, + {"EDQUOT", Const, 0}, + {"EEXIST", Const, 0}, + {"EFAULT", Const, 0}, + {"EFBIG", Const, 0}, + {"EFER_LMA", Const, 1}, + {"EFER_LME", Const, 1}, + {"EFER_NXE", Const, 1}, + {"EFER_SCE", Const, 1}, + {"EFTYPE", Const, 0}, + {"EHOSTDOWN", Const, 0}, + {"EHOSTUNREACH", Const, 0}, + {"EHWPOISON", Const, 0}, + {"EIDRM", Const, 0}, + {"EILSEQ", Const, 0}, + {"EINPROGRESS", Const, 0}, + {"EINTR", Const, 0}, + {"EINVAL", Const, 0}, + {"EIO", Const, 0}, + {"EIPSEC", Const, 1}, + {"EISCONN", Const, 0}, + {"EISDIR", Const, 0}, + {"EISNAM", Const, 0}, + {"EKEYEXPIRED", Const, 0}, + {"EKEYREJECTED", Const, 0}, + {"EKEYREVOKED", Const, 0}, + {"EL2HLT", Const, 0}, + {"EL2NSYNC", Const, 0}, + {"EL3HLT", Const, 0}, + {"EL3RST", Const, 0}, + {"ELAST", Const, 0}, + {"ELF_NGREG", Const, 0}, + {"ELF_PRARGSZ", Const, 0}, + {"ELIBACC", Const, 0}, + {"ELIBBAD", Const, 0}, + {"ELIBEXEC", Const, 0}, + {"ELIBMAX", Const, 0}, + {"ELIBSCN", Const, 0}, + {"ELNRNG", Const, 0}, + {"ELOOP", Const, 0}, + 
{"EMEDIUMTYPE", Const, 0}, + {"EMFILE", Const, 0}, + {"EMLINK", Const, 0}, + {"EMSGSIZE", Const, 0}, + {"EMT_TAGOVF", Const, 1}, + {"EMULTIHOP", Const, 0}, + {"EMUL_ENABLED", Const, 1}, + {"EMUL_LINUX", Const, 1}, + {"EMUL_LINUX32", Const, 1}, + {"EMUL_MAXID", Const, 1}, + {"EMUL_NATIVE", Const, 1}, + {"ENAMETOOLONG", Const, 0}, + {"ENAVAIL", Const, 0}, + {"ENDRUNDISC", Const, 1}, + {"ENEEDAUTH", Const, 0}, + {"ENETDOWN", Const, 0}, + {"ENETRESET", Const, 0}, + {"ENETUNREACH", Const, 0}, + {"ENFILE", Const, 0}, + {"ENOANO", Const, 0}, + {"ENOATTR", Const, 0}, + {"ENOBUFS", Const, 0}, + {"ENOCSI", Const, 0}, + {"ENODATA", Const, 0}, + {"ENODEV", Const, 0}, + {"ENOENT", Const, 0}, + {"ENOEXEC", Const, 0}, + {"ENOKEY", Const, 0}, + {"ENOLCK", Const, 0}, + {"ENOLINK", Const, 0}, + {"ENOMEDIUM", Const, 0}, + {"ENOMEM", Const, 0}, + {"ENOMSG", Const, 0}, + {"ENONET", Const, 0}, + {"ENOPKG", Const, 0}, + {"ENOPOLICY", Const, 0}, + {"ENOPROTOOPT", Const, 0}, + {"ENOSPC", Const, 0}, + {"ENOSR", Const, 0}, + {"ENOSTR", Const, 0}, + {"ENOSYS", Const, 0}, + {"ENOTBLK", Const, 0}, + {"ENOTCAPABLE", Const, 0}, + {"ENOTCONN", Const, 0}, + {"ENOTDIR", Const, 0}, + {"ENOTEMPTY", Const, 0}, + {"ENOTNAM", Const, 0}, + {"ENOTRECOVERABLE", Const, 0}, + {"ENOTSOCK", Const, 0}, + {"ENOTSUP", Const, 0}, + {"ENOTTY", Const, 0}, + {"ENOTUNIQ", Const, 0}, + {"ENXIO", Const, 0}, + {"EN_SW_CTL_INF", Const, 1}, + {"EN_SW_CTL_PREC", Const, 1}, + {"EN_SW_CTL_ROUND", Const, 1}, + {"EN_SW_DATACHAIN", Const, 1}, + {"EN_SW_DENORM", Const, 1}, + {"EN_SW_INVOP", Const, 1}, + {"EN_SW_OVERFLOW", Const, 1}, + {"EN_SW_PRECLOSS", Const, 1}, + {"EN_SW_UNDERFLOW", Const, 1}, + {"EN_SW_ZERODIV", Const, 1}, + {"EOPNOTSUPP", Const, 0}, + {"EOVERFLOW", Const, 0}, + {"EOWNERDEAD", Const, 0}, + {"EPERM", Const, 0}, + {"EPFNOSUPPORT", Const, 0}, + {"EPIPE", Const, 0}, + {"EPOLLERR", Const, 0}, + {"EPOLLET", Const, 0}, + {"EPOLLHUP", Const, 0}, + {"EPOLLIN", Const, 0}, + {"EPOLLMSG", Const, 0}, + {"EPOLLONESHOT", Const, 0}, + {"EPOLLOUT", Const, 0}, + {"EPOLLPRI", Const, 0}, + {"EPOLLRDBAND", Const, 0}, + {"EPOLLRDHUP", Const, 0}, + {"EPOLLRDNORM", Const, 0}, + {"EPOLLWRBAND", Const, 0}, + {"EPOLLWRNORM", Const, 0}, + {"EPOLL_CLOEXEC", Const, 0}, + {"EPOLL_CTL_ADD", Const, 0}, + {"EPOLL_CTL_DEL", Const, 0}, + {"EPOLL_CTL_MOD", Const, 0}, + {"EPOLL_NONBLOCK", Const, 0}, + {"EPROCLIM", Const, 0}, + {"EPROCUNAVAIL", Const, 0}, + {"EPROGMISMATCH", Const, 0}, + {"EPROGUNAVAIL", Const, 0}, + {"EPROTO", Const, 0}, + {"EPROTONOSUPPORT", Const, 0}, + {"EPROTOTYPE", Const, 0}, + {"EPWROFF", Const, 0}, + {"EQFULL", Const, 16}, + {"ERANGE", Const, 0}, + {"EREMCHG", Const, 0}, + {"EREMOTE", Const, 0}, + {"EREMOTEIO", Const, 0}, + {"ERESTART", Const, 0}, + {"ERFKILL", Const, 0}, + {"EROFS", Const, 0}, + {"ERPCMISMATCH", Const, 0}, + {"ERROR_ACCESS_DENIED", Const, 0}, + {"ERROR_ALREADY_EXISTS", Const, 0}, + {"ERROR_BROKEN_PIPE", Const, 0}, + {"ERROR_BUFFER_OVERFLOW", Const, 0}, + {"ERROR_DIR_NOT_EMPTY", Const, 8}, + {"ERROR_ENVVAR_NOT_FOUND", Const, 0}, + {"ERROR_FILE_EXISTS", Const, 0}, + {"ERROR_FILE_NOT_FOUND", Const, 0}, + {"ERROR_HANDLE_EOF", Const, 2}, + {"ERROR_INSUFFICIENT_BUFFER", Const, 0}, + {"ERROR_IO_PENDING", Const, 0}, + {"ERROR_MOD_NOT_FOUND", Const, 0}, + {"ERROR_MORE_DATA", Const, 3}, + {"ERROR_NETNAME_DELETED", Const, 3}, + {"ERROR_NOT_FOUND", Const, 1}, + {"ERROR_NO_MORE_FILES", Const, 0}, + {"ERROR_OPERATION_ABORTED", Const, 0}, + {"ERROR_PATH_NOT_FOUND", Const, 0}, + {"ERROR_PRIVILEGE_NOT_HELD", Const, 4}, + {"ERROR_PROC_NOT_FOUND", 
Const, 0}, + {"ESHLIBVERS", Const, 0}, + {"ESHUTDOWN", Const, 0}, + {"ESOCKTNOSUPPORT", Const, 0}, + {"ESPIPE", Const, 0}, + {"ESRCH", Const, 0}, + {"ESRMNT", Const, 0}, + {"ESTALE", Const, 0}, + {"ESTRPIPE", Const, 0}, + {"ETHERCAP_JUMBO_MTU", Const, 1}, + {"ETHERCAP_VLAN_HWTAGGING", Const, 1}, + {"ETHERCAP_VLAN_MTU", Const, 1}, + {"ETHERMIN", Const, 1}, + {"ETHERMTU", Const, 1}, + {"ETHERMTU_JUMBO", Const, 1}, + {"ETHERTYPE_8023", Const, 1}, + {"ETHERTYPE_AARP", Const, 1}, + {"ETHERTYPE_ACCTON", Const, 1}, + {"ETHERTYPE_AEONIC", Const, 1}, + {"ETHERTYPE_ALPHA", Const, 1}, + {"ETHERTYPE_AMBER", Const, 1}, + {"ETHERTYPE_AMOEBA", Const, 1}, + {"ETHERTYPE_AOE", Const, 1}, + {"ETHERTYPE_APOLLO", Const, 1}, + {"ETHERTYPE_APOLLODOMAIN", Const, 1}, + {"ETHERTYPE_APPLETALK", Const, 1}, + {"ETHERTYPE_APPLITEK", Const, 1}, + {"ETHERTYPE_ARGONAUT", Const, 1}, + {"ETHERTYPE_ARP", Const, 1}, + {"ETHERTYPE_AT", Const, 1}, + {"ETHERTYPE_ATALK", Const, 1}, + {"ETHERTYPE_ATOMIC", Const, 1}, + {"ETHERTYPE_ATT", Const, 1}, + {"ETHERTYPE_ATTSTANFORD", Const, 1}, + {"ETHERTYPE_AUTOPHON", Const, 1}, + {"ETHERTYPE_AXIS", Const, 1}, + {"ETHERTYPE_BCLOOP", Const, 1}, + {"ETHERTYPE_BOFL", Const, 1}, + {"ETHERTYPE_CABLETRON", Const, 1}, + {"ETHERTYPE_CHAOS", Const, 1}, + {"ETHERTYPE_COMDESIGN", Const, 1}, + {"ETHERTYPE_COMPUGRAPHIC", Const, 1}, + {"ETHERTYPE_COUNTERPOINT", Const, 1}, + {"ETHERTYPE_CRONUS", Const, 1}, + {"ETHERTYPE_CRONUSVLN", Const, 1}, + {"ETHERTYPE_DCA", Const, 1}, + {"ETHERTYPE_DDE", Const, 1}, + {"ETHERTYPE_DEBNI", Const, 1}, + {"ETHERTYPE_DECAM", Const, 1}, + {"ETHERTYPE_DECCUST", Const, 1}, + {"ETHERTYPE_DECDIAG", Const, 1}, + {"ETHERTYPE_DECDNS", Const, 1}, + {"ETHERTYPE_DECDTS", Const, 1}, + {"ETHERTYPE_DECEXPER", Const, 1}, + {"ETHERTYPE_DECLAST", Const, 1}, + {"ETHERTYPE_DECLTM", Const, 1}, + {"ETHERTYPE_DECMUMPS", Const, 1}, + {"ETHERTYPE_DECNETBIOS", Const, 1}, + {"ETHERTYPE_DELTACON", Const, 1}, + {"ETHERTYPE_DIDDLE", Const, 1}, + {"ETHERTYPE_DLOG1", Const, 1}, + {"ETHERTYPE_DLOG2", Const, 1}, + {"ETHERTYPE_DN", Const, 1}, + {"ETHERTYPE_DOGFIGHT", Const, 1}, + {"ETHERTYPE_DSMD", Const, 1}, + {"ETHERTYPE_ECMA", Const, 1}, + {"ETHERTYPE_ENCRYPT", Const, 1}, + {"ETHERTYPE_ES", Const, 1}, + {"ETHERTYPE_EXCELAN", Const, 1}, + {"ETHERTYPE_EXPERDATA", Const, 1}, + {"ETHERTYPE_FLIP", Const, 1}, + {"ETHERTYPE_FLOWCONTROL", Const, 1}, + {"ETHERTYPE_FRARP", Const, 1}, + {"ETHERTYPE_GENDYN", Const, 1}, + {"ETHERTYPE_HAYES", Const, 1}, + {"ETHERTYPE_HIPPI_FP", Const, 1}, + {"ETHERTYPE_HITACHI", Const, 1}, + {"ETHERTYPE_HP", Const, 1}, + {"ETHERTYPE_IEEEPUP", Const, 1}, + {"ETHERTYPE_IEEEPUPAT", Const, 1}, + {"ETHERTYPE_IMLBL", Const, 1}, + {"ETHERTYPE_IMLBLDIAG", Const, 1}, + {"ETHERTYPE_IP", Const, 1}, + {"ETHERTYPE_IPAS", Const, 1}, + {"ETHERTYPE_IPV6", Const, 1}, + {"ETHERTYPE_IPX", Const, 1}, + {"ETHERTYPE_IPXNEW", Const, 1}, + {"ETHERTYPE_KALPANA", Const, 1}, + {"ETHERTYPE_LANBRIDGE", Const, 1}, + {"ETHERTYPE_LANPROBE", Const, 1}, + {"ETHERTYPE_LAT", Const, 1}, + {"ETHERTYPE_LBACK", Const, 1}, + {"ETHERTYPE_LITTLE", Const, 1}, + {"ETHERTYPE_LLDP", Const, 1}, + {"ETHERTYPE_LOGICRAFT", Const, 1}, + {"ETHERTYPE_LOOPBACK", Const, 1}, + {"ETHERTYPE_MATRA", Const, 1}, + {"ETHERTYPE_MAX", Const, 1}, + {"ETHERTYPE_MERIT", Const, 1}, + {"ETHERTYPE_MICP", Const, 1}, + {"ETHERTYPE_MOPDL", Const, 1}, + {"ETHERTYPE_MOPRC", Const, 1}, + {"ETHERTYPE_MOTOROLA", Const, 1}, + {"ETHERTYPE_MPLS", Const, 1}, + {"ETHERTYPE_MPLS_MCAST", Const, 1}, + {"ETHERTYPE_MUMPS", Const, 1}, + {"ETHERTYPE_NBPCC", Const, 1}, + 
{"ETHERTYPE_NBPCLAIM", Const, 1}, + {"ETHERTYPE_NBPCLREQ", Const, 1}, + {"ETHERTYPE_NBPCLRSP", Const, 1}, + {"ETHERTYPE_NBPCREQ", Const, 1}, + {"ETHERTYPE_NBPCRSP", Const, 1}, + {"ETHERTYPE_NBPDG", Const, 1}, + {"ETHERTYPE_NBPDGB", Const, 1}, + {"ETHERTYPE_NBPDLTE", Const, 1}, + {"ETHERTYPE_NBPRAR", Const, 1}, + {"ETHERTYPE_NBPRAS", Const, 1}, + {"ETHERTYPE_NBPRST", Const, 1}, + {"ETHERTYPE_NBPSCD", Const, 1}, + {"ETHERTYPE_NBPVCD", Const, 1}, + {"ETHERTYPE_NBS", Const, 1}, + {"ETHERTYPE_NCD", Const, 1}, + {"ETHERTYPE_NESTAR", Const, 1}, + {"ETHERTYPE_NETBEUI", Const, 1}, + {"ETHERTYPE_NOVELL", Const, 1}, + {"ETHERTYPE_NS", Const, 1}, + {"ETHERTYPE_NSAT", Const, 1}, + {"ETHERTYPE_NSCOMPAT", Const, 1}, + {"ETHERTYPE_NTRAILER", Const, 1}, + {"ETHERTYPE_OS9", Const, 1}, + {"ETHERTYPE_OS9NET", Const, 1}, + {"ETHERTYPE_PACER", Const, 1}, + {"ETHERTYPE_PAE", Const, 1}, + {"ETHERTYPE_PCS", Const, 1}, + {"ETHERTYPE_PLANNING", Const, 1}, + {"ETHERTYPE_PPP", Const, 1}, + {"ETHERTYPE_PPPOE", Const, 1}, + {"ETHERTYPE_PPPOEDISC", Const, 1}, + {"ETHERTYPE_PRIMENTS", Const, 1}, + {"ETHERTYPE_PUP", Const, 1}, + {"ETHERTYPE_PUPAT", Const, 1}, + {"ETHERTYPE_QINQ", Const, 1}, + {"ETHERTYPE_RACAL", Const, 1}, + {"ETHERTYPE_RATIONAL", Const, 1}, + {"ETHERTYPE_RAWFR", Const, 1}, + {"ETHERTYPE_RCL", Const, 1}, + {"ETHERTYPE_RDP", Const, 1}, + {"ETHERTYPE_RETIX", Const, 1}, + {"ETHERTYPE_REVARP", Const, 1}, + {"ETHERTYPE_SCA", Const, 1}, + {"ETHERTYPE_SECTRA", Const, 1}, + {"ETHERTYPE_SECUREDATA", Const, 1}, + {"ETHERTYPE_SGITW", Const, 1}, + {"ETHERTYPE_SG_BOUNCE", Const, 1}, + {"ETHERTYPE_SG_DIAG", Const, 1}, + {"ETHERTYPE_SG_NETGAMES", Const, 1}, + {"ETHERTYPE_SG_RESV", Const, 1}, + {"ETHERTYPE_SIMNET", Const, 1}, + {"ETHERTYPE_SLOW", Const, 1}, + {"ETHERTYPE_SLOWPROTOCOLS", Const, 1}, + {"ETHERTYPE_SNA", Const, 1}, + {"ETHERTYPE_SNMP", Const, 1}, + {"ETHERTYPE_SONIX", Const, 1}, + {"ETHERTYPE_SPIDER", Const, 1}, + {"ETHERTYPE_SPRITE", Const, 1}, + {"ETHERTYPE_STP", Const, 1}, + {"ETHERTYPE_TALARIS", Const, 1}, + {"ETHERTYPE_TALARISMC", Const, 1}, + {"ETHERTYPE_TCPCOMP", Const, 1}, + {"ETHERTYPE_TCPSM", Const, 1}, + {"ETHERTYPE_TEC", Const, 1}, + {"ETHERTYPE_TIGAN", Const, 1}, + {"ETHERTYPE_TRAIL", Const, 1}, + {"ETHERTYPE_TRANSETHER", Const, 1}, + {"ETHERTYPE_TYMSHARE", Const, 1}, + {"ETHERTYPE_UBBST", Const, 1}, + {"ETHERTYPE_UBDEBUG", Const, 1}, + {"ETHERTYPE_UBDIAGLOOP", Const, 1}, + {"ETHERTYPE_UBDL", Const, 1}, + {"ETHERTYPE_UBNIU", Const, 1}, + {"ETHERTYPE_UBNMC", Const, 1}, + {"ETHERTYPE_VALID", Const, 1}, + {"ETHERTYPE_VARIAN", Const, 1}, + {"ETHERTYPE_VAXELN", Const, 1}, + {"ETHERTYPE_VEECO", Const, 1}, + {"ETHERTYPE_VEXP", Const, 1}, + {"ETHERTYPE_VGLAB", Const, 1}, + {"ETHERTYPE_VINES", Const, 1}, + {"ETHERTYPE_VINESECHO", Const, 1}, + {"ETHERTYPE_VINESLOOP", Const, 1}, + {"ETHERTYPE_VITAL", Const, 1}, + {"ETHERTYPE_VLAN", Const, 1}, + {"ETHERTYPE_VLTLMAN", Const, 1}, + {"ETHERTYPE_VPROD", Const, 1}, + {"ETHERTYPE_VURESERVED", Const, 1}, + {"ETHERTYPE_WATERLOO", Const, 1}, + {"ETHERTYPE_WELLFLEET", Const, 1}, + {"ETHERTYPE_X25", Const, 1}, + {"ETHERTYPE_X75", Const, 1}, + {"ETHERTYPE_XNSSM", Const, 1}, + {"ETHERTYPE_XTP", Const, 1}, + {"ETHER_ADDR_LEN", Const, 1}, + {"ETHER_ALIGN", Const, 1}, + {"ETHER_CRC_LEN", Const, 1}, + {"ETHER_CRC_POLY_BE", Const, 1}, + {"ETHER_CRC_POLY_LE", Const, 1}, + {"ETHER_HDR_LEN", Const, 1}, + {"ETHER_MAX_DIX_LEN", Const, 1}, + {"ETHER_MAX_LEN", Const, 1}, + {"ETHER_MAX_LEN_JUMBO", Const, 1}, + {"ETHER_MIN_LEN", Const, 1}, + {"ETHER_PPPOE_ENCAP_LEN", Const, 1}, + 
{"ETHER_TYPE_LEN", Const, 1}, + {"ETHER_VLAN_ENCAP_LEN", Const, 1}, + {"ETH_P_1588", Const, 0}, + {"ETH_P_8021Q", Const, 0}, + {"ETH_P_802_2", Const, 0}, + {"ETH_P_802_3", Const, 0}, + {"ETH_P_AARP", Const, 0}, + {"ETH_P_ALL", Const, 0}, + {"ETH_P_AOE", Const, 0}, + {"ETH_P_ARCNET", Const, 0}, + {"ETH_P_ARP", Const, 0}, + {"ETH_P_ATALK", Const, 0}, + {"ETH_P_ATMFATE", Const, 0}, + {"ETH_P_ATMMPOA", Const, 0}, + {"ETH_P_AX25", Const, 0}, + {"ETH_P_BPQ", Const, 0}, + {"ETH_P_CAIF", Const, 0}, + {"ETH_P_CAN", Const, 0}, + {"ETH_P_CONTROL", Const, 0}, + {"ETH_P_CUST", Const, 0}, + {"ETH_P_DDCMP", Const, 0}, + {"ETH_P_DEC", Const, 0}, + {"ETH_P_DIAG", Const, 0}, + {"ETH_P_DNA_DL", Const, 0}, + {"ETH_P_DNA_RC", Const, 0}, + {"ETH_P_DNA_RT", Const, 0}, + {"ETH_P_DSA", Const, 0}, + {"ETH_P_ECONET", Const, 0}, + {"ETH_P_EDSA", Const, 0}, + {"ETH_P_FCOE", Const, 0}, + {"ETH_P_FIP", Const, 0}, + {"ETH_P_HDLC", Const, 0}, + {"ETH_P_IEEE802154", Const, 0}, + {"ETH_P_IEEEPUP", Const, 0}, + {"ETH_P_IEEEPUPAT", Const, 0}, + {"ETH_P_IP", Const, 0}, + {"ETH_P_IPV6", Const, 0}, + {"ETH_P_IPX", Const, 0}, + {"ETH_P_IRDA", Const, 0}, + {"ETH_P_LAT", Const, 0}, + {"ETH_P_LINK_CTL", Const, 0}, + {"ETH_P_LOCALTALK", Const, 0}, + {"ETH_P_LOOP", Const, 0}, + {"ETH_P_MOBITEX", Const, 0}, + {"ETH_P_MPLS_MC", Const, 0}, + {"ETH_P_MPLS_UC", Const, 0}, + {"ETH_P_PAE", Const, 0}, + {"ETH_P_PAUSE", Const, 0}, + {"ETH_P_PHONET", Const, 0}, + {"ETH_P_PPPTALK", Const, 0}, + {"ETH_P_PPP_DISC", Const, 0}, + {"ETH_P_PPP_MP", Const, 0}, + {"ETH_P_PPP_SES", Const, 0}, + {"ETH_P_PUP", Const, 0}, + {"ETH_P_PUPAT", Const, 0}, + {"ETH_P_RARP", Const, 0}, + {"ETH_P_SCA", Const, 0}, + {"ETH_P_SLOW", Const, 0}, + {"ETH_P_SNAP", Const, 0}, + {"ETH_P_TEB", Const, 0}, + {"ETH_P_TIPC", Const, 0}, + {"ETH_P_TRAILER", Const, 0}, + {"ETH_P_TR_802_2", Const, 0}, + {"ETH_P_WAN_PPP", Const, 0}, + {"ETH_P_WCCP", Const, 0}, + {"ETH_P_X25", Const, 0}, + {"ETIME", Const, 0}, + {"ETIMEDOUT", Const, 0}, + {"ETOOMANYREFS", Const, 0}, + {"ETXTBSY", Const, 0}, + {"EUCLEAN", Const, 0}, + {"EUNATCH", Const, 0}, + {"EUSERS", Const, 0}, + {"EVFILT_AIO", Const, 0}, + {"EVFILT_FS", Const, 0}, + {"EVFILT_LIO", Const, 0}, + {"EVFILT_MACHPORT", Const, 0}, + {"EVFILT_PROC", Const, 0}, + {"EVFILT_READ", Const, 0}, + {"EVFILT_SIGNAL", Const, 0}, + {"EVFILT_SYSCOUNT", Const, 0}, + {"EVFILT_THREADMARKER", Const, 0}, + {"EVFILT_TIMER", Const, 0}, + {"EVFILT_USER", Const, 0}, + {"EVFILT_VM", Const, 0}, + {"EVFILT_VNODE", Const, 0}, + {"EVFILT_WRITE", Const, 0}, + {"EV_ADD", Const, 0}, + {"EV_CLEAR", Const, 0}, + {"EV_DELETE", Const, 0}, + {"EV_DISABLE", Const, 0}, + {"EV_DISPATCH", Const, 0}, + {"EV_DROP", Const, 3}, + {"EV_ENABLE", Const, 0}, + {"EV_EOF", Const, 0}, + {"EV_ERROR", Const, 0}, + {"EV_FLAG0", Const, 0}, + {"EV_FLAG1", Const, 0}, + {"EV_ONESHOT", Const, 0}, + {"EV_OOBAND", Const, 0}, + {"EV_POLL", Const, 0}, + {"EV_RECEIPT", Const, 0}, + {"EV_SYSFLAGS", Const, 0}, + {"EWINDOWS", Const, 0}, + {"EWOULDBLOCK", Const, 0}, + {"EXDEV", Const, 0}, + {"EXFULL", Const, 0}, + {"EXTA", Const, 0}, + {"EXTB", Const, 0}, + {"EXTPROC", Const, 0}, + {"Environ", Func, 0}, + {"EpollCreate", Func, 0}, + {"EpollCreate1", Func, 0}, + {"EpollCtl", Func, 0}, + {"EpollEvent", Type, 0}, + {"EpollEvent.Events", Field, 0}, + {"EpollEvent.Fd", Field, 0}, + {"EpollEvent.Pad", Field, 0}, + {"EpollEvent.PadFd", Field, 0}, + {"EpollWait", Func, 0}, + {"Errno", Type, 0}, + {"EscapeArg", Func, 0}, + {"Exchangedata", Func, 0}, + {"Exec", Func, 0}, + {"Exit", Func, 0}, + {"ExitProcess", Func, 
0}, + {"FD_CLOEXEC", Const, 0}, + {"FD_SETSIZE", Const, 0}, + {"FILE_ACTION_ADDED", Const, 0}, + {"FILE_ACTION_MODIFIED", Const, 0}, + {"FILE_ACTION_REMOVED", Const, 0}, + {"FILE_ACTION_RENAMED_NEW_NAME", Const, 0}, + {"FILE_ACTION_RENAMED_OLD_NAME", Const, 0}, + {"FILE_APPEND_DATA", Const, 0}, + {"FILE_ATTRIBUTE_ARCHIVE", Const, 0}, + {"FILE_ATTRIBUTE_DIRECTORY", Const, 0}, + {"FILE_ATTRIBUTE_HIDDEN", Const, 0}, + {"FILE_ATTRIBUTE_NORMAL", Const, 0}, + {"FILE_ATTRIBUTE_READONLY", Const, 0}, + {"FILE_ATTRIBUTE_REPARSE_POINT", Const, 4}, + {"FILE_ATTRIBUTE_SYSTEM", Const, 0}, + {"FILE_BEGIN", Const, 0}, + {"FILE_CURRENT", Const, 0}, + {"FILE_END", Const, 0}, + {"FILE_FLAG_BACKUP_SEMANTICS", Const, 0}, + {"FILE_FLAG_OPEN_REPARSE_POINT", Const, 4}, + {"FILE_FLAG_OVERLAPPED", Const, 0}, + {"FILE_LIST_DIRECTORY", Const, 0}, + {"FILE_MAP_COPY", Const, 0}, + {"FILE_MAP_EXECUTE", Const, 0}, + {"FILE_MAP_READ", Const, 0}, + {"FILE_MAP_WRITE", Const, 0}, + {"FILE_NOTIFY_CHANGE_ATTRIBUTES", Const, 0}, + {"FILE_NOTIFY_CHANGE_CREATION", Const, 0}, + {"FILE_NOTIFY_CHANGE_DIR_NAME", Const, 0}, + {"FILE_NOTIFY_CHANGE_FILE_NAME", Const, 0}, + {"FILE_NOTIFY_CHANGE_LAST_ACCESS", Const, 0}, + {"FILE_NOTIFY_CHANGE_LAST_WRITE", Const, 0}, + {"FILE_NOTIFY_CHANGE_SIZE", Const, 0}, + {"FILE_SHARE_DELETE", Const, 0}, + {"FILE_SHARE_READ", Const, 0}, + {"FILE_SHARE_WRITE", Const, 0}, + {"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", Const, 2}, + {"FILE_SKIP_SET_EVENT_ON_HANDLE", Const, 2}, + {"FILE_TYPE_CHAR", Const, 0}, + {"FILE_TYPE_DISK", Const, 0}, + {"FILE_TYPE_PIPE", Const, 0}, + {"FILE_TYPE_REMOTE", Const, 0}, + {"FILE_TYPE_UNKNOWN", Const, 0}, + {"FILE_WRITE_ATTRIBUTES", Const, 0}, + {"FLUSHO", Const, 0}, + {"FORMAT_MESSAGE_ALLOCATE_BUFFER", Const, 0}, + {"FORMAT_MESSAGE_ARGUMENT_ARRAY", Const, 0}, + {"FORMAT_MESSAGE_FROM_HMODULE", Const, 0}, + {"FORMAT_MESSAGE_FROM_STRING", Const, 0}, + {"FORMAT_MESSAGE_FROM_SYSTEM", Const, 0}, + {"FORMAT_MESSAGE_IGNORE_INSERTS", Const, 0}, + {"FORMAT_MESSAGE_MAX_WIDTH_MASK", Const, 0}, + {"FSCTL_GET_REPARSE_POINT", Const, 4}, + {"F_ADDFILESIGS", Const, 0}, + {"F_ADDSIGS", Const, 0}, + {"F_ALLOCATEALL", Const, 0}, + {"F_ALLOCATECONTIG", Const, 0}, + {"F_CANCEL", Const, 0}, + {"F_CHKCLEAN", Const, 0}, + {"F_CLOSEM", Const, 1}, + {"F_DUP2FD", Const, 0}, + {"F_DUP2FD_CLOEXEC", Const, 1}, + {"F_DUPFD", Const, 0}, + {"F_DUPFD_CLOEXEC", Const, 0}, + {"F_EXLCK", Const, 0}, + {"F_FINDSIGS", Const, 16}, + {"F_FLUSH_DATA", Const, 0}, + {"F_FREEZE_FS", Const, 0}, + {"F_FSCTL", Const, 1}, + {"F_FSDIRMASK", Const, 1}, + {"F_FSIN", Const, 1}, + {"F_FSINOUT", Const, 1}, + {"F_FSOUT", Const, 1}, + {"F_FSPRIV", Const, 1}, + {"F_FSVOID", Const, 1}, + {"F_FULLFSYNC", Const, 0}, + {"F_GETCODEDIR", Const, 16}, + {"F_GETFD", Const, 0}, + {"F_GETFL", Const, 0}, + {"F_GETLEASE", Const, 0}, + {"F_GETLK", Const, 0}, + {"F_GETLK64", Const, 0}, + {"F_GETLKPID", Const, 0}, + {"F_GETNOSIGPIPE", Const, 0}, + {"F_GETOWN", Const, 0}, + {"F_GETOWN_EX", Const, 0}, + {"F_GETPATH", Const, 0}, + {"F_GETPATH_MTMINFO", Const, 0}, + {"F_GETPIPE_SZ", Const, 0}, + {"F_GETPROTECTIONCLASS", Const, 0}, + {"F_GETPROTECTIONLEVEL", Const, 16}, + {"F_GETSIG", Const, 0}, + {"F_GLOBAL_NOCACHE", Const, 0}, + {"F_LOCK", Const, 0}, + {"F_LOG2PHYS", Const, 0}, + {"F_LOG2PHYS_EXT", Const, 0}, + {"F_MARKDEPENDENCY", Const, 0}, + {"F_MAXFD", Const, 1}, + {"F_NOCACHE", Const, 0}, + {"F_NODIRECT", Const, 0}, + {"F_NOTIFY", Const, 0}, + {"F_OGETLK", Const, 0}, + {"F_OK", Const, 0}, + {"F_OSETLK", Const, 0}, + {"F_OSETLKW", Const, 0}, + 
{"F_PARAM_MASK", Const, 1}, + {"F_PARAM_MAX", Const, 1}, + {"F_PATHPKG_CHECK", Const, 0}, + {"F_PEOFPOSMODE", Const, 0}, + {"F_PREALLOCATE", Const, 0}, + {"F_RDADVISE", Const, 0}, + {"F_RDAHEAD", Const, 0}, + {"F_RDLCK", Const, 0}, + {"F_READAHEAD", Const, 0}, + {"F_READBOOTSTRAP", Const, 0}, + {"F_SETBACKINGSTORE", Const, 0}, + {"F_SETFD", Const, 0}, + {"F_SETFL", Const, 0}, + {"F_SETLEASE", Const, 0}, + {"F_SETLK", Const, 0}, + {"F_SETLK64", Const, 0}, + {"F_SETLKW", Const, 0}, + {"F_SETLKW64", Const, 0}, + {"F_SETLKWTIMEOUT", Const, 16}, + {"F_SETLK_REMOTE", Const, 0}, + {"F_SETNOSIGPIPE", Const, 0}, + {"F_SETOWN", Const, 0}, + {"F_SETOWN_EX", Const, 0}, + {"F_SETPIPE_SZ", Const, 0}, + {"F_SETPROTECTIONCLASS", Const, 0}, + {"F_SETSIG", Const, 0}, + {"F_SETSIZE", Const, 0}, + {"F_SHLCK", Const, 0}, + {"F_SINGLE_WRITER", Const, 16}, + {"F_TEST", Const, 0}, + {"F_THAW_FS", Const, 0}, + {"F_TLOCK", Const, 0}, + {"F_TRANSCODEKEY", Const, 16}, + {"F_ULOCK", Const, 0}, + {"F_UNLCK", Const, 0}, + {"F_UNLCKSYS", Const, 0}, + {"F_VOLPOSMODE", Const, 0}, + {"F_WRITEBOOTSTRAP", Const, 0}, + {"F_WRLCK", Const, 0}, + {"Faccessat", Func, 0}, + {"Fallocate", Func, 0}, + {"Fbootstraptransfer_t", Type, 0}, + {"Fbootstraptransfer_t.Buffer", Field, 0}, + {"Fbootstraptransfer_t.Length", Field, 0}, + {"Fbootstraptransfer_t.Offset", Field, 0}, + {"Fchdir", Func, 0}, + {"Fchflags", Func, 0}, + {"Fchmod", Func, 0}, + {"Fchmodat", Func, 0}, + {"Fchown", Func, 0}, + {"Fchownat", Func, 0}, + {"FcntlFlock", Func, 3}, + {"FdSet", Type, 0}, + {"FdSet.Bits", Field, 0}, + {"FdSet.X__fds_bits", Field, 0}, + {"Fdatasync", Func, 0}, + {"FileNotifyInformation", Type, 0}, + {"FileNotifyInformation.Action", Field, 0}, + {"FileNotifyInformation.FileName", Field, 0}, + {"FileNotifyInformation.FileNameLength", Field, 0}, + {"FileNotifyInformation.NextEntryOffset", Field, 0}, + {"Filetime", Type, 0}, + {"Filetime.HighDateTime", Field, 0}, + {"Filetime.LowDateTime", Field, 0}, + {"FindClose", Func, 0}, + {"FindFirstFile", Func, 0}, + {"FindNextFile", Func, 0}, + {"Flock", Func, 0}, + {"Flock_t", Type, 0}, + {"Flock_t.Len", Field, 0}, + {"Flock_t.Pad_cgo_0", Field, 0}, + {"Flock_t.Pad_cgo_1", Field, 3}, + {"Flock_t.Pid", Field, 0}, + {"Flock_t.Start", Field, 0}, + {"Flock_t.Sysid", Field, 0}, + {"Flock_t.Type", Field, 0}, + {"Flock_t.Whence", Field, 0}, + {"FlushBpf", Func, 0}, + {"FlushFileBuffers", Func, 0}, + {"FlushViewOfFile", Func, 0}, + {"ForkExec", Func, 0}, + {"ForkLock", Var, 0}, + {"FormatMessage", Func, 0}, + {"Fpathconf", Func, 0}, + {"FreeAddrInfoW", Func, 1}, + {"FreeEnvironmentStrings", Func, 0}, + {"FreeLibrary", Func, 0}, + {"Fsid", Type, 0}, + {"Fsid.Val", Field, 0}, + {"Fsid.X__fsid_val", Field, 2}, + {"Fsid.X__val", Field, 0}, + {"Fstat", Func, 0}, + {"Fstatat", Func, 12}, + {"Fstatfs", Func, 0}, + {"Fstore_t", Type, 0}, + {"Fstore_t.Bytesalloc", Field, 0}, + {"Fstore_t.Flags", Field, 0}, + {"Fstore_t.Length", Field, 0}, + {"Fstore_t.Offset", Field, 0}, + {"Fstore_t.Posmode", Field, 0}, + {"Fsync", Func, 0}, + {"Ftruncate", Func, 0}, + {"FullPath", Func, 4}, + {"Futimes", Func, 0}, + {"Futimesat", Func, 0}, + {"GENERIC_ALL", Const, 0}, + {"GENERIC_EXECUTE", Const, 0}, + {"GENERIC_READ", Const, 0}, + {"GENERIC_WRITE", Const, 0}, + {"GUID", Type, 1}, + {"GUID.Data1", Field, 1}, + {"GUID.Data2", Field, 1}, + {"GUID.Data3", Field, 1}, + {"GUID.Data4", Field, 1}, + {"GetAcceptExSockaddrs", Func, 0}, + {"GetAdaptersInfo", Func, 0}, + {"GetAddrInfoW", Func, 1}, + {"GetCommandLine", Func, 0}, + {"GetComputerName", 
Func, 0}, + {"GetConsoleMode", Func, 1}, + {"GetCurrentDirectory", Func, 0}, + {"GetCurrentProcess", Func, 0}, + {"GetEnvironmentStrings", Func, 0}, + {"GetEnvironmentVariable", Func, 0}, + {"GetExitCodeProcess", Func, 0}, + {"GetFileAttributes", Func, 0}, + {"GetFileAttributesEx", Func, 0}, + {"GetFileExInfoStandard", Const, 0}, + {"GetFileExMaxInfoLevel", Const, 0}, + {"GetFileInformationByHandle", Func, 0}, + {"GetFileType", Func, 0}, + {"GetFullPathName", Func, 0}, + {"GetHostByName", Func, 0}, + {"GetIfEntry", Func, 0}, + {"GetLastError", Func, 0}, + {"GetLengthSid", Func, 0}, + {"GetLongPathName", Func, 0}, + {"GetProcAddress", Func, 0}, + {"GetProcessTimes", Func, 0}, + {"GetProtoByName", Func, 0}, + {"GetQueuedCompletionStatus", Func, 0}, + {"GetServByName", Func, 0}, + {"GetShortPathName", Func, 0}, + {"GetStartupInfo", Func, 0}, + {"GetStdHandle", Func, 0}, + {"GetSystemTimeAsFileTime", Func, 0}, + {"GetTempPath", Func, 0}, + {"GetTimeZoneInformation", Func, 0}, + {"GetTokenInformation", Func, 0}, + {"GetUserNameEx", Func, 0}, + {"GetUserProfileDirectory", Func, 0}, + {"GetVersion", Func, 0}, + {"Getcwd", Func, 0}, + {"Getdents", Func, 0}, + {"Getdirentries", Func, 0}, + {"Getdtablesize", Func, 0}, + {"Getegid", Func, 0}, + {"Getenv", Func, 0}, + {"Geteuid", Func, 0}, + {"Getfsstat", Func, 0}, + {"Getgid", Func, 0}, + {"Getgroups", Func, 0}, + {"Getpagesize", Func, 0}, + {"Getpeername", Func, 0}, + {"Getpgid", Func, 0}, + {"Getpgrp", Func, 0}, + {"Getpid", Func, 0}, + {"Getppid", Func, 0}, + {"Getpriority", Func, 0}, + {"Getrlimit", Func, 0}, + {"Getrusage", Func, 0}, + {"Getsid", Func, 0}, + {"Getsockname", Func, 0}, + {"Getsockopt", Func, 1}, + {"GetsockoptByte", Func, 0}, + {"GetsockoptICMPv6Filter", Func, 2}, + {"GetsockoptIPMreq", Func, 0}, + {"GetsockoptIPMreqn", Func, 0}, + {"GetsockoptIPv6MTUInfo", Func, 2}, + {"GetsockoptIPv6Mreq", Func, 0}, + {"GetsockoptInet4Addr", Func, 0}, + {"GetsockoptInt", Func, 0}, + {"GetsockoptUcred", Func, 1}, + {"Gettid", Func, 0}, + {"Gettimeofday", Func, 0}, + {"Getuid", Func, 0}, + {"Getwd", Func, 0}, + {"Getxattr", Func, 1}, + {"HANDLE_FLAG_INHERIT", Const, 0}, + {"HKEY_CLASSES_ROOT", Const, 0}, + {"HKEY_CURRENT_CONFIG", Const, 0}, + {"HKEY_CURRENT_USER", Const, 0}, + {"HKEY_DYN_DATA", Const, 0}, + {"HKEY_LOCAL_MACHINE", Const, 0}, + {"HKEY_PERFORMANCE_DATA", Const, 0}, + {"HKEY_USERS", Const, 0}, + {"HUPCL", Const, 0}, + {"Handle", Type, 0}, + {"Hostent", Type, 0}, + {"Hostent.AddrList", Field, 0}, + {"Hostent.AddrType", Field, 0}, + {"Hostent.Aliases", Field, 0}, + {"Hostent.Length", Field, 0}, + {"Hostent.Name", Field, 0}, + {"ICANON", Const, 0}, + {"ICMP6_FILTER", Const, 2}, + {"ICMPV6_FILTER", Const, 2}, + {"ICMPv6Filter", Type, 2}, + {"ICMPv6Filter.Data", Field, 2}, + {"ICMPv6Filter.Filt", Field, 2}, + {"ICRNL", Const, 0}, + {"IEXTEN", Const, 0}, + {"IFAN_ARRIVAL", Const, 1}, + {"IFAN_DEPARTURE", Const, 1}, + {"IFA_ADDRESS", Const, 0}, + {"IFA_ANYCAST", Const, 0}, + {"IFA_BROADCAST", Const, 0}, + {"IFA_CACHEINFO", Const, 0}, + {"IFA_F_DADFAILED", Const, 0}, + {"IFA_F_DEPRECATED", Const, 0}, + {"IFA_F_HOMEADDRESS", Const, 0}, + {"IFA_F_NODAD", Const, 0}, + {"IFA_F_OPTIMISTIC", Const, 0}, + {"IFA_F_PERMANENT", Const, 0}, + {"IFA_F_SECONDARY", Const, 0}, + {"IFA_F_TEMPORARY", Const, 0}, + {"IFA_F_TENTATIVE", Const, 0}, + {"IFA_LABEL", Const, 0}, + {"IFA_LOCAL", Const, 0}, + {"IFA_MAX", Const, 0}, + {"IFA_MULTICAST", Const, 0}, + {"IFA_ROUTE", Const, 1}, + {"IFA_UNSPEC", Const, 0}, + {"IFF_ALLMULTI", Const, 0}, + {"IFF_ALTPHYS", Const, 
0}, + {"IFF_AUTOMEDIA", Const, 0}, + {"IFF_BROADCAST", Const, 0}, + {"IFF_CANTCHANGE", Const, 0}, + {"IFF_CANTCONFIG", Const, 1}, + {"IFF_DEBUG", Const, 0}, + {"IFF_DRV_OACTIVE", Const, 0}, + {"IFF_DRV_RUNNING", Const, 0}, + {"IFF_DYING", Const, 0}, + {"IFF_DYNAMIC", Const, 0}, + {"IFF_LINK0", Const, 0}, + {"IFF_LINK1", Const, 0}, + {"IFF_LINK2", Const, 0}, + {"IFF_LOOPBACK", Const, 0}, + {"IFF_MASTER", Const, 0}, + {"IFF_MONITOR", Const, 0}, + {"IFF_MULTICAST", Const, 0}, + {"IFF_NOARP", Const, 0}, + {"IFF_NOTRAILERS", Const, 0}, + {"IFF_NO_PI", Const, 0}, + {"IFF_OACTIVE", Const, 0}, + {"IFF_ONE_QUEUE", Const, 0}, + {"IFF_POINTOPOINT", Const, 0}, + {"IFF_POINTTOPOINT", Const, 0}, + {"IFF_PORTSEL", Const, 0}, + {"IFF_PPROMISC", Const, 0}, + {"IFF_PROMISC", Const, 0}, + {"IFF_RENAMING", Const, 0}, + {"IFF_RUNNING", Const, 0}, + {"IFF_SIMPLEX", Const, 0}, + {"IFF_SLAVE", Const, 0}, + {"IFF_SMART", Const, 0}, + {"IFF_STATICARP", Const, 0}, + {"IFF_TAP", Const, 0}, + {"IFF_TUN", Const, 0}, + {"IFF_TUN_EXCL", Const, 0}, + {"IFF_UP", Const, 0}, + {"IFF_VNET_HDR", Const, 0}, + {"IFLA_ADDRESS", Const, 0}, + {"IFLA_BROADCAST", Const, 0}, + {"IFLA_COST", Const, 0}, + {"IFLA_IFALIAS", Const, 0}, + {"IFLA_IFNAME", Const, 0}, + {"IFLA_LINK", Const, 0}, + {"IFLA_LINKINFO", Const, 0}, + {"IFLA_LINKMODE", Const, 0}, + {"IFLA_MAP", Const, 0}, + {"IFLA_MASTER", Const, 0}, + {"IFLA_MAX", Const, 0}, + {"IFLA_MTU", Const, 0}, + {"IFLA_NET_NS_PID", Const, 0}, + {"IFLA_OPERSTATE", Const, 0}, + {"IFLA_PRIORITY", Const, 0}, + {"IFLA_PROTINFO", Const, 0}, + {"IFLA_QDISC", Const, 0}, + {"IFLA_STATS", Const, 0}, + {"IFLA_TXQLEN", Const, 0}, + {"IFLA_UNSPEC", Const, 0}, + {"IFLA_WEIGHT", Const, 0}, + {"IFLA_WIRELESS", Const, 0}, + {"IFNAMSIZ", Const, 0}, + {"IFT_1822", Const, 0}, + {"IFT_A12MPPSWITCH", Const, 0}, + {"IFT_AAL2", Const, 0}, + {"IFT_AAL5", Const, 0}, + {"IFT_ADSL", Const, 0}, + {"IFT_AFLANE8023", Const, 0}, + {"IFT_AFLANE8025", Const, 0}, + {"IFT_ARAP", Const, 0}, + {"IFT_ARCNET", Const, 0}, + {"IFT_ARCNETPLUS", Const, 0}, + {"IFT_ASYNC", Const, 0}, + {"IFT_ATM", Const, 0}, + {"IFT_ATMDXI", Const, 0}, + {"IFT_ATMFUNI", Const, 0}, + {"IFT_ATMIMA", Const, 0}, + {"IFT_ATMLOGICAL", Const, 0}, + {"IFT_ATMRADIO", Const, 0}, + {"IFT_ATMSUBINTERFACE", Const, 0}, + {"IFT_ATMVCIENDPT", Const, 0}, + {"IFT_ATMVIRTUAL", Const, 0}, + {"IFT_BGPPOLICYACCOUNTING", Const, 0}, + {"IFT_BLUETOOTH", Const, 1}, + {"IFT_BRIDGE", Const, 0}, + {"IFT_BSC", Const, 0}, + {"IFT_CARP", Const, 0}, + {"IFT_CCTEMUL", Const, 0}, + {"IFT_CELLULAR", Const, 0}, + {"IFT_CEPT", Const, 0}, + {"IFT_CES", Const, 0}, + {"IFT_CHANNEL", Const, 0}, + {"IFT_CNR", Const, 0}, + {"IFT_COFFEE", Const, 0}, + {"IFT_COMPOSITELINK", Const, 0}, + {"IFT_DCN", Const, 0}, + {"IFT_DIGITALPOWERLINE", Const, 0}, + {"IFT_DIGITALWRAPPEROVERHEADCHANNEL", Const, 0}, + {"IFT_DLSW", Const, 0}, + {"IFT_DOCSCABLEDOWNSTREAM", Const, 0}, + {"IFT_DOCSCABLEMACLAYER", Const, 0}, + {"IFT_DOCSCABLEUPSTREAM", Const, 0}, + {"IFT_DOCSCABLEUPSTREAMCHANNEL", Const, 1}, + {"IFT_DS0", Const, 0}, + {"IFT_DS0BUNDLE", Const, 0}, + {"IFT_DS1FDL", Const, 0}, + {"IFT_DS3", Const, 0}, + {"IFT_DTM", Const, 0}, + {"IFT_DUMMY", Const, 1}, + {"IFT_DVBASILN", Const, 0}, + {"IFT_DVBASIOUT", Const, 0}, + {"IFT_DVBRCCDOWNSTREAM", Const, 0}, + {"IFT_DVBRCCMACLAYER", Const, 0}, + {"IFT_DVBRCCUPSTREAM", Const, 0}, + {"IFT_ECONET", Const, 1}, + {"IFT_ENC", Const, 0}, + {"IFT_EON", Const, 0}, + {"IFT_EPLRS", Const, 0}, + {"IFT_ESCON", Const, 0}, + {"IFT_ETHER", Const, 0}, + {"IFT_FAITH", Const, 0}, + 
{"IFT_FAST", Const, 0}, + {"IFT_FASTETHER", Const, 0}, + {"IFT_FASTETHERFX", Const, 0}, + {"IFT_FDDI", Const, 0}, + {"IFT_FIBRECHANNEL", Const, 0}, + {"IFT_FRAMERELAYINTERCONNECT", Const, 0}, + {"IFT_FRAMERELAYMPI", Const, 0}, + {"IFT_FRDLCIENDPT", Const, 0}, + {"IFT_FRELAY", Const, 0}, + {"IFT_FRELAYDCE", Const, 0}, + {"IFT_FRF16MFRBUNDLE", Const, 0}, + {"IFT_FRFORWARD", Const, 0}, + {"IFT_G703AT2MB", Const, 0}, + {"IFT_G703AT64K", Const, 0}, + {"IFT_GIF", Const, 0}, + {"IFT_GIGABITETHERNET", Const, 0}, + {"IFT_GR303IDT", Const, 0}, + {"IFT_GR303RDT", Const, 0}, + {"IFT_H323GATEKEEPER", Const, 0}, + {"IFT_H323PROXY", Const, 0}, + {"IFT_HDH1822", Const, 0}, + {"IFT_HDLC", Const, 0}, + {"IFT_HDSL2", Const, 0}, + {"IFT_HIPERLAN2", Const, 0}, + {"IFT_HIPPI", Const, 0}, + {"IFT_HIPPIINTERFACE", Const, 0}, + {"IFT_HOSTPAD", Const, 0}, + {"IFT_HSSI", Const, 0}, + {"IFT_HY", Const, 0}, + {"IFT_IBM370PARCHAN", Const, 0}, + {"IFT_IDSL", Const, 0}, + {"IFT_IEEE1394", Const, 0}, + {"IFT_IEEE80211", Const, 0}, + {"IFT_IEEE80212", Const, 0}, + {"IFT_IEEE8023ADLAG", Const, 0}, + {"IFT_IFGSN", Const, 0}, + {"IFT_IMT", Const, 0}, + {"IFT_INFINIBAND", Const, 1}, + {"IFT_INTERLEAVE", Const, 0}, + {"IFT_IP", Const, 0}, + {"IFT_IPFORWARD", Const, 0}, + {"IFT_IPOVERATM", Const, 0}, + {"IFT_IPOVERCDLC", Const, 0}, + {"IFT_IPOVERCLAW", Const, 0}, + {"IFT_IPSWITCH", Const, 0}, + {"IFT_IPXIP", Const, 0}, + {"IFT_ISDN", Const, 0}, + {"IFT_ISDNBASIC", Const, 0}, + {"IFT_ISDNPRIMARY", Const, 0}, + {"IFT_ISDNS", Const, 0}, + {"IFT_ISDNU", Const, 0}, + {"IFT_ISO88022LLC", Const, 0}, + {"IFT_ISO88023", Const, 0}, + {"IFT_ISO88024", Const, 0}, + {"IFT_ISO88025", Const, 0}, + {"IFT_ISO88025CRFPINT", Const, 0}, + {"IFT_ISO88025DTR", Const, 0}, + {"IFT_ISO88025FIBER", Const, 0}, + {"IFT_ISO88026", Const, 0}, + {"IFT_ISUP", Const, 0}, + {"IFT_L2VLAN", Const, 0}, + {"IFT_L3IPVLAN", Const, 0}, + {"IFT_L3IPXVLAN", Const, 0}, + {"IFT_LAPB", Const, 0}, + {"IFT_LAPD", Const, 0}, + {"IFT_LAPF", Const, 0}, + {"IFT_LINEGROUP", Const, 1}, + {"IFT_LOCALTALK", Const, 0}, + {"IFT_LOOP", Const, 0}, + {"IFT_MEDIAMAILOVERIP", Const, 0}, + {"IFT_MFSIGLINK", Const, 0}, + {"IFT_MIOX25", Const, 0}, + {"IFT_MODEM", Const, 0}, + {"IFT_MPC", Const, 0}, + {"IFT_MPLS", Const, 0}, + {"IFT_MPLSTUNNEL", Const, 0}, + {"IFT_MSDSL", Const, 0}, + {"IFT_MVL", Const, 0}, + {"IFT_MYRINET", Const, 0}, + {"IFT_NFAS", Const, 0}, + {"IFT_NSIP", Const, 0}, + {"IFT_OPTICALCHANNEL", Const, 0}, + {"IFT_OPTICALTRANSPORT", Const, 0}, + {"IFT_OTHER", Const, 0}, + {"IFT_P10", Const, 0}, + {"IFT_P80", Const, 0}, + {"IFT_PARA", Const, 0}, + {"IFT_PDP", Const, 0}, + {"IFT_PFLOG", Const, 0}, + {"IFT_PFLOW", Const, 1}, + {"IFT_PFSYNC", Const, 0}, + {"IFT_PLC", Const, 0}, + {"IFT_PON155", Const, 1}, + {"IFT_PON622", Const, 1}, + {"IFT_POS", Const, 0}, + {"IFT_PPP", Const, 0}, + {"IFT_PPPMULTILINKBUNDLE", Const, 0}, + {"IFT_PROPATM", Const, 1}, + {"IFT_PROPBWAP2MP", Const, 0}, + {"IFT_PROPCNLS", Const, 0}, + {"IFT_PROPDOCSWIRELESSDOWNSTREAM", Const, 0}, + {"IFT_PROPDOCSWIRELESSMACLAYER", Const, 0}, + {"IFT_PROPDOCSWIRELESSUPSTREAM", Const, 0}, + {"IFT_PROPMUX", Const, 0}, + {"IFT_PROPVIRTUAL", Const, 0}, + {"IFT_PROPWIRELESSP2P", Const, 0}, + {"IFT_PTPSERIAL", Const, 0}, + {"IFT_PVC", Const, 0}, + {"IFT_Q2931", Const, 1}, + {"IFT_QLLC", Const, 0}, + {"IFT_RADIOMAC", Const, 0}, + {"IFT_RADSL", Const, 0}, + {"IFT_REACHDSL", Const, 0}, + {"IFT_RFC1483", Const, 0}, + {"IFT_RS232", Const, 0}, + {"IFT_RSRB", Const, 0}, + {"IFT_SDLC", Const, 0}, + {"IFT_SDSL", Const, 0}, + 
{"IFT_SHDSL", Const, 0}, + {"IFT_SIP", Const, 0}, + {"IFT_SIPSIG", Const, 1}, + {"IFT_SIPTG", Const, 1}, + {"IFT_SLIP", Const, 0}, + {"IFT_SMDSDXI", Const, 0}, + {"IFT_SMDSICIP", Const, 0}, + {"IFT_SONET", Const, 0}, + {"IFT_SONETOVERHEADCHANNEL", Const, 0}, + {"IFT_SONETPATH", Const, 0}, + {"IFT_SONETVT", Const, 0}, + {"IFT_SRP", Const, 0}, + {"IFT_SS7SIGLINK", Const, 0}, + {"IFT_STACKTOSTACK", Const, 0}, + {"IFT_STARLAN", Const, 0}, + {"IFT_STF", Const, 0}, + {"IFT_T1", Const, 0}, + {"IFT_TDLC", Const, 0}, + {"IFT_TELINK", Const, 1}, + {"IFT_TERMPAD", Const, 0}, + {"IFT_TR008", Const, 0}, + {"IFT_TRANSPHDLC", Const, 0}, + {"IFT_TUNNEL", Const, 0}, + {"IFT_ULTRA", Const, 0}, + {"IFT_USB", Const, 0}, + {"IFT_V11", Const, 0}, + {"IFT_V35", Const, 0}, + {"IFT_V36", Const, 0}, + {"IFT_V37", Const, 0}, + {"IFT_VDSL", Const, 0}, + {"IFT_VIRTUALIPADDRESS", Const, 0}, + {"IFT_VIRTUALTG", Const, 1}, + {"IFT_VOICEDID", Const, 1}, + {"IFT_VOICEEM", Const, 0}, + {"IFT_VOICEEMFGD", Const, 1}, + {"IFT_VOICEENCAP", Const, 0}, + {"IFT_VOICEFGDEANA", Const, 1}, + {"IFT_VOICEFXO", Const, 0}, + {"IFT_VOICEFXS", Const, 0}, + {"IFT_VOICEOVERATM", Const, 0}, + {"IFT_VOICEOVERCABLE", Const, 1}, + {"IFT_VOICEOVERFRAMERELAY", Const, 0}, + {"IFT_VOICEOVERIP", Const, 0}, + {"IFT_X213", Const, 0}, + {"IFT_X25", Const, 0}, + {"IFT_X25DDN", Const, 0}, + {"IFT_X25HUNTGROUP", Const, 0}, + {"IFT_X25MLP", Const, 0}, + {"IFT_X25PLE", Const, 0}, + {"IFT_XETHER", Const, 0}, + {"IGNBRK", Const, 0}, + {"IGNCR", Const, 0}, + {"IGNORE", Const, 0}, + {"IGNPAR", Const, 0}, + {"IMAXBEL", Const, 0}, + {"INFINITE", Const, 0}, + {"INLCR", Const, 0}, + {"INPCK", Const, 0}, + {"INVALID_FILE_ATTRIBUTES", Const, 0}, + {"IN_ACCESS", Const, 0}, + {"IN_ALL_EVENTS", Const, 0}, + {"IN_ATTRIB", Const, 0}, + {"IN_CLASSA_HOST", Const, 0}, + {"IN_CLASSA_MAX", Const, 0}, + {"IN_CLASSA_NET", Const, 0}, + {"IN_CLASSA_NSHIFT", Const, 0}, + {"IN_CLASSB_HOST", Const, 0}, + {"IN_CLASSB_MAX", Const, 0}, + {"IN_CLASSB_NET", Const, 0}, + {"IN_CLASSB_NSHIFT", Const, 0}, + {"IN_CLASSC_HOST", Const, 0}, + {"IN_CLASSC_NET", Const, 0}, + {"IN_CLASSC_NSHIFT", Const, 0}, + {"IN_CLASSD_HOST", Const, 0}, + {"IN_CLASSD_NET", Const, 0}, + {"IN_CLASSD_NSHIFT", Const, 0}, + {"IN_CLOEXEC", Const, 0}, + {"IN_CLOSE", Const, 0}, + {"IN_CLOSE_NOWRITE", Const, 0}, + {"IN_CLOSE_WRITE", Const, 0}, + {"IN_CREATE", Const, 0}, + {"IN_DELETE", Const, 0}, + {"IN_DELETE_SELF", Const, 0}, + {"IN_DONT_FOLLOW", Const, 0}, + {"IN_EXCL_UNLINK", Const, 0}, + {"IN_IGNORED", Const, 0}, + {"IN_ISDIR", Const, 0}, + {"IN_LINKLOCALNETNUM", Const, 0}, + {"IN_LOOPBACKNET", Const, 0}, + {"IN_MASK_ADD", Const, 0}, + {"IN_MODIFY", Const, 0}, + {"IN_MOVE", Const, 0}, + {"IN_MOVED_FROM", Const, 0}, + {"IN_MOVED_TO", Const, 0}, + {"IN_MOVE_SELF", Const, 0}, + {"IN_NONBLOCK", Const, 0}, + {"IN_ONESHOT", Const, 0}, + {"IN_ONLYDIR", Const, 0}, + {"IN_OPEN", Const, 0}, + {"IN_Q_OVERFLOW", Const, 0}, + {"IN_RFC3021_HOST", Const, 1}, + {"IN_RFC3021_MASK", Const, 1}, + {"IN_RFC3021_NET", Const, 1}, + {"IN_RFC3021_NSHIFT", Const, 1}, + {"IN_UNMOUNT", Const, 0}, + {"IOC_IN", Const, 1}, + {"IOC_INOUT", Const, 1}, + {"IOC_OUT", Const, 1}, + {"IOC_VENDOR", Const, 3}, + {"IOC_WS2", Const, 1}, + {"IO_REPARSE_TAG_SYMLINK", Const, 4}, + {"IPMreq", Type, 0}, + {"IPMreq.Interface", Field, 0}, + {"IPMreq.Multiaddr", Field, 0}, + {"IPMreqn", Type, 0}, + {"IPMreqn.Address", Field, 0}, + {"IPMreqn.Ifindex", Field, 0}, + {"IPMreqn.Multiaddr", Field, 0}, + {"IPPROTO_3PC", Const, 0}, + {"IPPROTO_ADFS", Const, 0}, + 
{"IPPROTO_AH", Const, 0}, + {"IPPROTO_AHIP", Const, 0}, + {"IPPROTO_APES", Const, 0}, + {"IPPROTO_ARGUS", Const, 0}, + {"IPPROTO_AX25", Const, 0}, + {"IPPROTO_BHA", Const, 0}, + {"IPPROTO_BLT", Const, 0}, + {"IPPROTO_BRSATMON", Const, 0}, + {"IPPROTO_CARP", Const, 0}, + {"IPPROTO_CFTP", Const, 0}, + {"IPPROTO_CHAOS", Const, 0}, + {"IPPROTO_CMTP", Const, 0}, + {"IPPROTO_COMP", Const, 0}, + {"IPPROTO_CPHB", Const, 0}, + {"IPPROTO_CPNX", Const, 0}, + {"IPPROTO_DCCP", Const, 0}, + {"IPPROTO_DDP", Const, 0}, + {"IPPROTO_DGP", Const, 0}, + {"IPPROTO_DIVERT", Const, 0}, + {"IPPROTO_DIVERT_INIT", Const, 3}, + {"IPPROTO_DIVERT_RESP", Const, 3}, + {"IPPROTO_DONE", Const, 0}, + {"IPPROTO_DSTOPTS", Const, 0}, + {"IPPROTO_EGP", Const, 0}, + {"IPPROTO_EMCON", Const, 0}, + {"IPPROTO_ENCAP", Const, 0}, + {"IPPROTO_EON", Const, 0}, + {"IPPROTO_ESP", Const, 0}, + {"IPPROTO_ETHERIP", Const, 0}, + {"IPPROTO_FRAGMENT", Const, 0}, + {"IPPROTO_GGP", Const, 0}, + {"IPPROTO_GMTP", Const, 0}, + {"IPPROTO_GRE", Const, 0}, + {"IPPROTO_HELLO", Const, 0}, + {"IPPROTO_HMP", Const, 0}, + {"IPPROTO_HOPOPTS", Const, 0}, + {"IPPROTO_ICMP", Const, 0}, + {"IPPROTO_ICMPV6", Const, 0}, + {"IPPROTO_IDP", Const, 0}, + {"IPPROTO_IDPR", Const, 0}, + {"IPPROTO_IDRP", Const, 0}, + {"IPPROTO_IGMP", Const, 0}, + {"IPPROTO_IGP", Const, 0}, + {"IPPROTO_IGRP", Const, 0}, + {"IPPROTO_IL", Const, 0}, + {"IPPROTO_INLSP", Const, 0}, + {"IPPROTO_INP", Const, 0}, + {"IPPROTO_IP", Const, 0}, + {"IPPROTO_IPCOMP", Const, 0}, + {"IPPROTO_IPCV", Const, 0}, + {"IPPROTO_IPEIP", Const, 0}, + {"IPPROTO_IPIP", Const, 0}, + {"IPPROTO_IPPC", Const, 0}, + {"IPPROTO_IPV4", Const, 0}, + {"IPPROTO_IPV6", Const, 0}, + {"IPPROTO_IPV6_ICMP", Const, 1}, + {"IPPROTO_IRTP", Const, 0}, + {"IPPROTO_KRYPTOLAN", Const, 0}, + {"IPPROTO_LARP", Const, 0}, + {"IPPROTO_LEAF1", Const, 0}, + {"IPPROTO_LEAF2", Const, 0}, + {"IPPROTO_MAX", Const, 0}, + {"IPPROTO_MAXID", Const, 0}, + {"IPPROTO_MEAS", Const, 0}, + {"IPPROTO_MH", Const, 1}, + {"IPPROTO_MHRP", Const, 0}, + {"IPPROTO_MICP", Const, 0}, + {"IPPROTO_MOBILE", Const, 0}, + {"IPPROTO_MPLS", Const, 1}, + {"IPPROTO_MTP", Const, 0}, + {"IPPROTO_MUX", Const, 0}, + {"IPPROTO_ND", Const, 0}, + {"IPPROTO_NHRP", Const, 0}, + {"IPPROTO_NONE", Const, 0}, + {"IPPROTO_NSP", Const, 0}, + {"IPPROTO_NVPII", Const, 0}, + {"IPPROTO_OLD_DIVERT", Const, 0}, + {"IPPROTO_OSPFIGP", Const, 0}, + {"IPPROTO_PFSYNC", Const, 0}, + {"IPPROTO_PGM", Const, 0}, + {"IPPROTO_PIGP", Const, 0}, + {"IPPROTO_PIM", Const, 0}, + {"IPPROTO_PRM", Const, 0}, + {"IPPROTO_PUP", Const, 0}, + {"IPPROTO_PVP", Const, 0}, + {"IPPROTO_RAW", Const, 0}, + {"IPPROTO_RCCMON", Const, 0}, + {"IPPROTO_RDP", Const, 0}, + {"IPPROTO_ROUTING", Const, 0}, + {"IPPROTO_RSVP", Const, 0}, + {"IPPROTO_RVD", Const, 0}, + {"IPPROTO_SATEXPAK", Const, 0}, + {"IPPROTO_SATMON", Const, 0}, + {"IPPROTO_SCCSP", Const, 0}, + {"IPPROTO_SCTP", Const, 0}, + {"IPPROTO_SDRP", Const, 0}, + {"IPPROTO_SEND", Const, 1}, + {"IPPROTO_SEP", Const, 0}, + {"IPPROTO_SKIP", Const, 0}, + {"IPPROTO_SPACER", Const, 0}, + {"IPPROTO_SRPC", Const, 0}, + {"IPPROTO_ST", Const, 0}, + {"IPPROTO_SVMTP", Const, 0}, + {"IPPROTO_SWIPE", Const, 0}, + {"IPPROTO_TCF", Const, 0}, + {"IPPROTO_TCP", Const, 0}, + {"IPPROTO_TLSP", Const, 0}, + {"IPPROTO_TP", Const, 0}, + {"IPPROTO_TPXX", Const, 0}, + {"IPPROTO_TRUNK1", Const, 0}, + {"IPPROTO_TRUNK2", Const, 0}, + {"IPPROTO_TTP", Const, 0}, + {"IPPROTO_UDP", Const, 0}, + {"IPPROTO_UDPLITE", Const, 0}, + {"IPPROTO_VINES", Const, 0}, + {"IPPROTO_VISA", Const, 0}, + {"IPPROTO_VMTP", Const, 
0}, + {"IPPROTO_VRRP", Const, 1}, + {"IPPROTO_WBEXPAK", Const, 0}, + {"IPPROTO_WBMON", Const, 0}, + {"IPPROTO_WSN", Const, 0}, + {"IPPROTO_XNET", Const, 0}, + {"IPPROTO_XTP", Const, 0}, + {"IPV6_2292DSTOPTS", Const, 0}, + {"IPV6_2292HOPLIMIT", Const, 0}, + {"IPV6_2292HOPOPTS", Const, 0}, + {"IPV6_2292NEXTHOP", Const, 0}, + {"IPV6_2292PKTINFO", Const, 0}, + {"IPV6_2292PKTOPTIONS", Const, 0}, + {"IPV6_2292RTHDR", Const, 0}, + {"IPV6_ADDRFORM", Const, 0}, + {"IPV6_ADD_MEMBERSHIP", Const, 0}, + {"IPV6_AUTHHDR", Const, 0}, + {"IPV6_AUTH_LEVEL", Const, 1}, + {"IPV6_AUTOFLOWLABEL", Const, 0}, + {"IPV6_BINDANY", Const, 0}, + {"IPV6_BINDV6ONLY", Const, 0}, + {"IPV6_BOUND_IF", Const, 0}, + {"IPV6_CHECKSUM", Const, 0}, + {"IPV6_DEFAULT_MULTICAST_HOPS", Const, 0}, + {"IPV6_DEFAULT_MULTICAST_LOOP", Const, 0}, + {"IPV6_DEFHLIM", Const, 0}, + {"IPV6_DONTFRAG", Const, 0}, + {"IPV6_DROP_MEMBERSHIP", Const, 0}, + {"IPV6_DSTOPTS", Const, 0}, + {"IPV6_ESP_NETWORK_LEVEL", Const, 1}, + {"IPV6_ESP_TRANS_LEVEL", Const, 1}, + {"IPV6_FAITH", Const, 0}, + {"IPV6_FLOWINFO_MASK", Const, 0}, + {"IPV6_FLOWLABEL_MASK", Const, 0}, + {"IPV6_FRAGTTL", Const, 0}, + {"IPV6_FW_ADD", Const, 0}, + {"IPV6_FW_DEL", Const, 0}, + {"IPV6_FW_FLUSH", Const, 0}, + {"IPV6_FW_GET", Const, 0}, + {"IPV6_FW_ZERO", Const, 0}, + {"IPV6_HLIMDEC", Const, 0}, + {"IPV6_HOPLIMIT", Const, 0}, + {"IPV6_HOPOPTS", Const, 0}, + {"IPV6_IPCOMP_LEVEL", Const, 1}, + {"IPV6_IPSEC_POLICY", Const, 0}, + {"IPV6_JOIN_ANYCAST", Const, 0}, + {"IPV6_JOIN_GROUP", Const, 0}, + {"IPV6_LEAVE_ANYCAST", Const, 0}, + {"IPV6_LEAVE_GROUP", Const, 0}, + {"IPV6_MAXHLIM", Const, 0}, + {"IPV6_MAXOPTHDR", Const, 0}, + {"IPV6_MAXPACKET", Const, 0}, + {"IPV6_MAX_GROUP_SRC_FILTER", Const, 0}, + {"IPV6_MAX_MEMBERSHIPS", Const, 0}, + {"IPV6_MAX_SOCK_SRC_FILTER", Const, 0}, + {"IPV6_MIN_MEMBERSHIPS", Const, 0}, + {"IPV6_MMTU", Const, 0}, + {"IPV6_MSFILTER", Const, 0}, + {"IPV6_MTU", Const, 0}, + {"IPV6_MTU_DISCOVER", Const, 0}, + {"IPV6_MULTICAST_HOPS", Const, 0}, + {"IPV6_MULTICAST_IF", Const, 0}, + {"IPV6_MULTICAST_LOOP", Const, 0}, + {"IPV6_NEXTHOP", Const, 0}, + {"IPV6_OPTIONS", Const, 1}, + {"IPV6_PATHMTU", Const, 0}, + {"IPV6_PIPEX", Const, 1}, + {"IPV6_PKTINFO", Const, 0}, + {"IPV6_PMTUDISC_DO", Const, 0}, + {"IPV6_PMTUDISC_DONT", Const, 0}, + {"IPV6_PMTUDISC_PROBE", Const, 0}, + {"IPV6_PMTUDISC_WANT", Const, 0}, + {"IPV6_PORTRANGE", Const, 0}, + {"IPV6_PORTRANGE_DEFAULT", Const, 0}, + {"IPV6_PORTRANGE_HIGH", Const, 0}, + {"IPV6_PORTRANGE_LOW", Const, 0}, + {"IPV6_PREFER_TEMPADDR", Const, 0}, + {"IPV6_RECVDSTOPTS", Const, 0}, + {"IPV6_RECVDSTPORT", Const, 3}, + {"IPV6_RECVERR", Const, 0}, + {"IPV6_RECVHOPLIMIT", Const, 0}, + {"IPV6_RECVHOPOPTS", Const, 0}, + {"IPV6_RECVPATHMTU", Const, 0}, + {"IPV6_RECVPKTINFO", Const, 0}, + {"IPV6_RECVRTHDR", Const, 0}, + {"IPV6_RECVTCLASS", Const, 0}, + {"IPV6_ROUTER_ALERT", Const, 0}, + {"IPV6_RTABLE", Const, 1}, + {"IPV6_RTHDR", Const, 0}, + {"IPV6_RTHDRDSTOPTS", Const, 0}, + {"IPV6_RTHDR_LOOSE", Const, 0}, + {"IPV6_RTHDR_STRICT", Const, 0}, + {"IPV6_RTHDR_TYPE_0", Const, 0}, + {"IPV6_RXDSTOPTS", Const, 0}, + {"IPV6_RXHOPOPTS", Const, 0}, + {"IPV6_SOCKOPT_RESERVED1", Const, 0}, + {"IPV6_TCLASS", Const, 0}, + {"IPV6_UNICAST_HOPS", Const, 0}, + {"IPV6_USE_MIN_MTU", Const, 0}, + {"IPV6_V6ONLY", Const, 0}, + {"IPV6_VERSION", Const, 0}, + {"IPV6_VERSION_MASK", Const, 0}, + {"IPV6_XFRM_POLICY", Const, 0}, + {"IP_ADD_MEMBERSHIP", Const, 0}, + {"IP_ADD_SOURCE_MEMBERSHIP", Const, 0}, + {"IP_AUTH_LEVEL", Const, 1}, + {"IP_BINDANY", Const, 0}, + 
{"IP_BLOCK_SOURCE", Const, 0}, + {"IP_BOUND_IF", Const, 0}, + {"IP_DEFAULT_MULTICAST_LOOP", Const, 0}, + {"IP_DEFAULT_MULTICAST_TTL", Const, 0}, + {"IP_DF", Const, 0}, + {"IP_DIVERTFL", Const, 3}, + {"IP_DONTFRAG", Const, 0}, + {"IP_DROP_MEMBERSHIP", Const, 0}, + {"IP_DROP_SOURCE_MEMBERSHIP", Const, 0}, + {"IP_DUMMYNET3", Const, 0}, + {"IP_DUMMYNET_CONFIGURE", Const, 0}, + {"IP_DUMMYNET_DEL", Const, 0}, + {"IP_DUMMYNET_FLUSH", Const, 0}, + {"IP_DUMMYNET_GET", Const, 0}, + {"IP_EF", Const, 1}, + {"IP_ERRORMTU", Const, 1}, + {"IP_ESP_NETWORK_LEVEL", Const, 1}, + {"IP_ESP_TRANS_LEVEL", Const, 1}, + {"IP_FAITH", Const, 0}, + {"IP_FREEBIND", Const, 0}, + {"IP_FW3", Const, 0}, + {"IP_FW_ADD", Const, 0}, + {"IP_FW_DEL", Const, 0}, + {"IP_FW_FLUSH", Const, 0}, + {"IP_FW_GET", Const, 0}, + {"IP_FW_NAT_CFG", Const, 0}, + {"IP_FW_NAT_DEL", Const, 0}, + {"IP_FW_NAT_GET_CONFIG", Const, 0}, + {"IP_FW_NAT_GET_LOG", Const, 0}, + {"IP_FW_RESETLOG", Const, 0}, + {"IP_FW_TABLE_ADD", Const, 0}, + {"IP_FW_TABLE_DEL", Const, 0}, + {"IP_FW_TABLE_FLUSH", Const, 0}, + {"IP_FW_TABLE_GETSIZE", Const, 0}, + {"IP_FW_TABLE_LIST", Const, 0}, + {"IP_FW_ZERO", Const, 0}, + {"IP_HDRINCL", Const, 0}, + {"IP_IPCOMP_LEVEL", Const, 1}, + {"IP_IPSECFLOWINFO", Const, 1}, + {"IP_IPSEC_LOCAL_AUTH", Const, 1}, + {"IP_IPSEC_LOCAL_CRED", Const, 1}, + {"IP_IPSEC_LOCAL_ID", Const, 1}, + {"IP_IPSEC_POLICY", Const, 0}, + {"IP_IPSEC_REMOTE_AUTH", Const, 1}, + {"IP_IPSEC_REMOTE_CRED", Const, 1}, + {"IP_IPSEC_REMOTE_ID", Const, 1}, + {"IP_MAXPACKET", Const, 0}, + {"IP_MAX_GROUP_SRC_FILTER", Const, 0}, + {"IP_MAX_MEMBERSHIPS", Const, 0}, + {"IP_MAX_SOCK_MUTE_FILTER", Const, 0}, + {"IP_MAX_SOCK_SRC_FILTER", Const, 0}, + {"IP_MAX_SOURCE_FILTER", Const, 0}, + {"IP_MF", Const, 0}, + {"IP_MINFRAGSIZE", Const, 1}, + {"IP_MINTTL", Const, 0}, + {"IP_MIN_MEMBERSHIPS", Const, 0}, + {"IP_MSFILTER", Const, 0}, + {"IP_MSS", Const, 0}, + {"IP_MTU", Const, 0}, + {"IP_MTU_DISCOVER", Const, 0}, + {"IP_MULTICAST_IF", Const, 0}, + {"IP_MULTICAST_IFINDEX", Const, 0}, + {"IP_MULTICAST_LOOP", Const, 0}, + {"IP_MULTICAST_TTL", Const, 0}, + {"IP_MULTICAST_VIF", Const, 0}, + {"IP_NAT__XXX", Const, 0}, + {"IP_OFFMASK", Const, 0}, + {"IP_OLD_FW_ADD", Const, 0}, + {"IP_OLD_FW_DEL", Const, 0}, + {"IP_OLD_FW_FLUSH", Const, 0}, + {"IP_OLD_FW_GET", Const, 0}, + {"IP_OLD_FW_RESETLOG", Const, 0}, + {"IP_OLD_FW_ZERO", Const, 0}, + {"IP_ONESBCAST", Const, 0}, + {"IP_OPTIONS", Const, 0}, + {"IP_ORIGDSTADDR", Const, 0}, + {"IP_PASSSEC", Const, 0}, + {"IP_PIPEX", Const, 1}, + {"IP_PKTINFO", Const, 0}, + {"IP_PKTOPTIONS", Const, 0}, + {"IP_PMTUDISC", Const, 0}, + {"IP_PMTUDISC_DO", Const, 0}, + {"IP_PMTUDISC_DONT", Const, 0}, + {"IP_PMTUDISC_PROBE", Const, 0}, + {"IP_PMTUDISC_WANT", Const, 0}, + {"IP_PORTRANGE", Const, 0}, + {"IP_PORTRANGE_DEFAULT", Const, 0}, + {"IP_PORTRANGE_HIGH", Const, 0}, + {"IP_PORTRANGE_LOW", Const, 0}, + {"IP_RECVDSTADDR", Const, 0}, + {"IP_RECVDSTPORT", Const, 1}, + {"IP_RECVERR", Const, 0}, + {"IP_RECVIF", Const, 0}, + {"IP_RECVOPTS", Const, 0}, + {"IP_RECVORIGDSTADDR", Const, 0}, + {"IP_RECVPKTINFO", Const, 0}, + {"IP_RECVRETOPTS", Const, 0}, + {"IP_RECVRTABLE", Const, 1}, + {"IP_RECVTOS", Const, 0}, + {"IP_RECVTTL", Const, 0}, + {"IP_RETOPTS", Const, 0}, + {"IP_RF", Const, 0}, + {"IP_ROUTER_ALERT", Const, 0}, + {"IP_RSVP_OFF", Const, 0}, + {"IP_RSVP_ON", Const, 0}, + {"IP_RSVP_VIF_OFF", Const, 0}, + {"IP_RSVP_VIF_ON", Const, 0}, + {"IP_RTABLE", Const, 1}, + {"IP_SENDSRCADDR", Const, 0}, + {"IP_STRIPHDR", Const, 0}, + {"IP_TOS", Const, 0}, + 
{"IP_TRAFFIC_MGT_BACKGROUND", Const, 0}, + {"IP_TRANSPARENT", Const, 0}, + {"IP_TTL", Const, 0}, + {"IP_UNBLOCK_SOURCE", Const, 0}, + {"IP_XFRM_POLICY", Const, 0}, + {"IPv6MTUInfo", Type, 2}, + {"IPv6MTUInfo.Addr", Field, 2}, + {"IPv6MTUInfo.Mtu", Field, 2}, + {"IPv6Mreq", Type, 0}, + {"IPv6Mreq.Interface", Field, 0}, + {"IPv6Mreq.Multiaddr", Field, 0}, + {"ISIG", Const, 0}, + {"ISTRIP", Const, 0}, + {"IUCLC", Const, 0}, + {"IUTF8", Const, 0}, + {"IXANY", Const, 0}, + {"IXOFF", Const, 0}, + {"IXON", Const, 0}, + {"IfAddrmsg", Type, 0}, + {"IfAddrmsg.Family", Field, 0}, + {"IfAddrmsg.Flags", Field, 0}, + {"IfAddrmsg.Index", Field, 0}, + {"IfAddrmsg.Prefixlen", Field, 0}, + {"IfAddrmsg.Scope", Field, 0}, + {"IfAnnounceMsghdr", Type, 1}, + {"IfAnnounceMsghdr.Hdrlen", Field, 2}, + {"IfAnnounceMsghdr.Index", Field, 1}, + {"IfAnnounceMsghdr.Msglen", Field, 1}, + {"IfAnnounceMsghdr.Name", Field, 1}, + {"IfAnnounceMsghdr.Type", Field, 1}, + {"IfAnnounceMsghdr.Version", Field, 1}, + {"IfAnnounceMsghdr.What", Field, 1}, + {"IfData", Type, 0}, + {"IfData.Addrlen", Field, 0}, + {"IfData.Baudrate", Field, 0}, + {"IfData.Capabilities", Field, 2}, + {"IfData.Collisions", Field, 0}, + {"IfData.Datalen", Field, 0}, + {"IfData.Epoch", Field, 0}, + {"IfData.Hdrlen", Field, 0}, + {"IfData.Hwassist", Field, 0}, + {"IfData.Ibytes", Field, 0}, + {"IfData.Ierrors", Field, 0}, + {"IfData.Imcasts", Field, 0}, + {"IfData.Ipackets", Field, 0}, + {"IfData.Iqdrops", Field, 0}, + {"IfData.Lastchange", Field, 0}, + {"IfData.Link_state", Field, 0}, + {"IfData.Mclpool", Field, 2}, + {"IfData.Metric", Field, 0}, + {"IfData.Mtu", Field, 0}, + {"IfData.Noproto", Field, 0}, + {"IfData.Obytes", Field, 0}, + {"IfData.Oerrors", Field, 0}, + {"IfData.Omcasts", Field, 0}, + {"IfData.Opackets", Field, 0}, + {"IfData.Pad", Field, 2}, + {"IfData.Pad_cgo_0", Field, 2}, + {"IfData.Pad_cgo_1", Field, 2}, + {"IfData.Physical", Field, 0}, + {"IfData.Recvquota", Field, 0}, + {"IfData.Recvtiming", Field, 0}, + {"IfData.Reserved1", Field, 0}, + {"IfData.Reserved2", Field, 0}, + {"IfData.Spare_char1", Field, 0}, + {"IfData.Spare_char2", Field, 0}, + {"IfData.Type", Field, 0}, + {"IfData.Typelen", Field, 0}, + {"IfData.Unused1", Field, 0}, + {"IfData.Unused2", Field, 0}, + {"IfData.Xmitquota", Field, 0}, + {"IfData.Xmittiming", Field, 0}, + {"IfInfomsg", Type, 0}, + {"IfInfomsg.Change", Field, 0}, + {"IfInfomsg.Family", Field, 0}, + {"IfInfomsg.Flags", Field, 0}, + {"IfInfomsg.Index", Field, 0}, + {"IfInfomsg.Type", Field, 0}, + {"IfInfomsg.X__ifi_pad", Field, 0}, + {"IfMsghdr", Type, 0}, + {"IfMsghdr.Addrs", Field, 0}, + {"IfMsghdr.Data", Field, 0}, + {"IfMsghdr.Flags", Field, 0}, + {"IfMsghdr.Hdrlen", Field, 2}, + {"IfMsghdr.Index", Field, 0}, + {"IfMsghdr.Msglen", Field, 0}, + {"IfMsghdr.Pad1", Field, 2}, + {"IfMsghdr.Pad2", Field, 2}, + {"IfMsghdr.Pad_cgo_0", Field, 0}, + {"IfMsghdr.Pad_cgo_1", Field, 2}, + {"IfMsghdr.Tableid", Field, 2}, + {"IfMsghdr.Type", Field, 0}, + {"IfMsghdr.Version", Field, 0}, + {"IfMsghdr.Xflags", Field, 2}, + {"IfaMsghdr", Type, 0}, + {"IfaMsghdr.Addrs", Field, 0}, + {"IfaMsghdr.Flags", Field, 0}, + {"IfaMsghdr.Hdrlen", Field, 2}, + {"IfaMsghdr.Index", Field, 0}, + {"IfaMsghdr.Metric", Field, 0}, + {"IfaMsghdr.Msglen", Field, 0}, + {"IfaMsghdr.Pad1", Field, 2}, + {"IfaMsghdr.Pad2", Field, 2}, + {"IfaMsghdr.Pad_cgo_0", Field, 0}, + {"IfaMsghdr.Tableid", Field, 2}, + {"IfaMsghdr.Type", Field, 0}, + {"IfaMsghdr.Version", Field, 0}, + {"IfmaMsghdr", Type, 0}, + {"IfmaMsghdr.Addrs", Field, 0}, + {"IfmaMsghdr.Flags", 
Field, 0}, + {"IfmaMsghdr.Index", Field, 0}, + {"IfmaMsghdr.Msglen", Field, 0}, + {"IfmaMsghdr.Pad_cgo_0", Field, 0}, + {"IfmaMsghdr.Type", Field, 0}, + {"IfmaMsghdr.Version", Field, 0}, + {"IfmaMsghdr2", Type, 0}, + {"IfmaMsghdr2.Addrs", Field, 0}, + {"IfmaMsghdr2.Flags", Field, 0}, + {"IfmaMsghdr2.Index", Field, 0}, + {"IfmaMsghdr2.Msglen", Field, 0}, + {"IfmaMsghdr2.Pad_cgo_0", Field, 0}, + {"IfmaMsghdr2.Refcount", Field, 0}, + {"IfmaMsghdr2.Type", Field, 0}, + {"IfmaMsghdr2.Version", Field, 0}, + {"ImplementsGetwd", Const, 0}, + {"Inet4Pktinfo", Type, 0}, + {"Inet4Pktinfo.Addr", Field, 0}, + {"Inet4Pktinfo.Ifindex", Field, 0}, + {"Inet4Pktinfo.Spec_dst", Field, 0}, + {"Inet6Pktinfo", Type, 0}, + {"Inet6Pktinfo.Addr", Field, 0}, + {"Inet6Pktinfo.Ifindex", Field, 0}, + {"InotifyAddWatch", Func, 0}, + {"InotifyEvent", Type, 0}, + {"InotifyEvent.Cookie", Field, 0}, + {"InotifyEvent.Len", Field, 0}, + {"InotifyEvent.Mask", Field, 0}, + {"InotifyEvent.Name", Field, 0}, + {"InotifyEvent.Wd", Field, 0}, + {"InotifyInit", Func, 0}, + {"InotifyInit1", Func, 0}, + {"InotifyRmWatch", Func, 0}, + {"InterfaceAddrMessage", Type, 0}, + {"InterfaceAddrMessage.Data", Field, 0}, + {"InterfaceAddrMessage.Header", Field, 0}, + {"InterfaceAnnounceMessage", Type, 1}, + {"InterfaceAnnounceMessage.Header", Field, 1}, + {"InterfaceInfo", Type, 0}, + {"InterfaceInfo.Address", Field, 0}, + {"InterfaceInfo.BroadcastAddress", Field, 0}, + {"InterfaceInfo.Flags", Field, 0}, + {"InterfaceInfo.Netmask", Field, 0}, + {"InterfaceMessage", Type, 0}, + {"InterfaceMessage.Data", Field, 0}, + {"InterfaceMessage.Header", Field, 0}, + {"InterfaceMulticastAddrMessage", Type, 0}, + {"InterfaceMulticastAddrMessage.Data", Field, 0}, + {"InterfaceMulticastAddrMessage.Header", Field, 0}, + {"InvalidHandle", Const, 0}, + {"Ioperm", Func, 0}, + {"Iopl", Func, 0}, + {"Iovec", Type, 0}, + {"Iovec.Base", Field, 0}, + {"Iovec.Len", Field, 0}, + {"IpAdapterInfo", Type, 0}, + {"IpAdapterInfo.AdapterName", Field, 0}, + {"IpAdapterInfo.Address", Field, 0}, + {"IpAdapterInfo.AddressLength", Field, 0}, + {"IpAdapterInfo.ComboIndex", Field, 0}, + {"IpAdapterInfo.CurrentIpAddress", Field, 0}, + {"IpAdapterInfo.Description", Field, 0}, + {"IpAdapterInfo.DhcpEnabled", Field, 0}, + {"IpAdapterInfo.DhcpServer", Field, 0}, + {"IpAdapterInfo.GatewayList", Field, 0}, + {"IpAdapterInfo.HaveWins", Field, 0}, + {"IpAdapterInfo.Index", Field, 0}, + {"IpAdapterInfo.IpAddressList", Field, 0}, + {"IpAdapterInfo.LeaseExpires", Field, 0}, + {"IpAdapterInfo.LeaseObtained", Field, 0}, + {"IpAdapterInfo.Next", Field, 0}, + {"IpAdapterInfo.PrimaryWinsServer", Field, 0}, + {"IpAdapterInfo.SecondaryWinsServer", Field, 0}, + {"IpAdapterInfo.Type", Field, 0}, + {"IpAddrString", Type, 0}, + {"IpAddrString.Context", Field, 0}, + {"IpAddrString.IpAddress", Field, 0}, + {"IpAddrString.IpMask", Field, 0}, + {"IpAddrString.Next", Field, 0}, + {"IpAddressString", Type, 0}, + {"IpAddressString.String", Field, 0}, + {"IpMaskString", Type, 0}, + {"IpMaskString.String", Field, 2}, + {"Issetugid", Func, 0}, + {"KEY_ALL_ACCESS", Const, 0}, + {"KEY_CREATE_LINK", Const, 0}, + {"KEY_CREATE_SUB_KEY", Const, 0}, + {"KEY_ENUMERATE_SUB_KEYS", Const, 0}, + {"KEY_EXECUTE", Const, 0}, + {"KEY_NOTIFY", Const, 0}, + {"KEY_QUERY_VALUE", Const, 0}, + {"KEY_READ", Const, 0}, + {"KEY_SET_VALUE", Const, 0}, + {"KEY_WOW64_32KEY", Const, 0}, + {"KEY_WOW64_64KEY", Const, 0}, + {"KEY_WRITE", Const, 0}, + {"Kevent", Func, 0}, + {"Kevent_t", Type, 0}, + {"Kevent_t.Data", Field, 0}, + {"Kevent_t.Fflags", 
[... long machine-generated hunk elided: added {"Name", Kind, minorVersion} manifest entries for syscall package symbols, continuing from the Kevent_t fields through SYS_UNLINKAT ...]
0}, + {"SYS_UNMOUNT", Const, 0}, + {"SYS_UNSHARE", Const, 0}, + {"SYS_USELIB", Const, 0}, + {"SYS_USTAT", Const, 0}, + {"SYS_UTIME", Const, 0}, + {"SYS_UTIMENSAT", Const, 0}, + {"SYS_UTIMES", Const, 0}, + {"SYS_UTRACE", Const, 0}, + {"SYS_UUIDGEN", Const, 0}, + {"SYS_VADVISE", Const, 1}, + {"SYS_VFORK", Const, 0}, + {"SYS_VHANGUP", Const, 0}, + {"SYS_VM86", Const, 0}, + {"SYS_VM86OLD", Const, 0}, + {"SYS_VMSPLICE", Const, 0}, + {"SYS_VM_PRESSURE_MONITOR", Const, 0}, + {"SYS_VSERVER", Const, 0}, + {"SYS_WAIT4", Const, 0}, + {"SYS_WAIT4_NOCANCEL", Const, 0}, + {"SYS_WAIT6", Const, 1}, + {"SYS_WAITEVENT", Const, 0}, + {"SYS_WAITID", Const, 0}, + {"SYS_WAITID_NOCANCEL", Const, 0}, + {"SYS_WAITPID", Const, 0}, + {"SYS_WATCHEVENT", Const, 0}, + {"SYS_WORKQ_KERNRETURN", Const, 0}, + {"SYS_WORKQ_OPEN", Const, 0}, + {"SYS_WRITE", Const, 0}, + {"SYS_WRITEV", Const, 0}, + {"SYS_WRITEV_NOCANCEL", Const, 0}, + {"SYS_WRITE_NOCANCEL", Const, 0}, + {"SYS_YIELD", Const, 0}, + {"SYS__LLSEEK", Const, 0}, + {"SYS__LWP_CONTINUE", Const, 1}, + {"SYS__LWP_CREATE", Const, 1}, + {"SYS__LWP_CTL", Const, 1}, + {"SYS__LWP_DETACH", Const, 1}, + {"SYS__LWP_EXIT", Const, 1}, + {"SYS__LWP_GETNAME", Const, 1}, + {"SYS__LWP_GETPRIVATE", Const, 1}, + {"SYS__LWP_KILL", Const, 1}, + {"SYS__LWP_PARK", Const, 1}, + {"SYS__LWP_SELF", Const, 1}, + {"SYS__LWP_SETNAME", Const, 1}, + {"SYS__LWP_SETPRIVATE", Const, 1}, + {"SYS__LWP_SUSPEND", Const, 1}, + {"SYS__LWP_UNPARK", Const, 1}, + {"SYS__LWP_UNPARK_ALL", Const, 1}, + {"SYS__LWP_WAIT", Const, 1}, + {"SYS__LWP_WAKEUP", Const, 1}, + {"SYS__NEWSELECT", Const, 0}, + {"SYS__PSET_BIND", Const, 1}, + {"SYS__SCHED_GETAFFINITY", Const, 1}, + {"SYS__SCHED_GETPARAM", Const, 1}, + {"SYS__SCHED_SETAFFINITY", Const, 1}, + {"SYS__SCHED_SETPARAM", Const, 1}, + {"SYS__SYSCTL", Const, 0}, + {"SYS__UMTX_LOCK", Const, 0}, + {"SYS__UMTX_OP", Const, 0}, + {"SYS__UMTX_UNLOCK", Const, 0}, + {"SYS___ACL_ACLCHECK_FD", Const, 0}, + {"SYS___ACL_ACLCHECK_FILE", Const, 0}, + {"SYS___ACL_ACLCHECK_LINK", Const, 0}, + {"SYS___ACL_DELETE_FD", Const, 0}, + {"SYS___ACL_DELETE_FILE", Const, 0}, + {"SYS___ACL_DELETE_LINK", Const, 0}, + {"SYS___ACL_GET_FD", Const, 0}, + {"SYS___ACL_GET_FILE", Const, 0}, + {"SYS___ACL_GET_LINK", Const, 0}, + {"SYS___ACL_SET_FD", Const, 0}, + {"SYS___ACL_SET_FILE", Const, 0}, + {"SYS___ACL_SET_LINK", Const, 0}, + {"SYS___CAP_RIGHTS_GET", Const, 14}, + {"SYS___CLONE", Const, 1}, + {"SYS___DISABLE_THREADSIGNAL", Const, 0}, + {"SYS___GETCWD", Const, 0}, + {"SYS___GETLOGIN", Const, 1}, + {"SYS___GET_TCB", Const, 1}, + {"SYS___MAC_EXECVE", Const, 0}, + {"SYS___MAC_GETFSSTAT", Const, 0}, + {"SYS___MAC_GET_FD", Const, 0}, + {"SYS___MAC_GET_FILE", Const, 0}, + {"SYS___MAC_GET_LCID", Const, 0}, + {"SYS___MAC_GET_LCTX", Const, 0}, + {"SYS___MAC_GET_LINK", Const, 0}, + {"SYS___MAC_GET_MOUNT", Const, 0}, + {"SYS___MAC_GET_PID", Const, 0}, + {"SYS___MAC_GET_PROC", Const, 0}, + {"SYS___MAC_MOUNT", Const, 0}, + {"SYS___MAC_SET_FD", Const, 0}, + {"SYS___MAC_SET_FILE", Const, 0}, + {"SYS___MAC_SET_LCTX", Const, 0}, + {"SYS___MAC_SET_LINK", Const, 0}, + {"SYS___MAC_SET_PROC", Const, 0}, + {"SYS___MAC_SYSCALL", Const, 0}, + {"SYS___OLD_SEMWAIT_SIGNAL", Const, 0}, + {"SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", Const, 0}, + {"SYS___POSIX_CHOWN", Const, 1}, + {"SYS___POSIX_FCHOWN", Const, 1}, + {"SYS___POSIX_LCHOWN", Const, 1}, + {"SYS___POSIX_RENAME", Const, 1}, + {"SYS___PTHREAD_CANCELED", Const, 0}, + {"SYS___PTHREAD_CHDIR", Const, 0}, + {"SYS___PTHREAD_FCHDIR", Const, 0}, + {"SYS___PTHREAD_KILL", Const, 0}, + 
{"SYS___PTHREAD_MARKCANCEL", Const, 0}, + {"SYS___PTHREAD_SIGMASK", Const, 0}, + {"SYS___QUOTACTL", Const, 1}, + {"SYS___SEMCTL", Const, 1}, + {"SYS___SEMWAIT_SIGNAL", Const, 0}, + {"SYS___SEMWAIT_SIGNAL_NOCANCEL", Const, 0}, + {"SYS___SETLOGIN", Const, 1}, + {"SYS___SETUGID", Const, 0}, + {"SYS___SET_TCB", Const, 1}, + {"SYS___SIGACTION_SIGTRAMP", Const, 1}, + {"SYS___SIGTIMEDWAIT", Const, 1}, + {"SYS___SIGWAIT", Const, 0}, + {"SYS___SIGWAIT_NOCANCEL", Const, 0}, + {"SYS___SYSCTL", Const, 0}, + {"SYS___TFORK", Const, 1}, + {"SYS___THREXIT", Const, 1}, + {"SYS___THRSIGDIVERT", Const, 1}, + {"SYS___THRSLEEP", Const, 1}, + {"SYS___THRWAKEUP", Const, 1}, + {"S_ARCH1", Const, 1}, + {"S_ARCH2", Const, 1}, + {"S_BLKSIZE", Const, 0}, + {"S_IEXEC", Const, 0}, + {"S_IFBLK", Const, 0}, + {"S_IFCHR", Const, 0}, + {"S_IFDIR", Const, 0}, + {"S_IFIFO", Const, 0}, + {"S_IFLNK", Const, 0}, + {"S_IFMT", Const, 0}, + {"S_IFREG", Const, 0}, + {"S_IFSOCK", Const, 0}, + {"S_IFWHT", Const, 0}, + {"S_IREAD", Const, 0}, + {"S_IRGRP", Const, 0}, + {"S_IROTH", Const, 0}, + {"S_IRUSR", Const, 0}, + {"S_IRWXG", Const, 0}, + {"S_IRWXO", Const, 0}, + {"S_IRWXU", Const, 0}, + {"S_ISGID", Const, 0}, + {"S_ISTXT", Const, 0}, + {"S_ISUID", Const, 0}, + {"S_ISVTX", Const, 0}, + {"S_IWGRP", Const, 0}, + {"S_IWOTH", Const, 0}, + {"S_IWRITE", Const, 0}, + {"S_IWUSR", Const, 0}, + {"S_IXGRP", Const, 0}, + {"S_IXOTH", Const, 0}, + {"S_IXUSR", Const, 0}, + {"S_LOGIN_SET", Const, 1}, + {"SecurityAttributes", Type, 0}, + {"SecurityAttributes.InheritHandle", Field, 0}, + {"SecurityAttributes.Length", Field, 0}, + {"SecurityAttributes.SecurityDescriptor", Field, 0}, + {"Seek", Func, 0}, + {"Select", Func, 0}, + {"Sendfile", Func, 0}, + {"Sendmsg", Func, 0}, + {"SendmsgN", Func, 3}, + {"Sendto", Func, 0}, + {"Servent", Type, 0}, + {"Servent.Aliases", Field, 0}, + {"Servent.Name", Field, 0}, + {"Servent.Port", Field, 0}, + {"Servent.Proto", Field, 0}, + {"SetBpf", Func, 0}, + {"SetBpfBuflen", Func, 0}, + {"SetBpfDatalink", Func, 0}, + {"SetBpfHeadercmpl", Func, 0}, + {"SetBpfImmediate", Func, 0}, + {"SetBpfInterface", Func, 0}, + {"SetBpfPromisc", Func, 0}, + {"SetBpfTimeout", Func, 0}, + {"SetCurrentDirectory", Func, 0}, + {"SetEndOfFile", Func, 0}, + {"SetEnvironmentVariable", Func, 0}, + {"SetFileAttributes", Func, 0}, + {"SetFileCompletionNotificationModes", Func, 2}, + {"SetFilePointer", Func, 0}, + {"SetFileTime", Func, 0}, + {"SetHandleInformation", Func, 0}, + {"SetKevent", Func, 0}, + {"SetLsfPromisc", Func, 0}, + {"SetNonblock", Func, 0}, + {"Setdomainname", Func, 0}, + {"Setegid", Func, 0}, + {"Setenv", Func, 0}, + {"Seteuid", Func, 0}, + {"Setfsgid", Func, 0}, + {"Setfsuid", Func, 0}, + {"Setgid", Func, 0}, + {"Setgroups", Func, 0}, + {"Sethostname", Func, 0}, + {"Setlogin", Func, 0}, + {"Setpgid", Func, 0}, + {"Setpriority", Func, 0}, + {"Setprivexec", Func, 0}, + {"Setregid", Func, 0}, + {"Setresgid", Func, 0}, + {"Setresuid", Func, 0}, + {"Setreuid", Func, 0}, + {"Setrlimit", Func, 0}, + {"Setsid", Func, 0}, + {"Setsockopt", Func, 0}, + {"SetsockoptByte", Func, 0}, + {"SetsockoptICMPv6Filter", Func, 2}, + {"SetsockoptIPMreq", Func, 0}, + {"SetsockoptIPMreqn", Func, 0}, + {"SetsockoptIPv6Mreq", Func, 0}, + {"SetsockoptInet4Addr", Func, 0}, + {"SetsockoptInt", Func, 0}, + {"SetsockoptLinger", Func, 0}, + {"SetsockoptString", Func, 0}, + {"SetsockoptTimeval", Func, 0}, + {"Settimeofday", Func, 0}, + {"Setuid", Func, 0}, + {"Setxattr", Func, 1}, + {"Shutdown", Func, 0}, + {"SidTypeAlias", Const, 0}, + {"SidTypeComputer", 
Const, 0}, + {"SidTypeDeletedAccount", Const, 0}, + {"SidTypeDomain", Const, 0}, + {"SidTypeGroup", Const, 0}, + {"SidTypeInvalid", Const, 0}, + {"SidTypeLabel", Const, 0}, + {"SidTypeUnknown", Const, 0}, + {"SidTypeUser", Const, 0}, + {"SidTypeWellKnownGroup", Const, 0}, + {"Signal", Type, 0}, + {"SizeofBpfHdr", Const, 0}, + {"SizeofBpfInsn", Const, 0}, + {"SizeofBpfProgram", Const, 0}, + {"SizeofBpfStat", Const, 0}, + {"SizeofBpfVersion", Const, 0}, + {"SizeofBpfZbuf", Const, 0}, + {"SizeofBpfZbufHeader", Const, 0}, + {"SizeofCmsghdr", Const, 0}, + {"SizeofICMPv6Filter", Const, 2}, + {"SizeofIPMreq", Const, 0}, + {"SizeofIPMreqn", Const, 0}, + {"SizeofIPv6MTUInfo", Const, 2}, + {"SizeofIPv6Mreq", Const, 0}, + {"SizeofIfAddrmsg", Const, 0}, + {"SizeofIfAnnounceMsghdr", Const, 1}, + {"SizeofIfData", Const, 0}, + {"SizeofIfInfomsg", Const, 0}, + {"SizeofIfMsghdr", Const, 0}, + {"SizeofIfaMsghdr", Const, 0}, + {"SizeofIfmaMsghdr", Const, 0}, + {"SizeofIfmaMsghdr2", Const, 0}, + {"SizeofInet4Pktinfo", Const, 0}, + {"SizeofInet6Pktinfo", Const, 0}, + {"SizeofInotifyEvent", Const, 0}, + {"SizeofLinger", Const, 0}, + {"SizeofMsghdr", Const, 0}, + {"SizeofNlAttr", Const, 0}, + {"SizeofNlMsgerr", Const, 0}, + {"SizeofNlMsghdr", Const, 0}, + {"SizeofRtAttr", Const, 0}, + {"SizeofRtGenmsg", Const, 0}, + {"SizeofRtMetrics", Const, 0}, + {"SizeofRtMsg", Const, 0}, + {"SizeofRtMsghdr", Const, 0}, + {"SizeofRtNexthop", Const, 0}, + {"SizeofSockFilter", Const, 0}, + {"SizeofSockFprog", Const, 0}, + {"SizeofSockaddrAny", Const, 0}, + {"SizeofSockaddrDatalink", Const, 0}, + {"SizeofSockaddrInet4", Const, 0}, + {"SizeofSockaddrInet6", Const, 0}, + {"SizeofSockaddrLinklayer", Const, 0}, + {"SizeofSockaddrNetlink", Const, 0}, + {"SizeofSockaddrUnix", Const, 0}, + {"SizeofTCPInfo", Const, 1}, + {"SizeofUcred", Const, 0}, + {"SlicePtrFromStrings", Func, 1}, + {"SockFilter", Type, 0}, + {"SockFilter.Code", Field, 0}, + {"SockFilter.Jf", Field, 0}, + {"SockFilter.Jt", Field, 0}, + {"SockFilter.K", Field, 0}, + {"SockFprog", Type, 0}, + {"SockFprog.Filter", Field, 0}, + {"SockFprog.Len", Field, 0}, + {"SockFprog.Pad_cgo_0", Field, 0}, + {"Sockaddr", Type, 0}, + {"SockaddrDatalink", Type, 0}, + {"SockaddrDatalink.Alen", Field, 0}, + {"SockaddrDatalink.Data", Field, 0}, + {"SockaddrDatalink.Family", Field, 0}, + {"SockaddrDatalink.Index", Field, 0}, + {"SockaddrDatalink.Len", Field, 0}, + {"SockaddrDatalink.Nlen", Field, 0}, + {"SockaddrDatalink.Slen", Field, 0}, + {"SockaddrDatalink.Type", Field, 0}, + {"SockaddrGen", Type, 0}, + {"SockaddrInet4", Type, 0}, + {"SockaddrInet4.Addr", Field, 0}, + {"SockaddrInet4.Port", Field, 0}, + {"SockaddrInet6", Type, 0}, + {"SockaddrInet6.Addr", Field, 0}, + {"SockaddrInet6.Port", Field, 0}, + {"SockaddrInet6.ZoneId", Field, 0}, + {"SockaddrLinklayer", Type, 0}, + {"SockaddrLinklayer.Addr", Field, 0}, + {"SockaddrLinklayer.Halen", Field, 0}, + {"SockaddrLinklayer.Hatype", Field, 0}, + {"SockaddrLinklayer.Ifindex", Field, 0}, + {"SockaddrLinklayer.Pkttype", Field, 0}, + {"SockaddrLinklayer.Protocol", Field, 0}, + {"SockaddrNetlink", Type, 0}, + {"SockaddrNetlink.Family", Field, 0}, + {"SockaddrNetlink.Groups", Field, 0}, + {"SockaddrNetlink.Pad", Field, 0}, + {"SockaddrNetlink.Pid", Field, 0}, + {"SockaddrUnix", Type, 0}, + {"SockaddrUnix.Name", Field, 0}, + {"Socket", Func, 0}, + {"SocketControlMessage", Type, 0}, + {"SocketControlMessage.Data", Field, 0}, + {"SocketControlMessage.Header", Field, 0}, + {"SocketDisableIPv6", Var, 0}, + {"Socketpair", Func, 0}, + {"Splice", Func, 
0}, + {"StartProcess", Func, 0}, + {"StartupInfo", Type, 0}, + {"StartupInfo.Cb", Field, 0}, + {"StartupInfo.Desktop", Field, 0}, + {"StartupInfo.FillAttribute", Field, 0}, + {"StartupInfo.Flags", Field, 0}, + {"StartupInfo.ShowWindow", Field, 0}, + {"StartupInfo.StdErr", Field, 0}, + {"StartupInfo.StdInput", Field, 0}, + {"StartupInfo.StdOutput", Field, 0}, + {"StartupInfo.Title", Field, 0}, + {"StartupInfo.X", Field, 0}, + {"StartupInfo.XCountChars", Field, 0}, + {"StartupInfo.XSize", Field, 0}, + {"StartupInfo.Y", Field, 0}, + {"StartupInfo.YCountChars", Field, 0}, + {"StartupInfo.YSize", Field, 0}, + {"Stat", Func, 0}, + {"Stat_t", Type, 0}, + {"Stat_t.Atim", Field, 0}, + {"Stat_t.Atim_ext", Field, 12}, + {"Stat_t.Atimespec", Field, 0}, + {"Stat_t.Birthtimespec", Field, 0}, + {"Stat_t.Blksize", Field, 0}, + {"Stat_t.Blocks", Field, 0}, + {"Stat_t.Btim_ext", Field, 12}, + {"Stat_t.Ctim", Field, 0}, + {"Stat_t.Ctim_ext", Field, 12}, + {"Stat_t.Ctimespec", Field, 0}, + {"Stat_t.Dev", Field, 0}, + {"Stat_t.Flags", Field, 0}, + {"Stat_t.Gen", Field, 0}, + {"Stat_t.Gid", Field, 0}, + {"Stat_t.Ino", Field, 0}, + {"Stat_t.Lspare", Field, 0}, + {"Stat_t.Lspare0", Field, 2}, + {"Stat_t.Lspare1", Field, 2}, + {"Stat_t.Mode", Field, 0}, + {"Stat_t.Mtim", Field, 0}, + {"Stat_t.Mtim_ext", Field, 12}, + {"Stat_t.Mtimespec", Field, 0}, + {"Stat_t.Nlink", Field, 0}, + {"Stat_t.Pad_cgo_0", Field, 0}, + {"Stat_t.Pad_cgo_1", Field, 0}, + {"Stat_t.Pad_cgo_2", Field, 0}, + {"Stat_t.Padding0", Field, 12}, + {"Stat_t.Padding1", Field, 12}, + {"Stat_t.Qspare", Field, 0}, + {"Stat_t.Rdev", Field, 0}, + {"Stat_t.Size", Field, 0}, + {"Stat_t.Spare", Field, 2}, + {"Stat_t.Uid", Field, 0}, + {"Stat_t.X__pad0", Field, 0}, + {"Stat_t.X__pad1", Field, 0}, + {"Stat_t.X__pad2", Field, 0}, + {"Stat_t.X__st_birthtim", Field, 2}, + {"Stat_t.X__st_ino", Field, 0}, + {"Stat_t.X__unused", Field, 0}, + {"Statfs", Func, 0}, + {"Statfs_t", Type, 0}, + {"Statfs_t.Asyncreads", Field, 0}, + {"Statfs_t.Asyncwrites", Field, 0}, + {"Statfs_t.Bavail", Field, 0}, + {"Statfs_t.Bfree", Field, 0}, + {"Statfs_t.Blocks", Field, 0}, + {"Statfs_t.Bsize", Field, 0}, + {"Statfs_t.Charspare", Field, 0}, + {"Statfs_t.F_asyncreads", Field, 2}, + {"Statfs_t.F_asyncwrites", Field, 2}, + {"Statfs_t.F_bavail", Field, 2}, + {"Statfs_t.F_bfree", Field, 2}, + {"Statfs_t.F_blocks", Field, 2}, + {"Statfs_t.F_bsize", Field, 2}, + {"Statfs_t.F_ctime", Field, 2}, + {"Statfs_t.F_favail", Field, 2}, + {"Statfs_t.F_ffree", Field, 2}, + {"Statfs_t.F_files", Field, 2}, + {"Statfs_t.F_flags", Field, 2}, + {"Statfs_t.F_fsid", Field, 2}, + {"Statfs_t.F_fstypename", Field, 2}, + {"Statfs_t.F_iosize", Field, 2}, + {"Statfs_t.F_mntfromname", Field, 2}, + {"Statfs_t.F_mntfromspec", Field, 3}, + {"Statfs_t.F_mntonname", Field, 2}, + {"Statfs_t.F_namemax", Field, 2}, + {"Statfs_t.F_owner", Field, 2}, + {"Statfs_t.F_spare", Field, 2}, + {"Statfs_t.F_syncreads", Field, 2}, + {"Statfs_t.F_syncwrites", Field, 2}, + {"Statfs_t.Ffree", Field, 0}, + {"Statfs_t.Files", Field, 0}, + {"Statfs_t.Flags", Field, 0}, + {"Statfs_t.Frsize", Field, 0}, + {"Statfs_t.Fsid", Field, 0}, + {"Statfs_t.Fssubtype", Field, 0}, + {"Statfs_t.Fstypename", Field, 0}, + {"Statfs_t.Iosize", Field, 0}, + {"Statfs_t.Mntfromname", Field, 0}, + {"Statfs_t.Mntonname", Field, 0}, + {"Statfs_t.Mount_info", Field, 2}, + {"Statfs_t.Namelen", Field, 0}, + {"Statfs_t.Namemax", Field, 0}, + {"Statfs_t.Owner", Field, 0}, + {"Statfs_t.Pad_cgo_0", Field, 0}, + {"Statfs_t.Pad_cgo_1", Field, 2}, + {"Statfs_t.Reserved", 
Field, 0}, + {"Statfs_t.Spare", Field, 0}, + {"Statfs_t.Syncreads", Field, 0}, + {"Statfs_t.Syncwrites", Field, 0}, + {"Statfs_t.Type", Field, 0}, + {"Statfs_t.Version", Field, 0}, + {"Stderr", Var, 0}, + {"Stdin", Var, 0}, + {"Stdout", Var, 0}, + {"StringBytePtr", Func, 0}, + {"StringByteSlice", Func, 0}, + {"StringSlicePtr", Func, 0}, + {"StringToSid", Func, 0}, + {"StringToUTF16", Func, 0}, + {"StringToUTF16Ptr", Func, 0}, + {"Symlink", Func, 0}, + {"Sync", Func, 0}, + {"SyncFileRange", Func, 0}, + {"SysProcAttr", Type, 0}, + {"SysProcAttr.AdditionalInheritedHandles", Field, 17}, + {"SysProcAttr.AmbientCaps", Field, 9}, + {"SysProcAttr.CgroupFD", Field, 20}, + {"SysProcAttr.Chroot", Field, 0}, + {"SysProcAttr.Cloneflags", Field, 2}, + {"SysProcAttr.CmdLine", Field, 0}, + {"SysProcAttr.CreationFlags", Field, 1}, + {"SysProcAttr.Credential", Field, 0}, + {"SysProcAttr.Ctty", Field, 1}, + {"SysProcAttr.Foreground", Field, 5}, + {"SysProcAttr.GidMappings", Field, 4}, + {"SysProcAttr.GidMappingsEnableSetgroups", Field, 5}, + {"SysProcAttr.HideWindow", Field, 0}, + {"SysProcAttr.Jail", Field, 21}, + {"SysProcAttr.NoInheritHandles", Field, 16}, + {"SysProcAttr.Noctty", Field, 0}, + {"SysProcAttr.ParentProcess", Field, 17}, + {"SysProcAttr.Pdeathsig", Field, 0}, + {"SysProcAttr.Pgid", Field, 5}, + {"SysProcAttr.PidFD", Field, 22}, + {"SysProcAttr.ProcessAttributes", Field, 13}, + {"SysProcAttr.Ptrace", Field, 0}, + {"SysProcAttr.Setctty", Field, 0}, + {"SysProcAttr.Setpgid", Field, 0}, + {"SysProcAttr.Setsid", Field, 0}, + {"SysProcAttr.ThreadAttributes", Field, 13}, + {"SysProcAttr.Token", Field, 10}, + {"SysProcAttr.UidMappings", Field, 4}, + {"SysProcAttr.Unshareflags", Field, 7}, + {"SysProcAttr.UseCgroupFD", Field, 20}, + {"SysProcIDMap", Type, 4}, + {"SysProcIDMap.ContainerID", Field, 4}, + {"SysProcIDMap.HostID", Field, 4}, + {"SysProcIDMap.Size", Field, 4}, + {"Syscall", Func, 0}, + {"Syscall12", Func, 0}, + {"Syscall15", Func, 0}, + {"Syscall18", Func, 12}, + {"Syscall6", Func, 0}, + {"Syscall9", Func, 0}, + {"SyscallN", Func, 18}, + {"Sysctl", Func, 0}, + {"SysctlUint32", Func, 0}, + {"Sysctlnode", Type, 2}, + {"Sysctlnode.Flags", Field, 2}, + {"Sysctlnode.Name", Field, 2}, + {"Sysctlnode.Num", Field, 2}, + {"Sysctlnode.Un", Field, 2}, + {"Sysctlnode.Ver", Field, 2}, + {"Sysctlnode.X__rsvd", Field, 2}, + {"Sysctlnode.X_sysctl_desc", Field, 2}, + {"Sysctlnode.X_sysctl_func", Field, 2}, + {"Sysctlnode.X_sysctl_parent", Field, 2}, + {"Sysctlnode.X_sysctl_size", Field, 2}, + {"Sysinfo", Func, 0}, + {"Sysinfo_t", Type, 0}, + {"Sysinfo_t.Bufferram", Field, 0}, + {"Sysinfo_t.Freehigh", Field, 0}, + {"Sysinfo_t.Freeram", Field, 0}, + {"Sysinfo_t.Freeswap", Field, 0}, + {"Sysinfo_t.Loads", Field, 0}, + {"Sysinfo_t.Pad", Field, 0}, + {"Sysinfo_t.Pad_cgo_0", Field, 0}, + {"Sysinfo_t.Pad_cgo_1", Field, 0}, + {"Sysinfo_t.Procs", Field, 0}, + {"Sysinfo_t.Sharedram", Field, 0}, + {"Sysinfo_t.Totalhigh", Field, 0}, + {"Sysinfo_t.Totalram", Field, 0}, + {"Sysinfo_t.Totalswap", Field, 0}, + {"Sysinfo_t.Unit", Field, 0}, + {"Sysinfo_t.Uptime", Field, 0}, + {"Sysinfo_t.X_f", Field, 0}, + {"Systemtime", Type, 0}, + {"Systemtime.Day", Field, 0}, + {"Systemtime.DayOfWeek", Field, 0}, + {"Systemtime.Hour", Field, 0}, + {"Systemtime.Milliseconds", Field, 0}, + {"Systemtime.Minute", Field, 0}, + {"Systemtime.Month", Field, 0}, + {"Systemtime.Second", Field, 0}, + {"Systemtime.Year", Field, 0}, + {"TCGETS", Const, 0}, + {"TCIFLUSH", Const, 1}, + {"TCIOFLUSH", Const, 1}, + {"TCOFLUSH", Const, 1}, + {"TCPInfo", 
Type, 1}, + {"TCPInfo.Advmss", Field, 1}, + {"TCPInfo.Ato", Field, 1}, + {"TCPInfo.Backoff", Field, 1}, + {"TCPInfo.Ca_state", Field, 1}, + {"TCPInfo.Fackets", Field, 1}, + {"TCPInfo.Last_ack_recv", Field, 1}, + {"TCPInfo.Last_ack_sent", Field, 1}, + {"TCPInfo.Last_data_recv", Field, 1}, + {"TCPInfo.Last_data_sent", Field, 1}, + {"TCPInfo.Lost", Field, 1}, + {"TCPInfo.Options", Field, 1}, + {"TCPInfo.Pad_cgo_0", Field, 1}, + {"TCPInfo.Pmtu", Field, 1}, + {"TCPInfo.Probes", Field, 1}, + {"TCPInfo.Rcv_mss", Field, 1}, + {"TCPInfo.Rcv_rtt", Field, 1}, + {"TCPInfo.Rcv_space", Field, 1}, + {"TCPInfo.Rcv_ssthresh", Field, 1}, + {"TCPInfo.Reordering", Field, 1}, + {"TCPInfo.Retrans", Field, 1}, + {"TCPInfo.Retransmits", Field, 1}, + {"TCPInfo.Rto", Field, 1}, + {"TCPInfo.Rtt", Field, 1}, + {"TCPInfo.Rttvar", Field, 1}, + {"TCPInfo.Sacked", Field, 1}, + {"TCPInfo.Snd_cwnd", Field, 1}, + {"TCPInfo.Snd_mss", Field, 1}, + {"TCPInfo.Snd_ssthresh", Field, 1}, + {"TCPInfo.State", Field, 1}, + {"TCPInfo.Total_retrans", Field, 1}, + {"TCPInfo.Unacked", Field, 1}, + {"TCPKeepalive", Type, 3}, + {"TCPKeepalive.Interval", Field, 3}, + {"TCPKeepalive.OnOff", Field, 3}, + {"TCPKeepalive.Time", Field, 3}, + {"TCP_CA_NAME_MAX", Const, 0}, + {"TCP_CONGCTL", Const, 1}, + {"TCP_CONGESTION", Const, 0}, + {"TCP_CONNECTIONTIMEOUT", Const, 0}, + {"TCP_CORK", Const, 0}, + {"TCP_DEFER_ACCEPT", Const, 0}, + {"TCP_ENABLE_ECN", Const, 16}, + {"TCP_INFO", Const, 0}, + {"TCP_KEEPALIVE", Const, 0}, + {"TCP_KEEPCNT", Const, 0}, + {"TCP_KEEPIDLE", Const, 0}, + {"TCP_KEEPINIT", Const, 1}, + {"TCP_KEEPINTVL", Const, 0}, + {"TCP_LINGER2", Const, 0}, + {"TCP_MAXBURST", Const, 0}, + {"TCP_MAXHLEN", Const, 0}, + {"TCP_MAXOLEN", Const, 0}, + {"TCP_MAXSEG", Const, 0}, + {"TCP_MAXWIN", Const, 0}, + {"TCP_MAX_SACK", Const, 0}, + {"TCP_MAX_WINSHIFT", Const, 0}, + {"TCP_MD5SIG", Const, 0}, + {"TCP_MD5SIG_MAXKEYLEN", Const, 0}, + {"TCP_MINMSS", Const, 0}, + {"TCP_MINMSSOVERLOAD", Const, 0}, + {"TCP_MSS", Const, 0}, + {"TCP_NODELAY", Const, 0}, + {"TCP_NOOPT", Const, 0}, + {"TCP_NOPUSH", Const, 0}, + {"TCP_NOTSENT_LOWAT", Const, 16}, + {"TCP_NSTATES", Const, 1}, + {"TCP_QUICKACK", Const, 0}, + {"TCP_RXT_CONNDROPTIME", Const, 0}, + {"TCP_RXT_FINDROP", Const, 0}, + {"TCP_SACK_ENABLE", Const, 1}, + {"TCP_SENDMOREACKS", Const, 16}, + {"TCP_SYNCNT", Const, 0}, + {"TCP_VENDOR", Const, 3}, + {"TCP_WINDOW_CLAMP", Const, 0}, + {"TCSAFLUSH", Const, 1}, + {"TCSETS", Const, 0}, + {"TF_DISCONNECT", Const, 0}, + {"TF_REUSE_SOCKET", Const, 0}, + {"TF_USE_DEFAULT_WORKER", Const, 0}, + {"TF_USE_KERNEL_APC", Const, 0}, + {"TF_USE_SYSTEM_THREAD", Const, 0}, + {"TF_WRITE_BEHIND", Const, 0}, + {"TH32CS_INHERIT", Const, 4}, + {"TH32CS_SNAPALL", Const, 4}, + {"TH32CS_SNAPHEAPLIST", Const, 4}, + {"TH32CS_SNAPMODULE", Const, 4}, + {"TH32CS_SNAPMODULE32", Const, 4}, + {"TH32CS_SNAPPROCESS", Const, 4}, + {"TH32CS_SNAPTHREAD", Const, 4}, + {"TIME_ZONE_ID_DAYLIGHT", Const, 0}, + {"TIME_ZONE_ID_STANDARD", Const, 0}, + {"TIME_ZONE_ID_UNKNOWN", Const, 0}, + {"TIOCCBRK", Const, 0}, + {"TIOCCDTR", Const, 0}, + {"TIOCCONS", Const, 0}, + {"TIOCDCDTIMESTAMP", Const, 0}, + {"TIOCDRAIN", Const, 0}, + {"TIOCDSIMICROCODE", Const, 0}, + {"TIOCEXCL", Const, 0}, + {"TIOCEXT", Const, 0}, + {"TIOCFLAG_CDTRCTS", Const, 1}, + {"TIOCFLAG_CLOCAL", Const, 1}, + {"TIOCFLAG_CRTSCTS", Const, 1}, + {"TIOCFLAG_MDMBUF", Const, 1}, + {"TIOCFLAG_PPS", Const, 1}, + {"TIOCFLAG_SOFTCAR", Const, 1}, + {"TIOCFLUSH", Const, 0}, + {"TIOCGDEV", Const, 0}, + {"TIOCGDRAINWAIT", Const, 0}, + {"TIOCGETA", Const, 
0}, + {"TIOCGETD", Const, 0}, + {"TIOCGFLAGS", Const, 1}, + {"TIOCGICOUNT", Const, 0}, + {"TIOCGLCKTRMIOS", Const, 0}, + {"TIOCGLINED", Const, 1}, + {"TIOCGPGRP", Const, 0}, + {"TIOCGPTN", Const, 0}, + {"TIOCGQSIZE", Const, 1}, + {"TIOCGRANTPT", Const, 1}, + {"TIOCGRS485", Const, 0}, + {"TIOCGSERIAL", Const, 0}, + {"TIOCGSID", Const, 0}, + {"TIOCGSIZE", Const, 1}, + {"TIOCGSOFTCAR", Const, 0}, + {"TIOCGTSTAMP", Const, 1}, + {"TIOCGWINSZ", Const, 0}, + {"TIOCINQ", Const, 0}, + {"TIOCIXOFF", Const, 0}, + {"TIOCIXON", Const, 0}, + {"TIOCLINUX", Const, 0}, + {"TIOCMBIC", Const, 0}, + {"TIOCMBIS", Const, 0}, + {"TIOCMGDTRWAIT", Const, 0}, + {"TIOCMGET", Const, 0}, + {"TIOCMIWAIT", Const, 0}, + {"TIOCMODG", Const, 0}, + {"TIOCMODS", Const, 0}, + {"TIOCMSDTRWAIT", Const, 0}, + {"TIOCMSET", Const, 0}, + {"TIOCM_CAR", Const, 0}, + {"TIOCM_CD", Const, 0}, + {"TIOCM_CTS", Const, 0}, + {"TIOCM_DCD", Const, 0}, + {"TIOCM_DSR", Const, 0}, + {"TIOCM_DTR", Const, 0}, + {"TIOCM_LE", Const, 0}, + {"TIOCM_RI", Const, 0}, + {"TIOCM_RNG", Const, 0}, + {"TIOCM_RTS", Const, 0}, + {"TIOCM_SR", Const, 0}, + {"TIOCM_ST", Const, 0}, + {"TIOCNOTTY", Const, 0}, + {"TIOCNXCL", Const, 0}, + {"TIOCOUTQ", Const, 0}, + {"TIOCPKT", Const, 0}, + {"TIOCPKT_DATA", Const, 0}, + {"TIOCPKT_DOSTOP", Const, 0}, + {"TIOCPKT_FLUSHREAD", Const, 0}, + {"TIOCPKT_FLUSHWRITE", Const, 0}, + {"TIOCPKT_IOCTL", Const, 0}, + {"TIOCPKT_NOSTOP", Const, 0}, + {"TIOCPKT_START", Const, 0}, + {"TIOCPKT_STOP", Const, 0}, + {"TIOCPTMASTER", Const, 0}, + {"TIOCPTMGET", Const, 1}, + {"TIOCPTSNAME", Const, 1}, + {"TIOCPTYGNAME", Const, 0}, + {"TIOCPTYGRANT", Const, 0}, + {"TIOCPTYUNLK", Const, 0}, + {"TIOCRCVFRAME", Const, 1}, + {"TIOCREMOTE", Const, 0}, + {"TIOCSBRK", Const, 0}, + {"TIOCSCONS", Const, 0}, + {"TIOCSCTTY", Const, 0}, + {"TIOCSDRAINWAIT", Const, 0}, + {"TIOCSDTR", Const, 0}, + {"TIOCSERCONFIG", Const, 0}, + {"TIOCSERGETLSR", Const, 0}, + {"TIOCSERGETMULTI", Const, 0}, + {"TIOCSERGSTRUCT", Const, 0}, + {"TIOCSERGWILD", Const, 0}, + {"TIOCSERSETMULTI", Const, 0}, + {"TIOCSERSWILD", Const, 0}, + {"TIOCSER_TEMT", Const, 0}, + {"TIOCSETA", Const, 0}, + {"TIOCSETAF", Const, 0}, + {"TIOCSETAW", Const, 0}, + {"TIOCSETD", Const, 0}, + {"TIOCSFLAGS", Const, 1}, + {"TIOCSIG", Const, 0}, + {"TIOCSLCKTRMIOS", Const, 0}, + {"TIOCSLINED", Const, 1}, + {"TIOCSPGRP", Const, 0}, + {"TIOCSPTLCK", Const, 0}, + {"TIOCSQSIZE", Const, 1}, + {"TIOCSRS485", Const, 0}, + {"TIOCSSERIAL", Const, 0}, + {"TIOCSSIZE", Const, 1}, + {"TIOCSSOFTCAR", Const, 0}, + {"TIOCSTART", Const, 0}, + {"TIOCSTAT", Const, 0}, + {"TIOCSTI", Const, 0}, + {"TIOCSTOP", Const, 0}, + {"TIOCSTSTAMP", Const, 1}, + {"TIOCSWINSZ", Const, 0}, + {"TIOCTIMESTAMP", Const, 0}, + {"TIOCUCNTL", Const, 0}, + {"TIOCVHANGUP", Const, 0}, + {"TIOCXMTFRAME", Const, 1}, + {"TOKEN_ADJUST_DEFAULT", Const, 0}, + {"TOKEN_ADJUST_GROUPS", Const, 0}, + {"TOKEN_ADJUST_PRIVILEGES", Const, 0}, + {"TOKEN_ADJUST_SESSIONID", Const, 11}, + {"TOKEN_ALL_ACCESS", Const, 0}, + {"TOKEN_ASSIGN_PRIMARY", Const, 0}, + {"TOKEN_DUPLICATE", Const, 0}, + {"TOKEN_EXECUTE", Const, 0}, + {"TOKEN_IMPERSONATE", Const, 0}, + {"TOKEN_QUERY", Const, 0}, + {"TOKEN_QUERY_SOURCE", Const, 0}, + {"TOKEN_READ", Const, 0}, + {"TOKEN_WRITE", Const, 0}, + {"TOSTOP", Const, 0}, + {"TRUNCATE_EXISTING", Const, 0}, + {"TUNATTACHFILTER", Const, 0}, + {"TUNDETACHFILTER", Const, 0}, + {"TUNGETFEATURES", Const, 0}, + {"TUNGETIFF", Const, 0}, + {"TUNGETSNDBUF", Const, 0}, + {"TUNGETVNETHDRSZ", Const, 0}, + {"TUNSETDEBUG", Const, 0}, + {"TUNSETGROUP", Const, 
0}, + {"TUNSETIFF", Const, 0}, + {"TUNSETLINK", Const, 0}, + {"TUNSETNOCSUM", Const, 0}, + {"TUNSETOFFLOAD", Const, 0}, + {"TUNSETOWNER", Const, 0}, + {"TUNSETPERSIST", Const, 0}, + {"TUNSETSNDBUF", Const, 0}, + {"TUNSETTXFILTER", Const, 0}, + {"TUNSETVNETHDRSZ", Const, 0}, + {"Tee", Func, 0}, + {"TerminateProcess", Func, 0}, + {"Termios", Type, 0}, + {"Termios.Cc", Field, 0}, + {"Termios.Cflag", Field, 0}, + {"Termios.Iflag", Field, 0}, + {"Termios.Ispeed", Field, 0}, + {"Termios.Lflag", Field, 0}, + {"Termios.Line", Field, 0}, + {"Termios.Oflag", Field, 0}, + {"Termios.Ospeed", Field, 0}, + {"Termios.Pad_cgo_0", Field, 0}, + {"Tgkill", Func, 0}, + {"Time", Func, 0}, + {"Time_t", Type, 0}, + {"Times", Func, 0}, + {"Timespec", Type, 0}, + {"Timespec.Nsec", Field, 0}, + {"Timespec.Pad_cgo_0", Field, 2}, + {"Timespec.Sec", Field, 0}, + {"TimespecToNsec", Func, 0}, + {"Timeval", Type, 0}, + {"Timeval.Pad_cgo_0", Field, 0}, + {"Timeval.Sec", Field, 0}, + {"Timeval.Usec", Field, 0}, + {"Timeval32", Type, 0}, + {"Timeval32.Sec", Field, 0}, + {"Timeval32.Usec", Field, 0}, + {"TimevalToNsec", Func, 0}, + {"Timex", Type, 0}, + {"Timex.Calcnt", Field, 0}, + {"Timex.Constant", Field, 0}, + {"Timex.Errcnt", Field, 0}, + {"Timex.Esterror", Field, 0}, + {"Timex.Freq", Field, 0}, + {"Timex.Jitcnt", Field, 0}, + {"Timex.Jitter", Field, 0}, + {"Timex.Maxerror", Field, 0}, + {"Timex.Modes", Field, 0}, + {"Timex.Offset", Field, 0}, + {"Timex.Pad_cgo_0", Field, 0}, + {"Timex.Pad_cgo_1", Field, 0}, + {"Timex.Pad_cgo_2", Field, 0}, + {"Timex.Pad_cgo_3", Field, 0}, + {"Timex.Ppsfreq", Field, 0}, + {"Timex.Precision", Field, 0}, + {"Timex.Shift", Field, 0}, + {"Timex.Stabil", Field, 0}, + {"Timex.Status", Field, 0}, + {"Timex.Stbcnt", Field, 0}, + {"Timex.Tai", Field, 0}, + {"Timex.Tick", Field, 0}, + {"Timex.Time", Field, 0}, + {"Timex.Tolerance", Field, 0}, + {"Timezoneinformation", Type, 0}, + {"Timezoneinformation.Bias", Field, 0}, + {"Timezoneinformation.DaylightBias", Field, 0}, + {"Timezoneinformation.DaylightDate", Field, 0}, + {"Timezoneinformation.DaylightName", Field, 0}, + {"Timezoneinformation.StandardBias", Field, 0}, + {"Timezoneinformation.StandardDate", Field, 0}, + {"Timezoneinformation.StandardName", Field, 0}, + {"Tms", Type, 0}, + {"Tms.Cstime", Field, 0}, + {"Tms.Cutime", Field, 0}, + {"Tms.Stime", Field, 0}, + {"Tms.Utime", Field, 0}, + {"Token", Type, 0}, + {"TokenAccessInformation", Const, 0}, + {"TokenAuditPolicy", Const, 0}, + {"TokenDefaultDacl", Const, 0}, + {"TokenElevation", Const, 0}, + {"TokenElevationType", Const, 0}, + {"TokenGroups", Const, 0}, + {"TokenGroupsAndPrivileges", Const, 0}, + {"TokenHasRestrictions", Const, 0}, + {"TokenImpersonationLevel", Const, 0}, + {"TokenIntegrityLevel", Const, 0}, + {"TokenLinkedToken", Const, 0}, + {"TokenLogonSid", Const, 0}, + {"TokenMandatoryPolicy", Const, 0}, + {"TokenOrigin", Const, 0}, + {"TokenOwner", Const, 0}, + {"TokenPrimaryGroup", Const, 0}, + {"TokenPrivileges", Const, 0}, + {"TokenRestrictedSids", Const, 0}, + {"TokenSandBoxInert", Const, 0}, + {"TokenSessionId", Const, 0}, + {"TokenSessionReference", Const, 0}, + {"TokenSource", Const, 0}, + {"TokenStatistics", Const, 0}, + {"TokenType", Const, 0}, + {"TokenUIAccess", Const, 0}, + {"TokenUser", Const, 0}, + {"TokenVirtualizationAllowed", Const, 0}, + {"TokenVirtualizationEnabled", Const, 0}, + {"Tokenprimarygroup", Type, 0}, + {"Tokenprimarygroup.PrimaryGroup", Field, 0}, + {"Tokenuser", Type, 0}, + {"Tokenuser.User", Field, 0}, + {"TranslateAccountName", Func, 0}, + 
{"TranslateName", Func, 0}, + {"TransmitFile", Func, 0}, + {"TransmitFileBuffers", Type, 0}, + {"TransmitFileBuffers.Head", Field, 0}, + {"TransmitFileBuffers.HeadLength", Field, 0}, + {"TransmitFileBuffers.Tail", Field, 0}, + {"TransmitFileBuffers.TailLength", Field, 0}, + {"Truncate", Func, 0}, + {"UNIX_PATH_MAX", Const, 12}, + {"USAGE_MATCH_TYPE_AND", Const, 0}, + {"USAGE_MATCH_TYPE_OR", Const, 0}, + {"UTF16FromString", Func, 1}, + {"UTF16PtrFromString", Func, 1}, + {"UTF16ToString", Func, 0}, + {"Ucred", Type, 0}, + {"Ucred.Gid", Field, 0}, + {"Ucred.Pid", Field, 0}, + {"Ucred.Uid", Field, 0}, + {"Umask", Func, 0}, + {"Uname", Func, 0}, + {"Undelete", Func, 0}, + {"UnixCredentials", Func, 0}, + {"UnixRights", Func, 0}, + {"Unlink", Func, 0}, + {"Unlinkat", Func, 0}, + {"UnmapViewOfFile", Func, 0}, + {"Unmount", Func, 0}, + {"Unsetenv", Func, 4}, + {"Unshare", Func, 0}, + {"UserInfo10", Type, 0}, + {"UserInfo10.Comment", Field, 0}, + {"UserInfo10.FullName", Field, 0}, + {"UserInfo10.Name", Field, 0}, + {"UserInfo10.UsrComment", Field, 0}, + {"Ustat", Func, 0}, + {"Ustat_t", Type, 0}, + {"Ustat_t.Fname", Field, 0}, + {"Ustat_t.Fpack", Field, 0}, + {"Ustat_t.Pad_cgo_0", Field, 0}, + {"Ustat_t.Pad_cgo_1", Field, 0}, + {"Ustat_t.Tfree", Field, 0}, + {"Ustat_t.Tinode", Field, 0}, + {"Utimbuf", Type, 0}, + {"Utimbuf.Actime", Field, 0}, + {"Utimbuf.Modtime", Field, 0}, + {"Utime", Func, 0}, + {"Utimes", Func, 0}, + {"UtimesNano", Func, 1}, + {"Utsname", Type, 0}, + {"Utsname.Domainname", Field, 0}, + {"Utsname.Machine", Field, 0}, + {"Utsname.Nodename", Field, 0}, + {"Utsname.Release", Field, 0}, + {"Utsname.Sysname", Field, 0}, + {"Utsname.Version", Field, 0}, + {"VDISCARD", Const, 0}, + {"VDSUSP", Const, 1}, + {"VEOF", Const, 0}, + {"VEOL", Const, 0}, + {"VEOL2", Const, 0}, + {"VERASE", Const, 0}, + {"VERASE2", Const, 1}, + {"VINTR", Const, 0}, + {"VKILL", Const, 0}, + {"VLNEXT", Const, 0}, + {"VMIN", Const, 0}, + {"VQUIT", Const, 0}, + {"VREPRINT", Const, 0}, + {"VSTART", Const, 0}, + {"VSTATUS", Const, 1}, + {"VSTOP", Const, 0}, + {"VSUSP", Const, 0}, + {"VSWTC", Const, 0}, + {"VT0", Const, 1}, + {"VT1", Const, 1}, + {"VTDLY", Const, 1}, + {"VTIME", Const, 0}, + {"VWERASE", Const, 0}, + {"VirtualLock", Func, 0}, + {"VirtualUnlock", Func, 0}, + {"WAIT_ABANDONED", Const, 0}, + {"WAIT_FAILED", Const, 0}, + {"WAIT_OBJECT_0", Const, 0}, + {"WAIT_TIMEOUT", Const, 0}, + {"WALL", Const, 0}, + {"WALLSIG", Const, 1}, + {"WALTSIG", Const, 1}, + {"WCLONE", Const, 0}, + {"WCONTINUED", Const, 0}, + {"WCOREFLAG", Const, 0}, + {"WEXITED", Const, 0}, + {"WLINUXCLONE", Const, 0}, + {"WNOHANG", Const, 0}, + {"WNOTHREAD", Const, 0}, + {"WNOWAIT", Const, 0}, + {"WNOZOMBIE", Const, 1}, + {"WOPTSCHECKED", Const, 1}, + {"WORDSIZE", Const, 0}, + {"WSABuf", Type, 0}, + {"WSABuf.Buf", Field, 0}, + {"WSABuf.Len", Field, 0}, + {"WSACleanup", Func, 0}, + {"WSADESCRIPTION_LEN", Const, 0}, + {"WSAData", Type, 0}, + {"WSAData.Description", Field, 0}, + {"WSAData.HighVersion", Field, 0}, + {"WSAData.MaxSockets", Field, 0}, + {"WSAData.MaxUdpDg", Field, 0}, + {"WSAData.SystemStatus", Field, 0}, + {"WSAData.VendorInfo", Field, 0}, + {"WSAData.Version", Field, 0}, + {"WSAEACCES", Const, 2}, + {"WSAECONNABORTED", Const, 9}, + {"WSAECONNRESET", Const, 3}, + {"WSAEnumProtocols", Func, 2}, + {"WSAID_CONNECTEX", Var, 1}, + {"WSAIoctl", Func, 0}, + {"WSAPROTOCOL_LEN", Const, 2}, + {"WSAProtocolChain", Type, 2}, + {"WSAProtocolChain.ChainEntries", Field, 2}, + {"WSAProtocolChain.ChainLen", Field, 2}, + {"WSAProtocolInfo", Type, 2}, 
+ {"WSAProtocolInfo.AddressFamily", Field, 2}, + {"WSAProtocolInfo.CatalogEntryId", Field, 2}, + {"WSAProtocolInfo.MaxSockAddr", Field, 2}, + {"WSAProtocolInfo.MessageSize", Field, 2}, + {"WSAProtocolInfo.MinSockAddr", Field, 2}, + {"WSAProtocolInfo.NetworkByteOrder", Field, 2}, + {"WSAProtocolInfo.Protocol", Field, 2}, + {"WSAProtocolInfo.ProtocolChain", Field, 2}, + {"WSAProtocolInfo.ProtocolMaxOffset", Field, 2}, + {"WSAProtocolInfo.ProtocolName", Field, 2}, + {"WSAProtocolInfo.ProviderFlags", Field, 2}, + {"WSAProtocolInfo.ProviderId", Field, 2}, + {"WSAProtocolInfo.ProviderReserved", Field, 2}, + {"WSAProtocolInfo.SecurityScheme", Field, 2}, + {"WSAProtocolInfo.ServiceFlags1", Field, 2}, + {"WSAProtocolInfo.ServiceFlags2", Field, 2}, + {"WSAProtocolInfo.ServiceFlags3", Field, 2}, + {"WSAProtocolInfo.ServiceFlags4", Field, 2}, + {"WSAProtocolInfo.SocketType", Field, 2}, + {"WSAProtocolInfo.Version", Field, 2}, + {"WSARecv", Func, 0}, + {"WSARecvFrom", Func, 0}, + {"WSASYS_STATUS_LEN", Const, 0}, + {"WSASend", Func, 0}, + {"WSASendTo", Func, 0}, + {"WSASendto", Func, 0}, + {"WSAStartup", Func, 0}, + {"WSTOPPED", Const, 0}, + {"WTRAPPED", Const, 1}, + {"WUNTRACED", Const, 0}, + {"Wait4", Func, 0}, + {"WaitForSingleObject", Func, 0}, + {"WaitStatus", Type, 0}, + {"WaitStatus.ExitCode", Field, 0}, + {"Win32FileAttributeData", Type, 0}, + {"Win32FileAttributeData.CreationTime", Field, 0}, + {"Win32FileAttributeData.FileAttributes", Field, 0}, + {"Win32FileAttributeData.FileSizeHigh", Field, 0}, + {"Win32FileAttributeData.FileSizeLow", Field, 0}, + {"Win32FileAttributeData.LastAccessTime", Field, 0}, + {"Win32FileAttributeData.LastWriteTime", Field, 0}, + {"Win32finddata", Type, 0}, + {"Win32finddata.AlternateFileName", Field, 0}, + {"Win32finddata.CreationTime", Field, 0}, + {"Win32finddata.FileAttributes", Field, 0}, + {"Win32finddata.FileName", Field, 0}, + {"Win32finddata.FileSizeHigh", Field, 0}, + {"Win32finddata.FileSizeLow", Field, 0}, + {"Win32finddata.LastAccessTime", Field, 0}, + {"Win32finddata.LastWriteTime", Field, 0}, + {"Win32finddata.Reserved0", Field, 0}, + {"Win32finddata.Reserved1", Field, 0}, + {"Write", Func, 0}, + {"WriteConsole", Func, 1}, + {"WriteFile", Func, 0}, + {"X509_ASN_ENCODING", Const, 0}, + {"XCASE", Const, 0}, + {"XP1_CONNECTIONLESS", Const, 2}, + {"XP1_CONNECT_DATA", Const, 2}, + {"XP1_DISCONNECT_DATA", Const, 2}, + {"XP1_EXPEDITED_DATA", Const, 2}, + {"XP1_GRACEFUL_CLOSE", Const, 2}, + {"XP1_GUARANTEED_DELIVERY", Const, 2}, + {"XP1_GUARANTEED_ORDER", Const, 2}, + {"XP1_IFS_HANDLES", Const, 2}, + {"XP1_MESSAGE_ORIENTED", Const, 2}, + {"XP1_MULTIPOINT_CONTROL_PLANE", Const, 2}, + {"XP1_MULTIPOINT_DATA_PLANE", Const, 2}, + {"XP1_PARTIAL_MESSAGE", Const, 2}, + {"XP1_PSEUDO_STREAM", Const, 2}, + {"XP1_QOS_SUPPORTED", Const, 2}, + {"XP1_SAN_SUPPORT_SDP", Const, 2}, + {"XP1_SUPPORT_BROADCAST", Const, 2}, + {"XP1_SUPPORT_MULTIPOINT", Const, 2}, + {"XP1_UNI_RECV", Const, 2}, + {"XP1_UNI_SEND", Const, 2}, + }, + "syscall/js": { + {"CopyBytesToGo", Func, 0}, + {"CopyBytesToJS", Func, 0}, + {"Error", Type, 0}, + {"Func", Type, 0}, + {"FuncOf", Func, 0}, + {"Global", Func, 0}, + {"Null", Func, 0}, + {"Type", Type, 0}, + {"TypeBoolean", Const, 0}, + {"TypeFunction", Const, 0}, + {"TypeNull", Const, 0}, + {"TypeNumber", Const, 0}, + {"TypeObject", Const, 0}, + {"TypeString", Const, 0}, + {"TypeSymbol", Const, 0}, + {"TypeUndefined", Const, 0}, + {"Undefined", Func, 0}, + {"Value", Type, 0}, + {"ValueError", Type, 0}, + {"ValueOf", Func, 0}, + }, + "testing": { + 
{"(*B).Cleanup", Method, 14}, + {"(*B).Elapsed", Method, 20}, + {"(*B).Error", Method, 0}, + {"(*B).Errorf", Method, 0}, + {"(*B).Fail", Method, 0}, + {"(*B).FailNow", Method, 0}, + {"(*B).Failed", Method, 0}, + {"(*B).Fatal", Method, 0}, + {"(*B).Fatalf", Method, 0}, + {"(*B).Helper", Method, 9}, + {"(*B).Log", Method, 0}, + {"(*B).Logf", Method, 0}, + {"(*B).Name", Method, 8}, + {"(*B).ReportAllocs", Method, 1}, + {"(*B).ReportMetric", Method, 13}, + {"(*B).ResetTimer", Method, 0}, + {"(*B).Run", Method, 7}, + {"(*B).RunParallel", Method, 3}, + {"(*B).SetBytes", Method, 0}, + {"(*B).SetParallelism", Method, 3}, + {"(*B).Setenv", Method, 17}, + {"(*B).Skip", Method, 1}, + {"(*B).SkipNow", Method, 1}, + {"(*B).Skipf", Method, 1}, + {"(*B).Skipped", Method, 1}, + {"(*B).StartTimer", Method, 0}, + {"(*B).StopTimer", Method, 0}, + {"(*B).TempDir", Method, 15}, + {"(*F).Add", Method, 18}, + {"(*F).Cleanup", Method, 18}, + {"(*F).Error", Method, 18}, + {"(*F).Errorf", Method, 18}, + {"(*F).Fail", Method, 18}, + {"(*F).FailNow", Method, 18}, + {"(*F).Failed", Method, 18}, + {"(*F).Fatal", Method, 18}, + {"(*F).Fatalf", Method, 18}, + {"(*F).Fuzz", Method, 18}, + {"(*F).Helper", Method, 18}, + {"(*F).Log", Method, 18}, + {"(*F).Logf", Method, 18}, + {"(*F).Name", Method, 18}, + {"(*F).Setenv", Method, 18}, + {"(*F).Skip", Method, 18}, + {"(*F).SkipNow", Method, 18}, + {"(*F).Skipf", Method, 18}, + {"(*F).Skipped", Method, 18}, + {"(*F).TempDir", Method, 18}, + {"(*M).Run", Method, 4}, + {"(*PB).Next", Method, 3}, + {"(*T).Cleanup", Method, 14}, + {"(*T).Deadline", Method, 15}, + {"(*T).Error", Method, 0}, + {"(*T).Errorf", Method, 0}, + {"(*T).Fail", Method, 0}, + {"(*T).FailNow", Method, 0}, + {"(*T).Failed", Method, 0}, + {"(*T).Fatal", Method, 0}, + {"(*T).Fatalf", Method, 0}, + {"(*T).Helper", Method, 9}, + {"(*T).Log", Method, 0}, + {"(*T).Logf", Method, 0}, + {"(*T).Name", Method, 8}, + {"(*T).Parallel", Method, 0}, + {"(*T).Run", Method, 7}, + {"(*T).Setenv", Method, 17}, + {"(*T).Skip", Method, 1}, + {"(*T).SkipNow", Method, 1}, + {"(*T).Skipf", Method, 1}, + {"(*T).Skipped", Method, 1}, + {"(*T).TempDir", Method, 15}, + {"(BenchmarkResult).AllocedBytesPerOp", Method, 1}, + {"(BenchmarkResult).AllocsPerOp", Method, 1}, + {"(BenchmarkResult).MemString", Method, 1}, + {"(BenchmarkResult).NsPerOp", Method, 0}, + {"(BenchmarkResult).String", Method, 0}, + {"AllocsPerRun", Func, 1}, + {"B", Type, 0}, + {"B.N", Field, 0}, + {"Benchmark", Func, 0}, + {"BenchmarkResult", Type, 0}, + {"BenchmarkResult.Bytes", Field, 0}, + {"BenchmarkResult.Extra", Field, 13}, + {"BenchmarkResult.MemAllocs", Field, 1}, + {"BenchmarkResult.MemBytes", Field, 1}, + {"BenchmarkResult.N", Field, 0}, + {"BenchmarkResult.T", Field, 0}, + {"Cover", Type, 2}, + {"Cover.Blocks", Field, 2}, + {"Cover.Counters", Field, 2}, + {"Cover.CoveredPackages", Field, 2}, + {"Cover.Mode", Field, 2}, + {"CoverBlock", Type, 2}, + {"CoverBlock.Col0", Field, 2}, + {"CoverBlock.Col1", Field, 2}, + {"CoverBlock.Line0", Field, 2}, + {"CoverBlock.Line1", Field, 2}, + {"CoverBlock.Stmts", Field, 2}, + {"CoverMode", Func, 8}, + {"Coverage", Func, 4}, + {"F", Type, 18}, + {"Init", Func, 13}, + {"InternalBenchmark", Type, 0}, + {"InternalBenchmark.F", Field, 0}, + {"InternalBenchmark.Name", Field, 0}, + {"InternalExample", Type, 0}, + {"InternalExample.F", Field, 0}, + {"InternalExample.Name", Field, 0}, + {"InternalExample.Output", Field, 0}, + {"InternalExample.Unordered", Field, 7}, + {"InternalFuzzTarget", Type, 18}, + {"InternalFuzzTarget.Fn", 
Field, 18}, + {"InternalFuzzTarget.Name", Field, 18}, + {"InternalTest", Type, 0}, + {"InternalTest.F", Field, 0}, + {"InternalTest.Name", Field, 0}, + {"M", Type, 4}, + {"Main", Func, 0}, + {"MainStart", Func, 4}, + {"PB", Type, 3}, + {"RegisterCover", Func, 2}, + {"RunBenchmarks", Func, 0}, + {"RunExamples", Func, 0}, + {"RunTests", Func, 0}, + {"Short", Func, 0}, + {"T", Type, 0}, + {"TB", Type, 2}, + {"Testing", Func, 21}, + {"Verbose", Func, 1}, + }, + "testing/fstest": { + {"(MapFS).Glob", Method, 16}, + {"(MapFS).Open", Method, 16}, + {"(MapFS).ReadDir", Method, 16}, + {"(MapFS).ReadFile", Method, 16}, + {"(MapFS).Stat", Method, 16}, + {"(MapFS).Sub", Method, 16}, + {"MapFS", Type, 16}, + {"MapFile", Type, 16}, + {"MapFile.Data", Field, 16}, + {"MapFile.ModTime", Field, 16}, + {"MapFile.Mode", Field, 16}, + {"MapFile.Sys", Field, 16}, + {"TestFS", Func, 16}, + }, + "testing/iotest": { + {"DataErrReader", Func, 0}, + {"ErrReader", Func, 16}, + {"ErrTimeout", Var, 0}, + {"HalfReader", Func, 0}, + {"NewReadLogger", Func, 0}, + {"NewWriteLogger", Func, 0}, + {"OneByteReader", Func, 0}, + {"TestReader", Func, 16}, + {"TimeoutReader", Func, 0}, + {"TruncateWriter", Func, 0}, + }, + "testing/quick": { + {"(*CheckEqualError).Error", Method, 0}, + {"(*CheckError).Error", Method, 0}, + {"(SetupError).Error", Method, 0}, + {"Check", Func, 0}, + {"CheckEqual", Func, 0}, + {"CheckEqualError", Type, 0}, + {"CheckEqualError.CheckError", Field, 0}, + {"CheckEqualError.Out1", Field, 0}, + {"CheckEqualError.Out2", Field, 0}, + {"CheckError", Type, 0}, + {"CheckError.Count", Field, 0}, + {"CheckError.In", Field, 0}, + {"Config", Type, 0}, + {"Config.MaxCount", Field, 0}, + {"Config.MaxCountScale", Field, 0}, + {"Config.Rand", Field, 0}, + {"Config.Values", Field, 0}, + {"Generator", Type, 0}, + {"SetupError", Type, 0}, + {"Value", Func, 0}, + }, + "testing/slogtest": { + {"Run", Func, 22}, + {"TestHandler", Func, 21}, + }, + "text/scanner": { + {"(*Position).IsValid", Method, 0}, + {"(*Scanner).Init", Method, 0}, + {"(*Scanner).IsValid", Method, 0}, + {"(*Scanner).Next", Method, 0}, + {"(*Scanner).Peek", Method, 0}, + {"(*Scanner).Pos", Method, 0}, + {"(*Scanner).Scan", Method, 0}, + {"(*Scanner).TokenText", Method, 0}, + {"(Position).String", Method, 0}, + {"(Scanner).String", Method, 0}, + {"Char", Const, 0}, + {"Comment", Const, 0}, + {"EOF", Const, 0}, + {"Float", Const, 0}, + {"GoTokens", Const, 0}, + {"GoWhitespace", Const, 0}, + {"Ident", Const, 0}, + {"Int", Const, 0}, + {"Position", Type, 0}, + {"Position.Column", Field, 0}, + {"Position.Filename", Field, 0}, + {"Position.Line", Field, 0}, + {"Position.Offset", Field, 0}, + {"RawString", Const, 0}, + {"ScanChars", Const, 0}, + {"ScanComments", Const, 0}, + {"ScanFloats", Const, 0}, + {"ScanIdents", Const, 0}, + {"ScanInts", Const, 0}, + {"ScanRawStrings", Const, 0}, + {"ScanStrings", Const, 0}, + {"Scanner", Type, 0}, + {"Scanner.Error", Field, 0}, + {"Scanner.ErrorCount", Field, 0}, + {"Scanner.IsIdentRune", Field, 4}, + {"Scanner.Mode", Field, 0}, + {"Scanner.Position", Field, 0}, + {"Scanner.Whitespace", Field, 0}, + {"SkipComments", Const, 0}, + {"String", Const, 0}, + {"TokenString", Func, 0}, + }, + "text/tabwriter": { + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Init", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"AlignRight", Const, 0}, + {"Debug", Const, 0}, + {"DiscardEmptyColumns", Const, 0}, + {"Escape", Const, 0}, + {"FilterHTML", Const, 0}, + {"NewWriter", Func, 0}, + {"StripEscape", Const, 0}, + {"TabIndent", Const, 0}, 
+ {"Writer", Type, 0}, + }, + "text/template": { + {"(*Template).AddParseTree", Method, 0}, + {"(*Template).Clone", Method, 0}, + {"(*Template).DefinedTemplates", Method, 5}, + {"(*Template).Delims", Method, 0}, + {"(*Template).Execute", Method, 0}, + {"(*Template).ExecuteTemplate", Method, 0}, + {"(*Template).Funcs", Method, 0}, + {"(*Template).Lookup", Method, 0}, + {"(*Template).Name", Method, 0}, + {"(*Template).New", Method, 0}, + {"(*Template).Option", Method, 5}, + {"(*Template).Parse", Method, 0}, + {"(*Template).ParseFS", Method, 16}, + {"(*Template).ParseFiles", Method, 0}, + {"(*Template).ParseGlob", Method, 0}, + {"(*Template).Templates", Method, 0}, + {"(ExecError).Error", Method, 6}, + {"(ExecError).Unwrap", Method, 13}, + {"(Template).Copy", Method, 2}, + {"(Template).ErrorContext", Method, 1}, + {"ExecError", Type, 6}, + {"ExecError.Err", Field, 6}, + {"ExecError.Name", Field, 6}, + {"FuncMap", Type, 0}, + {"HTMLEscape", Func, 0}, + {"HTMLEscapeString", Func, 0}, + {"HTMLEscaper", Func, 0}, + {"IsTrue", Func, 6}, + {"JSEscape", Func, 0}, + {"JSEscapeString", Func, 0}, + {"JSEscaper", Func, 0}, + {"Must", Func, 0}, + {"New", Func, 0}, + {"ParseFS", Func, 16}, + {"ParseFiles", Func, 0}, + {"ParseGlob", Func, 0}, + {"Template", Type, 0}, + {"Template.Tree", Field, 0}, + {"URLQueryEscaper", Func, 0}, + }, + "text/template/parse": { + {"(*ActionNode).Copy", Method, 0}, + {"(*ActionNode).String", Method, 0}, + {"(*BoolNode).Copy", Method, 0}, + {"(*BoolNode).String", Method, 0}, + {"(*BranchNode).Copy", Method, 4}, + {"(*BranchNode).String", Method, 0}, + {"(*BreakNode).Copy", Method, 18}, + {"(*BreakNode).String", Method, 18}, + {"(*ChainNode).Add", Method, 1}, + {"(*ChainNode).Copy", Method, 1}, + {"(*ChainNode).String", Method, 1}, + {"(*CommandNode).Copy", Method, 0}, + {"(*CommandNode).String", Method, 0}, + {"(*CommentNode).Copy", Method, 16}, + {"(*CommentNode).String", Method, 16}, + {"(*ContinueNode).Copy", Method, 18}, + {"(*ContinueNode).String", Method, 18}, + {"(*DotNode).Copy", Method, 0}, + {"(*DotNode).String", Method, 0}, + {"(*DotNode).Type", Method, 0}, + {"(*FieldNode).Copy", Method, 0}, + {"(*FieldNode).String", Method, 0}, + {"(*IdentifierNode).Copy", Method, 0}, + {"(*IdentifierNode).SetPos", Method, 1}, + {"(*IdentifierNode).SetTree", Method, 4}, + {"(*IdentifierNode).String", Method, 0}, + {"(*IfNode).Copy", Method, 0}, + {"(*IfNode).String", Method, 0}, + {"(*ListNode).Copy", Method, 0}, + {"(*ListNode).CopyList", Method, 0}, + {"(*ListNode).String", Method, 0}, + {"(*NilNode).Copy", Method, 1}, + {"(*NilNode).String", Method, 1}, + {"(*NilNode).Type", Method, 1}, + {"(*NumberNode).Copy", Method, 0}, + {"(*NumberNode).String", Method, 0}, + {"(*PipeNode).Copy", Method, 0}, + {"(*PipeNode).CopyPipe", Method, 0}, + {"(*PipeNode).String", Method, 0}, + {"(*RangeNode).Copy", Method, 0}, + {"(*RangeNode).String", Method, 0}, + {"(*StringNode).Copy", Method, 0}, + {"(*StringNode).String", Method, 0}, + {"(*TemplateNode).Copy", Method, 0}, + {"(*TemplateNode).String", Method, 0}, + {"(*TextNode).Copy", Method, 0}, + {"(*TextNode).String", Method, 0}, + {"(*Tree).Copy", Method, 2}, + {"(*Tree).ErrorContext", Method, 1}, + {"(*Tree).Parse", Method, 0}, + {"(*VariableNode).Copy", Method, 0}, + {"(*VariableNode).String", Method, 0}, + {"(*WithNode).Copy", Method, 0}, + {"(*WithNode).String", Method, 0}, + {"(ActionNode).Position", Method, 1}, + {"(ActionNode).Type", Method, 0}, + {"(BoolNode).Position", Method, 1}, + {"(BoolNode).Type", Method, 0}, + 
{"(BranchNode).Position", Method, 1}, + {"(BranchNode).Type", Method, 0}, + {"(BreakNode).Position", Method, 18}, + {"(BreakNode).Type", Method, 18}, + {"(ChainNode).Position", Method, 1}, + {"(ChainNode).Type", Method, 1}, + {"(CommandNode).Position", Method, 1}, + {"(CommandNode).Type", Method, 0}, + {"(CommentNode).Position", Method, 16}, + {"(CommentNode).Type", Method, 16}, + {"(ContinueNode).Position", Method, 18}, + {"(ContinueNode).Type", Method, 18}, + {"(DotNode).Position", Method, 1}, + {"(FieldNode).Position", Method, 1}, + {"(FieldNode).Type", Method, 0}, + {"(IdentifierNode).Position", Method, 1}, + {"(IdentifierNode).Type", Method, 0}, + {"(IfNode).Position", Method, 1}, + {"(IfNode).Type", Method, 0}, + {"(ListNode).Position", Method, 1}, + {"(ListNode).Type", Method, 0}, + {"(NilNode).Position", Method, 1}, + {"(NodeType).Type", Method, 0}, + {"(NumberNode).Position", Method, 1}, + {"(NumberNode).Type", Method, 0}, + {"(PipeNode).Position", Method, 1}, + {"(PipeNode).Type", Method, 0}, + {"(Pos).Position", Method, 1}, + {"(RangeNode).Position", Method, 1}, + {"(RangeNode).Type", Method, 0}, + {"(StringNode).Position", Method, 1}, + {"(StringNode).Type", Method, 0}, + {"(TemplateNode).Position", Method, 1}, + {"(TemplateNode).Type", Method, 0}, + {"(TextNode).Position", Method, 1}, + {"(TextNode).Type", Method, 0}, + {"(VariableNode).Position", Method, 1}, + {"(VariableNode).Type", Method, 0}, + {"(WithNode).Position", Method, 1}, + {"(WithNode).Type", Method, 0}, + {"ActionNode", Type, 0}, + {"ActionNode.Line", Field, 0}, + {"ActionNode.NodeType", Field, 0}, + {"ActionNode.Pipe", Field, 0}, + {"ActionNode.Pos", Field, 1}, + {"BoolNode", Type, 0}, + {"BoolNode.NodeType", Field, 0}, + {"BoolNode.Pos", Field, 1}, + {"BoolNode.True", Field, 0}, + {"BranchNode", Type, 0}, + {"BranchNode.ElseList", Field, 0}, + {"BranchNode.Line", Field, 0}, + {"BranchNode.List", Field, 0}, + {"BranchNode.NodeType", Field, 0}, + {"BranchNode.Pipe", Field, 0}, + {"BranchNode.Pos", Field, 1}, + {"BreakNode", Type, 18}, + {"BreakNode.Line", Field, 18}, + {"BreakNode.NodeType", Field, 18}, + {"BreakNode.Pos", Field, 18}, + {"ChainNode", Type, 1}, + {"ChainNode.Field", Field, 1}, + {"ChainNode.Node", Field, 1}, + {"ChainNode.NodeType", Field, 1}, + {"ChainNode.Pos", Field, 1}, + {"CommandNode", Type, 0}, + {"CommandNode.Args", Field, 0}, + {"CommandNode.NodeType", Field, 0}, + {"CommandNode.Pos", Field, 1}, + {"CommentNode", Type, 16}, + {"CommentNode.NodeType", Field, 16}, + {"CommentNode.Pos", Field, 16}, + {"CommentNode.Text", Field, 16}, + {"ContinueNode", Type, 18}, + {"ContinueNode.Line", Field, 18}, + {"ContinueNode.NodeType", Field, 18}, + {"ContinueNode.Pos", Field, 18}, + {"DotNode", Type, 0}, + {"DotNode.NodeType", Field, 4}, + {"DotNode.Pos", Field, 1}, + {"FieldNode", Type, 0}, + {"FieldNode.Ident", Field, 0}, + {"FieldNode.NodeType", Field, 0}, + {"FieldNode.Pos", Field, 1}, + {"IdentifierNode", Type, 0}, + {"IdentifierNode.Ident", Field, 0}, + {"IdentifierNode.NodeType", Field, 0}, + {"IdentifierNode.Pos", Field, 1}, + {"IfNode", Type, 0}, + {"IfNode.BranchNode", Field, 0}, + {"IsEmptyTree", Func, 0}, + {"ListNode", Type, 0}, + {"ListNode.NodeType", Field, 0}, + {"ListNode.Nodes", Field, 0}, + {"ListNode.Pos", Field, 1}, + {"Mode", Type, 16}, + {"New", Func, 0}, + {"NewIdentifier", Func, 0}, + {"NilNode", Type, 1}, + {"NilNode.NodeType", Field, 4}, + {"NilNode.Pos", Field, 1}, + {"Node", Type, 0}, + {"NodeAction", Const, 0}, + {"NodeBool", Const, 0}, + {"NodeBreak", Const, 18}, + 
{"NodeChain", Const, 1}, + {"NodeCommand", Const, 0}, + {"NodeComment", Const, 16}, + {"NodeContinue", Const, 18}, + {"NodeDot", Const, 0}, + {"NodeField", Const, 0}, + {"NodeIdentifier", Const, 0}, + {"NodeIf", Const, 0}, + {"NodeList", Const, 0}, + {"NodeNil", Const, 1}, + {"NodeNumber", Const, 0}, + {"NodePipe", Const, 0}, + {"NodeRange", Const, 0}, + {"NodeString", Const, 0}, + {"NodeTemplate", Const, 0}, + {"NodeText", Const, 0}, + {"NodeType", Type, 0}, + {"NodeVariable", Const, 0}, + {"NodeWith", Const, 0}, + {"NumberNode", Type, 0}, + {"NumberNode.Complex128", Field, 0}, + {"NumberNode.Float64", Field, 0}, + {"NumberNode.Int64", Field, 0}, + {"NumberNode.IsComplex", Field, 0}, + {"NumberNode.IsFloat", Field, 0}, + {"NumberNode.IsInt", Field, 0}, + {"NumberNode.IsUint", Field, 0}, + {"NumberNode.NodeType", Field, 0}, + {"NumberNode.Pos", Field, 1}, + {"NumberNode.Text", Field, 0}, + {"NumberNode.Uint64", Field, 0}, + {"Parse", Func, 0}, + {"ParseComments", Const, 16}, + {"PipeNode", Type, 0}, + {"PipeNode.Cmds", Field, 0}, + {"PipeNode.Decl", Field, 0}, + {"PipeNode.IsAssign", Field, 11}, + {"PipeNode.Line", Field, 0}, + {"PipeNode.NodeType", Field, 0}, + {"PipeNode.Pos", Field, 1}, + {"Pos", Type, 1}, + {"RangeNode", Type, 0}, + {"RangeNode.BranchNode", Field, 0}, + {"SkipFuncCheck", Const, 17}, + {"StringNode", Type, 0}, + {"StringNode.NodeType", Field, 0}, + {"StringNode.Pos", Field, 1}, + {"StringNode.Quoted", Field, 0}, + {"StringNode.Text", Field, 0}, + {"TemplateNode", Type, 0}, + {"TemplateNode.Line", Field, 0}, + {"TemplateNode.Name", Field, 0}, + {"TemplateNode.NodeType", Field, 0}, + {"TemplateNode.Pipe", Field, 0}, + {"TemplateNode.Pos", Field, 1}, + {"TextNode", Type, 0}, + {"TextNode.NodeType", Field, 0}, + {"TextNode.Pos", Field, 1}, + {"TextNode.Text", Field, 0}, + {"Tree", Type, 0}, + {"Tree.Mode", Field, 16}, + {"Tree.Name", Field, 0}, + {"Tree.ParseName", Field, 1}, + {"Tree.Root", Field, 0}, + {"VariableNode", Type, 0}, + {"VariableNode.Ident", Field, 0}, + {"VariableNode.NodeType", Field, 0}, + {"VariableNode.Pos", Field, 1}, + {"WithNode", Type, 0}, + {"WithNode.BranchNode", Field, 0}, + }, + "time": { + {"(*Location).String", Method, 0}, + {"(*ParseError).Error", Method, 0}, + {"(*Ticker).Reset", Method, 15}, + {"(*Ticker).Stop", Method, 0}, + {"(*Time).GobDecode", Method, 0}, + {"(*Time).UnmarshalBinary", Method, 2}, + {"(*Time).UnmarshalJSON", Method, 0}, + {"(*Time).UnmarshalText", Method, 2}, + {"(*Timer).Reset", Method, 1}, + {"(*Timer).Stop", Method, 0}, + {"(Duration).Abs", Method, 19}, + {"(Duration).Hours", Method, 0}, + {"(Duration).Microseconds", Method, 13}, + {"(Duration).Milliseconds", Method, 13}, + {"(Duration).Minutes", Method, 0}, + {"(Duration).Nanoseconds", Method, 0}, + {"(Duration).Round", Method, 9}, + {"(Duration).Seconds", Method, 0}, + {"(Duration).String", Method, 0}, + {"(Duration).Truncate", Method, 9}, + {"(Month).String", Method, 0}, + {"(Time).Add", Method, 0}, + {"(Time).AddDate", Method, 0}, + {"(Time).After", Method, 0}, + {"(Time).AppendFormat", Method, 5}, + {"(Time).Before", Method, 0}, + {"(Time).Clock", Method, 0}, + {"(Time).Compare", Method, 20}, + {"(Time).Date", Method, 0}, + {"(Time).Day", Method, 0}, + {"(Time).Equal", Method, 0}, + {"(Time).Format", Method, 0}, + {"(Time).GoString", Method, 17}, + {"(Time).GobEncode", Method, 0}, + {"(Time).Hour", Method, 0}, + {"(Time).ISOWeek", Method, 0}, + {"(Time).In", Method, 0}, + {"(Time).IsDST", Method, 17}, + {"(Time).IsZero", Method, 0}, + {"(Time).Local", Method, 0}, + 
{"(Time).Location", Method, 0}, + {"(Time).MarshalBinary", Method, 2}, + {"(Time).MarshalJSON", Method, 0}, + {"(Time).MarshalText", Method, 2}, + {"(Time).Minute", Method, 0}, + {"(Time).Month", Method, 0}, + {"(Time).Nanosecond", Method, 0}, + {"(Time).Round", Method, 1}, + {"(Time).Second", Method, 0}, + {"(Time).String", Method, 0}, + {"(Time).Sub", Method, 0}, + {"(Time).Truncate", Method, 1}, + {"(Time).UTC", Method, 0}, + {"(Time).Unix", Method, 0}, + {"(Time).UnixMicro", Method, 17}, + {"(Time).UnixMilli", Method, 17}, + {"(Time).UnixNano", Method, 0}, + {"(Time).Weekday", Method, 0}, + {"(Time).Year", Method, 0}, + {"(Time).YearDay", Method, 1}, + {"(Time).Zone", Method, 0}, + {"(Time).ZoneBounds", Method, 19}, + {"(Weekday).String", Method, 0}, + {"ANSIC", Const, 0}, + {"After", Func, 0}, + {"AfterFunc", Func, 0}, + {"April", Const, 0}, + {"August", Const, 0}, + {"Date", Func, 0}, + {"DateOnly", Const, 20}, + {"DateTime", Const, 20}, + {"December", Const, 0}, + {"Duration", Type, 0}, + {"February", Const, 0}, + {"FixedZone", Func, 0}, + {"Friday", Const, 0}, + {"Hour", Const, 0}, + {"January", Const, 0}, + {"July", Const, 0}, + {"June", Const, 0}, + {"Kitchen", Const, 0}, + {"Layout", Const, 17}, + {"LoadLocation", Func, 0}, + {"LoadLocationFromTZData", Func, 10}, + {"Local", Var, 0}, + {"Location", Type, 0}, + {"March", Const, 0}, + {"May", Const, 0}, + {"Microsecond", Const, 0}, + {"Millisecond", Const, 0}, + {"Minute", Const, 0}, + {"Monday", Const, 0}, + {"Month", Type, 0}, + {"Nanosecond", Const, 0}, + {"NewTicker", Func, 0}, + {"NewTimer", Func, 0}, + {"November", Const, 0}, + {"Now", Func, 0}, + {"October", Const, 0}, + {"Parse", Func, 0}, + {"ParseDuration", Func, 0}, + {"ParseError", Type, 0}, + {"ParseError.Layout", Field, 0}, + {"ParseError.LayoutElem", Field, 0}, + {"ParseError.Message", Field, 0}, + {"ParseError.Value", Field, 0}, + {"ParseError.ValueElem", Field, 0}, + {"ParseInLocation", Func, 1}, + {"RFC1123", Const, 0}, + {"RFC1123Z", Const, 0}, + {"RFC3339", Const, 0}, + {"RFC3339Nano", Const, 0}, + {"RFC822", Const, 0}, + {"RFC822Z", Const, 0}, + {"RFC850", Const, 0}, + {"RubyDate", Const, 0}, + {"Saturday", Const, 0}, + {"Second", Const, 0}, + {"September", Const, 0}, + {"Since", Func, 0}, + {"Sleep", Func, 0}, + {"Stamp", Const, 0}, + {"StampMicro", Const, 0}, + {"StampMilli", Const, 0}, + {"StampNano", Const, 0}, + {"Sunday", Const, 0}, + {"Thursday", Const, 0}, + {"Tick", Func, 0}, + {"Ticker", Type, 0}, + {"Ticker.C", Field, 0}, + {"Time", Type, 0}, + {"TimeOnly", Const, 20}, + {"Timer", Type, 0}, + {"Timer.C", Field, 0}, + {"Tuesday", Const, 0}, + {"UTC", Var, 0}, + {"Unix", Func, 0}, + {"UnixDate", Const, 0}, + {"UnixMicro", Func, 17}, + {"UnixMilli", Func, 17}, + {"Until", Func, 8}, + {"Wednesday", Const, 0}, + {"Weekday", Type, 0}, + }, + "unicode": { + {"(SpecialCase).ToLower", Method, 0}, + {"(SpecialCase).ToTitle", Method, 0}, + {"(SpecialCase).ToUpper", Method, 0}, + {"ASCII_Hex_Digit", Var, 0}, + {"Adlam", Var, 7}, + {"Ahom", Var, 5}, + {"Anatolian_Hieroglyphs", Var, 5}, + {"Arabic", Var, 0}, + {"Armenian", Var, 0}, + {"Avestan", Var, 0}, + {"AzeriCase", Var, 0}, + {"Balinese", Var, 0}, + {"Bamum", Var, 0}, + {"Bassa_Vah", Var, 4}, + {"Batak", Var, 0}, + {"Bengali", Var, 0}, + {"Bhaiksuki", Var, 7}, + {"Bidi_Control", Var, 0}, + {"Bopomofo", Var, 0}, + {"Brahmi", Var, 0}, + {"Braille", Var, 0}, + {"Buginese", Var, 0}, + {"Buhid", Var, 0}, + {"C", Var, 0}, + {"Canadian_Aboriginal", Var, 0}, + {"Carian", Var, 0}, + {"CaseRange", Type, 0}, + 
{"CaseRange.Delta", Field, 0}, + {"CaseRange.Hi", Field, 0}, + {"CaseRange.Lo", Field, 0}, + {"CaseRanges", Var, 0}, + {"Categories", Var, 0}, + {"Caucasian_Albanian", Var, 4}, + {"Cc", Var, 0}, + {"Cf", Var, 0}, + {"Chakma", Var, 1}, + {"Cham", Var, 0}, + {"Cherokee", Var, 0}, + {"Chorasmian", Var, 16}, + {"Co", Var, 0}, + {"Common", Var, 0}, + {"Coptic", Var, 0}, + {"Cs", Var, 0}, + {"Cuneiform", Var, 0}, + {"Cypriot", Var, 0}, + {"Cypro_Minoan", Var, 21}, + {"Cyrillic", Var, 0}, + {"Dash", Var, 0}, + {"Deprecated", Var, 0}, + {"Deseret", Var, 0}, + {"Devanagari", Var, 0}, + {"Diacritic", Var, 0}, + {"Digit", Var, 0}, + {"Dives_Akuru", Var, 16}, + {"Dogra", Var, 13}, + {"Duployan", Var, 4}, + {"Egyptian_Hieroglyphs", Var, 0}, + {"Elbasan", Var, 4}, + {"Elymaic", Var, 14}, + {"Ethiopic", Var, 0}, + {"Extender", Var, 0}, + {"FoldCategory", Var, 0}, + {"FoldScript", Var, 0}, + {"Georgian", Var, 0}, + {"Glagolitic", Var, 0}, + {"Gothic", Var, 0}, + {"Grantha", Var, 4}, + {"GraphicRanges", Var, 0}, + {"Greek", Var, 0}, + {"Gujarati", Var, 0}, + {"Gunjala_Gondi", Var, 13}, + {"Gurmukhi", Var, 0}, + {"Han", Var, 0}, + {"Hangul", Var, 0}, + {"Hanifi_Rohingya", Var, 13}, + {"Hanunoo", Var, 0}, + {"Hatran", Var, 5}, + {"Hebrew", Var, 0}, + {"Hex_Digit", Var, 0}, + {"Hiragana", Var, 0}, + {"Hyphen", Var, 0}, + {"IDS_Binary_Operator", Var, 0}, + {"IDS_Trinary_Operator", Var, 0}, + {"Ideographic", Var, 0}, + {"Imperial_Aramaic", Var, 0}, + {"In", Func, 2}, + {"Inherited", Var, 0}, + {"Inscriptional_Pahlavi", Var, 0}, + {"Inscriptional_Parthian", Var, 0}, + {"Is", Func, 0}, + {"IsControl", Func, 0}, + {"IsDigit", Func, 0}, + {"IsGraphic", Func, 0}, + {"IsLetter", Func, 0}, + {"IsLower", Func, 0}, + {"IsMark", Func, 0}, + {"IsNumber", Func, 0}, + {"IsOneOf", Func, 0}, + {"IsPrint", Func, 0}, + {"IsPunct", Func, 0}, + {"IsSpace", Func, 0}, + {"IsSymbol", Func, 0}, + {"IsTitle", Func, 0}, + {"IsUpper", Func, 0}, + {"Javanese", Var, 0}, + {"Join_Control", Var, 0}, + {"Kaithi", Var, 0}, + {"Kannada", Var, 0}, + {"Katakana", Var, 0}, + {"Kawi", Var, 21}, + {"Kayah_Li", Var, 0}, + {"Kharoshthi", Var, 0}, + {"Khitan_Small_Script", Var, 16}, + {"Khmer", Var, 0}, + {"Khojki", Var, 4}, + {"Khudawadi", Var, 4}, + {"L", Var, 0}, + {"Lao", Var, 0}, + {"Latin", Var, 0}, + {"Lepcha", Var, 0}, + {"Letter", Var, 0}, + {"Limbu", Var, 0}, + {"Linear_A", Var, 4}, + {"Linear_B", Var, 0}, + {"Lisu", Var, 0}, + {"Ll", Var, 0}, + {"Lm", Var, 0}, + {"Lo", Var, 0}, + {"Logical_Order_Exception", Var, 0}, + {"Lower", Var, 0}, + {"LowerCase", Const, 0}, + {"Lt", Var, 0}, + {"Lu", Var, 0}, + {"Lycian", Var, 0}, + {"Lydian", Var, 0}, + {"M", Var, 0}, + {"Mahajani", Var, 4}, + {"Makasar", Var, 13}, + {"Malayalam", Var, 0}, + {"Mandaic", Var, 0}, + {"Manichaean", Var, 4}, + {"Marchen", Var, 7}, + {"Mark", Var, 0}, + {"Masaram_Gondi", Var, 10}, + {"MaxASCII", Const, 0}, + {"MaxCase", Const, 0}, + {"MaxLatin1", Const, 0}, + {"MaxRune", Const, 0}, + {"Mc", Var, 0}, + {"Me", Var, 0}, + {"Medefaidrin", Var, 13}, + {"Meetei_Mayek", Var, 0}, + {"Mende_Kikakui", Var, 4}, + {"Meroitic_Cursive", Var, 1}, + {"Meroitic_Hieroglyphs", Var, 1}, + {"Miao", Var, 1}, + {"Mn", Var, 0}, + {"Modi", Var, 4}, + {"Mongolian", Var, 0}, + {"Mro", Var, 4}, + {"Multani", Var, 5}, + {"Myanmar", Var, 0}, + {"N", Var, 0}, + {"Nabataean", Var, 4}, + {"Nag_Mundari", Var, 21}, + {"Nandinagari", Var, 14}, + {"Nd", Var, 0}, + {"New_Tai_Lue", Var, 0}, + {"Newa", Var, 7}, + {"Nko", Var, 0}, + {"Nl", Var, 0}, + {"No", Var, 0}, + {"Noncharacter_Code_Point", Var, 0}, + 
{"Number", Var, 0}, + {"Nushu", Var, 10}, + {"Nyiakeng_Puachue_Hmong", Var, 14}, + {"Ogham", Var, 0}, + {"Ol_Chiki", Var, 0}, + {"Old_Hungarian", Var, 5}, + {"Old_Italic", Var, 0}, + {"Old_North_Arabian", Var, 4}, + {"Old_Permic", Var, 4}, + {"Old_Persian", Var, 0}, + {"Old_Sogdian", Var, 13}, + {"Old_South_Arabian", Var, 0}, + {"Old_Turkic", Var, 0}, + {"Old_Uyghur", Var, 21}, + {"Oriya", Var, 0}, + {"Osage", Var, 7}, + {"Osmanya", Var, 0}, + {"Other", Var, 0}, + {"Other_Alphabetic", Var, 0}, + {"Other_Default_Ignorable_Code_Point", Var, 0}, + {"Other_Grapheme_Extend", Var, 0}, + {"Other_ID_Continue", Var, 0}, + {"Other_ID_Start", Var, 0}, + {"Other_Lowercase", Var, 0}, + {"Other_Math", Var, 0}, + {"Other_Uppercase", Var, 0}, + {"P", Var, 0}, + {"Pahawh_Hmong", Var, 4}, + {"Palmyrene", Var, 4}, + {"Pattern_Syntax", Var, 0}, + {"Pattern_White_Space", Var, 0}, + {"Pau_Cin_Hau", Var, 4}, + {"Pc", Var, 0}, + {"Pd", Var, 0}, + {"Pe", Var, 0}, + {"Pf", Var, 0}, + {"Phags_Pa", Var, 0}, + {"Phoenician", Var, 0}, + {"Pi", Var, 0}, + {"Po", Var, 0}, + {"Prepended_Concatenation_Mark", Var, 7}, + {"PrintRanges", Var, 0}, + {"Properties", Var, 0}, + {"Ps", Var, 0}, + {"Psalter_Pahlavi", Var, 4}, + {"Punct", Var, 0}, + {"Quotation_Mark", Var, 0}, + {"Radical", Var, 0}, + {"Range16", Type, 0}, + {"Range16.Hi", Field, 0}, + {"Range16.Lo", Field, 0}, + {"Range16.Stride", Field, 0}, + {"Range32", Type, 0}, + {"Range32.Hi", Field, 0}, + {"Range32.Lo", Field, 0}, + {"Range32.Stride", Field, 0}, + {"RangeTable", Type, 0}, + {"RangeTable.LatinOffset", Field, 1}, + {"RangeTable.R16", Field, 0}, + {"RangeTable.R32", Field, 0}, + {"Regional_Indicator", Var, 10}, + {"Rejang", Var, 0}, + {"ReplacementChar", Const, 0}, + {"Runic", Var, 0}, + {"S", Var, 0}, + {"STerm", Var, 0}, + {"Samaritan", Var, 0}, + {"Saurashtra", Var, 0}, + {"Sc", Var, 0}, + {"Scripts", Var, 0}, + {"Sentence_Terminal", Var, 7}, + {"Sharada", Var, 1}, + {"Shavian", Var, 0}, + {"Siddham", Var, 4}, + {"SignWriting", Var, 5}, + {"SimpleFold", Func, 0}, + {"Sinhala", Var, 0}, + {"Sk", Var, 0}, + {"Sm", Var, 0}, + {"So", Var, 0}, + {"Soft_Dotted", Var, 0}, + {"Sogdian", Var, 13}, + {"Sora_Sompeng", Var, 1}, + {"Soyombo", Var, 10}, + {"Space", Var, 0}, + {"SpecialCase", Type, 0}, + {"Sundanese", Var, 0}, + {"Syloti_Nagri", Var, 0}, + {"Symbol", Var, 0}, + {"Syriac", Var, 0}, + {"Tagalog", Var, 0}, + {"Tagbanwa", Var, 0}, + {"Tai_Le", Var, 0}, + {"Tai_Tham", Var, 0}, + {"Tai_Viet", Var, 0}, + {"Takri", Var, 1}, + {"Tamil", Var, 0}, + {"Tangsa", Var, 21}, + {"Tangut", Var, 7}, + {"Telugu", Var, 0}, + {"Terminal_Punctuation", Var, 0}, + {"Thaana", Var, 0}, + {"Thai", Var, 0}, + {"Tibetan", Var, 0}, + {"Tifinagh", Var, 0}, + {"Tirhuta", Var, 4}, + {"Title", Var, 0}, + {"TitleCase", Const, 0}, + {"To", Func, 0}, + {"ToLower", Func, 0}, + {"ToTitle", Func, 0}, + {"ToUpper", Func, 0}, + {"Toto", Var, 21}, + {"TurkishCase", Var, 0}, + {"Ugaritic", Var, 0}, + {"Unified_Ideograph", Var, 0}, + {"Upper", Var, 0}, + {"UpperCase", Const, 0}, + {"UpperLower", Const, 0}, + {"Vai", Var, 0}, + {"Variation_Selector", Var, 0}, + {"Version", Const, 0}, + {"Vithkuqi", Var, 21}, + {"Wancho", Var, 14}, + {"Warang_Citi", Var, 4}, + {"White_Space", Var, 0}, + {"Yezidi", Var, 16}, + {"Yi", Var, 0}, + {"Z", Var, 0}, + {"Zanabazar_Square", Var, 10}, + {"Zl", Var, 0}, + {"Zp", Var, 0}, + {"Zs", Var, 0}, + }, + "unicode/utf16": { + {"AppendRune", Func, 20}, + {"Decode", Func, 0}, + {"DecodeRune", Func, 0}, + {"Encode", Func, 0}, + {"EncodeRune", Func, 0}, + {"IsSurrogate", Func, 
0}, + }, + "unicode/utf8": { + {"AppendRune", Func, 18}, + {"DecodeLastRune", Func, 0}, + {"DecodeLastRuneInString", Func, 0}, + {"DecodeRune", Func, 0}, + {"DecodeRuneInString", Func, 0}, + {"EncodeRune", Func, 0}, + {"FullRune", Func, 0}, + {"FullRuneInString", Func, 0}, + {"MaxRune", Const, 0}, + {"RuneCount", Func, 0}, + {"RuneCountInString", Func, 0}, + {"RuneError", Const, 0}, + {"RuneLen", Func, 0}, + {"RuneSelf", Const, 0}, + {"RuneStart", Func, 0}, + {"UTFMax", Const, 0}, + {"Valid", Func, 0}, + {"ValidRune", Func, 1}, + {"ValidString", Func, 0}, + }, + "unsafe": { + {"Add", Func, 0}, + {"Alignof", Func, 0}, + {"Offsetof", Func, 0}, + {"Pointer", Type, 0}, + {"Sizeof", Func, 0}, + {"Slice", Func, 0}, + {"SliceData", Func, 0}, + {"String", Func, 0}, + {"StringData", Func, 0}, + }, +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/stdlib/stdlib.go new file mode 100644 index 000000000..98904017f --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/stdlib/stdlib.go @@ -0,0 +1,97 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run generate.go + +// Package stdlib provides a table of all exported symbols in the +// standard library, along with the version at which they first +// appeared. +package stdlib + +import ( + "fmt" + "strings" +) + +type Symbol struct { + Name string + Kind Kind + Version Version // Go version that first included the symbol +} + +// A Kind indicates the kind of a symbol: +// function, variable, constant, type, and so on. +type Kind int8 + +const ( + Invalid Kind = iota // Example name: + Type // "Buffer" + Func // "Println" + Var // "EOF" + Const // "Pi" + Field // "Point.X" + Method // "(*Buffer).Grow" +) + +func (kind Kind) String() string { + return [...]string{ + Invalid: "invalid", + Type: "type", + Func: "func", + Var: "var", + Const: "const", + Field: "field", + Method: "method", + }[kind] +} + +// A Version represents a version of Go of the form "go1.%d". +type Version int8 + +// String returns a version string of the form "go1.23", without allocating. +func (v Version) String() string { return versions[v] } + +var versions [30]string // (increase constant as needed) + +func init() { + for i := range versions { + versions[i] = fmt.Sprintf("go1.%d", i) + } +} + +// HasPackage reports whether the specified package path is part of +// the standard library's public API. +func HasPackage(path string) bool { + _, ok := PackageSymbols[path] + return ok +} + +// SplitField splits the field symbol name into type and field +// components. It must be called only on Field symbols. +// +// Example: "File.Package" -> ("File", "Package") +func (sym *Symbol) SplitField() (typename, name string) { + if sym.Kind != Field { + panic("not a field") + } + typename, name, _ = strings.Cut(sym.Name, ".") + return +} + +// SplitMethod splits the method symbol name into pointer, receiver, +// and method components. It must be called only on Method symbols. 
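For context, a minimal sketch of how code inside the x/tools module might consult this generated table. PackageSymbols, HasPackage, Kind, and Version are the names introduced in stdlib.go above; because the package is internal, the example assumes it is compiled within golang.org/x/tools itself.

package main

import (
	"fmt"

	"golang.org/x/tools/internal/stdlib"
)

func main() {
	if !stdlib.HasPackage("unicode/utf8") {
		fmt.Println("unicode/utf8 is not part of the standard library API")
		return
	}
	// Print each exported symbol of unicode/utf8 with the Go version
	// that first included it (e.g. "func AppendRune since go1.18").
	for _, sym := range stdlib.PackageSymbols["unicode/utf8"] {
		fmt.Printf("%s %s since %s\n", sym.Kind, sym.Name, sym.Version)
	}
}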
+// +// Example: "(*Buffer).Grow" -> (true, "Buffer", "Grow") +func (sym *Symbol) SplitMethod() (ptr bool, recv, name string) { + if sym.Kind != Method { + panic("not a method") + } + recv, name, _ = strings.Cut(sym.Name, ".") + recv = recv[len("(") : len(recv)-len(")")] + ptr = recv[0] == '*' + if ptr { + recv = recv[len("*"):] + } + return +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/testenv/exec.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/testenv/exec.go new file mode 100644 index 000000000..f2ab5f5eb --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/testenv/exec.go @@ -0,0 +1,192 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testenv + +import ( + "context" + "flag" + "os" + "os/exec" + "reflect" + "runtime" + "strconv" + "sync" + "testing" + "time" +) + +// HasExec reports whether the current system can start new processes +// using os.StartProcess or (more commonly) exec.Command. +func HasExec() bool { + switch runtime.GOOS { + case "aix", + "android", + "darwin", + "dragonfly", + "freebsd", + "illumos", + "linux", + "netbsd", + "openbsd", + "plan9", + "solaris", + "windows": + // Known OS that isn't ios or wasm; assume that exec works. + return true + + case "ios", "js", "wasip1": + // ios has an exec syscall but on real iOS devices it might return a + // permission error. In an emulated environment (such as a Corellium host) + // it might succeed, so try it and find out. + // + // As of 2023-04-19 wasip1 and js don't have exec syscalls at all, but we + // may as well use the same path so that this branch can be tested without + // an ios environment. + fallthrough + + default: + tryExecOnce.Do(func() { + exe, err := os.Executable() + if err != nil { + return + } + if flag.Lookup("test.list") == nil { + // We found the executable, but we don't know how to run it in a way + // that should succeed without side-effects. Just forget it. + return + } + // We know that a test executable exists and can run, because we're + // running it now. Use it to check for overall exec support, but be sure + // to remove any environment variables that might trigger non-default + // behavior in a custom TestMain. + cmd := exec.Command(exe, "-test.list=^$") + cmd.Env = []string{} + if err := cmd.Run(); err == nil { + tryExecOk = true + } + }) + return tryExecOk + } +} + +var ( + tryExecOnce sync.Once + tryExecOk bool +) + +// NeedsExec checks that the current system can start new processes +// using os.StartProcess or (more commonly) exec.Command. +// If not, NeedsExec calls t.Skip with an explanation. +func NeedsExec(t testing.TB) { + if !HasExec() { + t.Skipf("skipping test: cannot exec subprocess on %s/%s", runtime.GOOS, runtime.GOARCH) + } +} + +// CommandContext is like exec.CommandContext, but: +// - skips t if the platform does not support os/exec, +// - if supported, sends SIGQUIT instead of SIGKILL in its Cancel function +// - if the test has a deadline, adds a Context timeout and (if supported) WaitDelay +// for an arbitrary grace period before the test's deadline expires, +// - if Cmd has the Cancel field, fails the test if the command is canceled +// due to the test's deadline, and +// - sets a Cleanup function that verifies that the test did not leak a subprocess. 
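A hypothetical test showing the intended use of these exec helpers: NeedsExec skips the test where subprocesses are unavailable, and Command (defined further down in exec.go) wires up SIGQUIT-on-timeout and the subprocess leak check. The test name and the command it runs are illustrative only.

package example_test

import (
	"testing"

	"golang.org/x/tools/internal/testenv"
)

func TestGoVersionSubprocess(t *testing.T) {
	testenv.NeedsExec(t) // skips on ios/js/wasip1 when exec is unsupported

	cmd := testenv.Command(t, "go", "version")
	out, err := cmd.Output()
	if err != nil {
		t.Fatalf("go version failed: %v", err)
	}
	t.Logf("runs under: %s", out)
}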
+func CommandContext(t testing.TB, ctx context.Context, name string, args ...string) *exec.Cmd { + t.Helper() + NeedsExec(t) + + var ( + cancelCtx context.CancelFunc + gracePeriod time.Duration // unlimited unless the test has a deadline (to allow for interactive debugging) + ) + + if td, ok := Deadline(t); ok { + // Start with a minimum grace period, just long enough to consume the + // output of a reasonable program after it terminates. + gracePeriod = 100 * time.Millisecond + if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" { + scale, err := strconv.Atoi(s) + if err != nil { + t.Fatalf("invalid GO_TEST_TIMEOUT_SCALE: %v", err) + } + gracePeriod *= time.Duration(scale) + } + + // If time allows, increase the termination grace period to 5% of the + // test's remaining time. + testTimeout := time.Until(td) + if gp := testTimeout / 20; gp > gracePeriod { + gracePeriod = gp + } + + // When we run commands that execute subprocesses, we want to reserve two + // grace periods to clean up: one for the delay between the first + // termination signal being sent (via the Cancel callback when the Context + // expires) and the process being forcibly terminated (via the WaitDelay + // field), and a second one for the delay between the process being + // terminated and the test logging its output for debugging. + // + // (We want to ensure that the test process itself has enough time to + // log the output before it is also terminated.) + cmdTimeout := testTimeout - 2*gracePeriod + + if cd, ok := ctx.Deadline(); !ok || time.Until(cd) > cmdTimeout { + // Either ctx doesn't have a deadline, or its deadline would expire + // after (or too close before) the test has already timed out. + // Add a shorter timeout so that the test will produce useful output. + ctx, cancelCtx = context.WithTimeout(ctx, cmdTimeout) + } + } + + cmd := exec.CommandContext(ctx, name, args...) + + // Use reflection to set the Cancel and WaitDelay fields, if present. + // TODO(bcmills): When we no longer support Go versions below 1.20, + // remove the use of reflect and assume that the fields are always present. + rc := reflect.ValueOf(cmd).Elem() + + if rCancel := rc.FieldByName("Cancel"); rCancel.IsValid() { + rCancel.Set(reflect.ValueOf(func() error { + if cancelCtx != nil && ctx.Err() == context.DeadlineExceeded { + // The command timed out due to running too close to the test's deadline + // (because we specifically set a shorter Context deadline for that + // above). There is no way the test did that intentionally — it's too + // close to the wire! — so mark it as a test failure. That way, if the + // test expects the command to fail for some other reason, it doesn't + // have to distinguish between that reason and a timeout. + t.Errorf("test timed out while running command: %v", cmd) + } else { + // The command is being terminated due to ctx being canceled, but + // apparently not due to an explicit test deadline that we added. + // Log that information in case it is useful for diagnosing a failure, + // but don't actually fail the test because of it. 
+ t.Logf("%v: terminating command: %v", ctx.Err(), cmd) + } + return cmd.Process.Signal(Sigquit) + })) + } + + if rWaitDelay := rc.FieldByName("WaitDelay"); rWaitDelay.IsValid() { + rWaitDelay.Set(reflect.ValueOf(gracePeriod)) + } + + t.Cleanup(func() { + if cancelCtx != nil { + cancelCtx() + } + if cmd.Process != nil && cmd.ProcessState == nil { + t.Errorf("command was started, but test did not wait for it to complete: %v", cmd) + } + }) + + return cmd +} + +// Command is like exec.Command, but applies the same changes as +// testenv.CommandContext (with a default Context). +func Command(t testing.TB, name string, args ...string) *exec.Cmd { + t.Helper() + return CommandContext(t, context.Background(), name, args...) +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/testenv/testenv.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/testenv/testenv.go index bfadb44be..d4a17ce03 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/testenv/testenv.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/testenv/testenv.go @@ -10,27 +10,20 @@ import ( "bytes" "fmt" "go/build" - "io/ioutil" "os" + "os/exec" + "path/filepath" "runtime" "runtime/debug" "strings" "sync" + "testing" "time" - exec "golang.org/x/sys/execabs" + "golang.org/x/mod/modfile" + "golang.org/x/tools/internal/goroot" ) -// Testing is an abstraction of a *testing.T. -type Testing interface { - Skipf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) -} - -type helperer interface { - Helper() -} - // packageMainIsDevel reports whether the module containing package main // is a development version (if module information is available). func packageMainIsDevel() bool { @@ -47,12 +40,15 @@ func packageMainIsDevel() bool { return info.Main.Version == "(devel)" } -var checkGoGoroot struct { +var checkGoBuild struct { once sync.Once err error } -func hasTool(tool string) error { +// HasTool reports an error if the required tool is not available in PATH. +// +// For certain tools, it checks that the tool executable is correct. +func HasTool(tool string) error { if tool == "cgo" { enabled, err := cgoEnabled(false) if err != nil { @@ -72,7 +68,7 @@ func hasTool(tool string) error { switch tool { case "patch": // check that the patch tools supports the -o argument - temp, err := ioutil.TempFile("", "patch-test") + temp, err := os.CreateTemp("", "patch-test") if err != nil { return err } @@ -84,23 +80,51 @@ func hasTool(tool string) error { } case "go": - checkGoGoroot.once.Do(func() { - // Ensure that the 'go' command found by exec.LookPath is from the correct - // GOROOT. Otherwise, 'some/path/go test ./...' will test against some - // version of the 'go' binary other than 'some/path/go', which is almost - // certainly not what the user intended. - out, err := exec.Command(tool, "env", "GOROOT").CombinedOutput() + checkGoBuild.once.Do(func() { + if runtime.GOROOT() != "" { + // Ensure that the 'go' command found by exec.LookPath is from the correct + // GOROOT. Otherwise, 'some/path/go test ./...' will test against some + // version of the 'go' binary other than 'some/path/go', which is almost + // certainly not what the user intended. 
+ out, err := exec.Command(tool, "env", "GOROOT").Output() + if err != nil { + if exit, ok := err.(*exec.ExitError); ok && len(exit.Stderr) > 0 { + err = fmt.Errorf("%w\nstderr:\n%s)", err, exit.Stderr) + } + checkGoBuild.err = err + return + } + GOROOT := strings.TrimSpace(string(out)) + if GOROOT != runtime.GOROOT() { + checkGoBuild.err = fmt.Errorf("'go env GOROOT' does not match runtime.GOROOT:\n\tgo env: %s\n\tGOROOT: %s", GOROOT, runtime.GOROOT()) + return + } + } + + dir, err := os.MkdirTemp("", "testenv-*") if err != nil { - checkGoGoroot.err = err + checkGoBuild.err = err + return + } + defer os.RemoveAll(dir) + + mainGo := filepath.Join(dir, "main.go") + if err := os.WriteFile(mainGo, []byte("package main\nfunc main() {}\n"), 0644); err != nil { + checkGoBuild.err = err return } - GOROOT := strings.TrimSpace(string(out)) - if GOROOT != runtime.GOROOT() { - checkGoGoroot.err = fmt.Errorf("'go env GOROOT' does not match runtime.GOROOT:\n\tgo env: %s\n\tGOROOT: %s", GOROOT, runtime.GOROOT()) + cmd := exec.Command("go", "build", "-o", os.DevNull, mainGo) + cmd.Dir = dir + if out, err := cmd.CombinedOutput(); err != nil { + if len(out) > 0 { + checkGoBuild.err = fmt.Errorf("%v: %v\n%s", cmd, err, out) + } else { + checkGoBuild.err = fmt.Errorf("%v: %v", cmd, err) + } } }) - if checkGoGoroot.err != nil { - return checkGoGoroot.err + if checkGoBuild.err != nil { + return checkGoBuild.err } case "diff": @@ -123,8 +147,11 @@ func cgoEnabled(bypassEnvironment bool) (bool, error) { if bypassEnvironment { cmd.Env = append(append([]string(nil), os.Environ()...), "CGO_ENABLED=") } - out, err := cmd.CombinedOutput() + out, err := cmd.Output() if err != nil { + if exit, ok := err.(*exec.ExitError); ok && len(exit.Stderr) > 0 { + err = fmt.Errorf("%w\nstderr:\n%s", err, exit.Stderr) + } return false, err } enabled := strings.TrimSpace(string(out)) @@ -132,9 +159,10 @@ func cgoEnabled(bypassEnvironment bool) (bool, error) { } func allowMissingTool(tool string) bool { - if runtime.GOOS == "android" { - // Android builds generally run tests on a separate machine from the build, - // so don't expect any external tools to be available. + switch runtime.GOOS { + case "aix", "darwin", "dragonfly", "freebsd", "illumos", "linux", "netbsd", "openbsd", "plan9", "solaris", "windows": + // Known non-mobile OS. Expect a reasonably complete environment. + default: return true } @@ -172,15 +200,20 @@ func allowMissingTool(tool string) bool { // NeedsTool skips t if the named tool is not present in the path. // As a special case, "cgo" means "go" is present and can compile cgo programs. -func NeedsTool(t Testing, tool string) { - if t, ok := t.(helperer); ok { - t.Helper() - } - err := hasTool(tool) +func NeedsTool(t testing.TB, tool string) { + err := HasTool(tool) if err == nil { return } + + t.Helper() if allowMissingTool(tool) { + // TODO(adonovan): if we skip because of (e.g.) + // mismatched go env GOROOT and runtime.GOROOT, don't + // we risk some users not getting the coverage they expect? + // bcmills notes: this shouldn't be a concern as of CL 404134 (Go 1.19). + // We could probably safely get rid of that GOPATH consistency + // check entirely at this point. t.Skipf("skipping because %s tool not available: %v", tool, err) } else { t.Fatalf("%s tool not available: %v", tool, err) @@ -189,10 +222,8 @@ func NeedsTool(t Testing, tool string) { // NeedsGoPackages skips t if the go/packages driver (or 'go' tool) implied by // the current process environment is not present in the path. 
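A standalone sketch of the go-build probe introduced in this hunk: write a trivial main package to a temporary directory and attempt `go build -o /dev/null` on it. The function name canGoBuild is illustrative, not part of the diff.

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

func canGoBuild() error {
	dir, err := os.MkdirTemp("", "gobuild-probe-*")
	if err != nil {
		return err
	}
	defer os.RemoveAll(dir)

	mainGo := filepath.Join(dir, "main.go")
	if err := os.WriteFile(mainGo, []byte("package main\nfunc main() {}\n"), 0644); err != nil {
		return err
	}

	cmd := exec.Command("go", "build", "-o", os.DevNull, mainGo)
	cmd.Dir = dir
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("%v: %v\n%s", cmd, err, out)
	}
	return nil
}

func main() {
	if err := canGoBuild(); err != nil {
		fmt.Fprintln(os.Stderr, "go build unavailable:", err)
		os.Exit(1)
	}
	fmt.Println("go build works")
}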
-func NeedsGoPackages(t Testing) { - if t, ok := t.(helperer); ok { - t.Helper() - } +func NeedsGoPackages(t testing.TB) { + t.Helper() tool := os.Getenv("GOPACKAGESDRIVER") switch tool { @@ -212,10 +243,8 @@ func NeedsGoPackages(t Testing) { // NeedsGoPackagesEnv skips t if the go/packages driver (or 'go' tool) implied // by env is not present in the path. -func NeedsGoPackagesEnv(t Testing, env []string) { - if t, ok := t.(helperer); ok { - t.Helper() - } +func NeedsGoPackagesEnv(t testing.TB, env []string) { + t.Helper() for _, v := range env { if strings.HasPrefix(v, "GOPACKAGESDRIVER=") { @@ -236,20 +265,13 @@ func NeedsGoPackagesEnv(t Testing, env []string) { // and then run them with os.StartProcess or exec.Command. // Android doesn't have the userspace go build needs to run, // and js/wasm doesn't support running subprocesses. -func NeedsGoBuild(t Testing) { - if t, ok := t.(helperer); ok { - t.Helper() - } +func NeedsGoBuild(t testing.TB) { + t.Helper() // This logic was derived from internal/testing.HasGoBuild and // may need to be updated as that function evolves. NeedsTool(t, "go") - - switch runtime.GOOS { - case "android", "js": - t.Skipf("skipping test: %v can't build and run Go binaries", runtime.GOOS) - } } // ExitIfSmallMachine emits a helpful diagnostic and calls os.Exit(0) if the @@ -279,7 +301,12 @@ func ExitIfSmallMachine() { // For now, we'll skip them instead. fmt.Fprintf(os.Stderr, "skipping test: %s builder is too slow (https://golang.org/issue/49321)\n", b) default: - return + switch runtime.GOOS { + case "android", "ios": + fmt.Fprintf(os.Stderr, "skipping test: assuming that %s is resource-constrained\n", runtime.GOOS) + default: + return + } } os.Exit(0) } @@ -298,29 +325,34 @@ func Go1Point() int { // NeedsGo1Point skips t if the Go version used to run the test is older than // 1.x. -func NeedsGo1Point(t Testing, x int) { - if t, ok := t.(helperer); ok { - t.Helper() - } +func NeedsGo1Point(t testing.TB, x int) { if Go1Point() < x { + t.Helper() t.Skipf("running Go version %q is version 1.%d, older than required 1.%d", runtime.Version(), Go1Point(), x) } } // SkipAfterGo1Point skips t if the Go version used to run the test is newer than // 1.x. -func SkipAfterGo1Point(t Testing, x int) { - if t, ok := t.(helperer); ok { - t.Helper() - } +func SkipAfterGo1Point(t testing.TB, x int) { if Go1Point() > x { + t.Helper() t.Skipf("running Go version %q is version 1.%d, newer than maximum 1.%d", runtime.Version(), Go1Point(), x) } } +// NeedsLocalhostNet skips t if networking does not work for ports opened +// with "localhost". +func NeedsLocalhostNet(t testing.TB) { + switch runtime.GOOS { + case "js", "wasip1": + t.Skipf(`Listening on "localhost" fails on %s; see https://go.dev/issue/59718`, runtime.GOOS) + } +} + // Deadline returns the deadline of t, if known, // using the Deadline method added in Go 1.15. -func Deadline(t Testing) (time.Time, bool) { +func Deadline(t testing.TB) (time.Time, bool) { td, ok := t.(interface { Deadline() (time.Time, bool) }) @@ -329,3 +361,132 @@ func Deadline(t Testing) (time.Time, bool) { } return td.Deadline() } + +// WriteImportcfg writes an importcfg file used by the compiler or linker to +// dstPath containing entries for the packages in std and cmd in addition +// to the package to package file mappings in additionalPackageFiles. 
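For reference, the importcfg content that WriteImportcfg assembles consists of `packagefile <importpath>=<objectfile>` lines. A small illustrative sketch follows; the paths are invented for the example, and the real helper also includes the std/cmd entries produced by goroot.Importcfg.

package main

import (
	"fmt"
	"sort"
	"strings"
)

func buildImportcfg(packageFiles map[string]string) string {
	var b strings.Builder
	b.WriteString("# import config\n")

	// Sort for deterministic output; the real helper appends in map order.
	paths := make([]string, 0, len(packageFiles))
	for p := range packageFiles {
		paths = append(paths, p)
	}
	sort.Strings(paths)

	for _, p := range paths {
		fmt.Fprintf(&b, "packagefile %s=%s\n", p, packageFiles[p])
	}
	return b.String()
}

func main() {
	cfg := buildImportcfg(map[string]string{
		"fmt":     "/tmp/cache/fmt.a",
		"strings": "/tmp/cache/strings.a",
	})
	fmt.Print(cfg)
}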
+func WriteImportcfg(t testing.TB, dstPath string, additionalPackageFiles map[string]string) { + importcfg, err := goroot.Importcfg() + for k, v := range additionalPackageFiles { + importcfg += fmt.Sprintf("\npackagefile %s=%s", k, v) + } + if err != nil { + t.Fatalf("preparing the importcfg failed: %s", err) + } + os.WriteFile(dstPath, []byte(importcfg), 0655) + if err != nil { + t.Fatalf("writing the importcfg failed: %s", err) + } +} + +var ( + gorootOnce sync.Once + gorootPath string + gorootErr error +) + +func findGOROOT() (string, error) { + gorootOnce.Do(func() { + gorootPath = runtime.GOROOT() + if gorootPath != "" { + // If runtime.GOROOT() is non-empty, assume that it is valid. (It might + // not be: for example, the user may have explicitly set GOROOT + // to the wrong directory.) + return + } + + cmd := exec.Command("go", "env", "GOROOT") + out, err := cmd.Output() + if err != nil { + gorootErr = fmt.Errorf("%v: %v", cmd, err) + } + gorootPath = strings.TrimSpace(string(out)) + }) + + return gorootPath, gorootErr +} + +// GOROOT reports the path to the directory containing the root of the Go +// project source tree. This is normally equivalent to runtime.GOROOT, but +// works even if the test binary was built with -trimpath. +// +// If GOROOT cannot be found, GOROOT skips t if t is non-nil, +// or panics otherwise. +func GOROOT(t testing.TB) string { + path, err := findGOROOT() + if err != nil { + if t == nil { + panic(err) + } + t.Helper() + t.Skip(err) + } + return path +} + +// NeedsLocalXTools skips t if the golang.org/x/tools module is replaced and +// its replacement directory does not exist (or does not contain the module). +func NeedsLocalXTools(t testing.TB) { + t.Helper() + + NeedsTool(t, "go") + + cmd := Command(t, "go", "list", "-f", "{{with .Replace}}{{.Dir}}{{end}}", "-m", "golang.org/x/tools") + out, err := cmd.Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + t.Skipf("skipping test: %v: %v\n%s", cmd, err, ee.Stderr) + } + t.Skipf("skipping test: %v: %v", cmd, err) + } + + dir := string(bytes.TrimSpace(out)) + if dir == "" { + // No replacement directory, and (since we didn't set -e) no error either. + // Maybe x/tools isn't replaced at all (as in a gopls release, or when + // using a go.work file that includes the x/tools module). + return + } + + // We found the directory where x/tools would exist if we're in a clone of the + // repo. Is it there? (If not, we're probably in the module cache instead.) + modFilePath := filepath.Join(dir, "go.mod") + b, err := os.ReadFile(modFilePath) + if err != nil { + t.Skipf("skipping test: x/tools replacement not found: %v", err) + } + modulePath := modfile.ModulePath(b) + + if want := "golang.org/x/tools"; modulePath != want { + t.Skipf("skipping test: %s module path is %q, not %q", modFilePath, modulePath, want) + } +} + +// NeedsGoExperiment skips t if the current process environment does not +// have a GOEXPERIMENT flag set. +func NeedsGoExperiment(t testing.TB, flag string) { + t.Helper() + + goexp := os.Getenv("GOEXPERIMENT") + set := false + for _, f := range strings.Split(goexp, ",") { + if f == "" { + continue + } + if f == "none" { + // GOEXPERIMENT=none disables all experiment flags. 
+ set = false + break + } + val := true + if strings.HasPrefix(f, "no") { + f, val = f[2:], false + } + if f == flag { + set = val + } + } + if !set { + t.Skipf("skipping test: flag %q is not set in GOEXPERIMENT=%q", flag, goexp) + } +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go new file mode 100644 index 000000000..e9ce0d364 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(unix || aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) +// +build !unix,!aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package testenv + +import "os" + +// Sigquit is the signal to send to kill a hanging subprocess. +// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill. +var Sigquit = os.Kill diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/testenv/testenv_unix.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/testenv/testenv_unix.go new file mode 100644 index 000000000..bc6af1ff8 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/testenv/testenv_unix.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build unix aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package testenv + +import "syscall" + +// Sigquit is the signal to send to kill a hanging subprocess. +// Send SIGQUIT to get a stack trace. 
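A standalone sketch of the GOEXPERIMENT parsing used by NeedsGoExperiment just above: later entries win, a "no" prefix disables a flag, and "none" clears everything seen so far. experimentEnabled is an illustrative name, not part of the diff.

package main

import (
	"fmt"
	"strings"
)

func experimentEnabled(goexp, flag string) bool {
	set := false
	for _, f := range strings.Split(goexp, ",") {
		if f == "" {
			continue
		}
		if f == "none" {
			set = false
			break
		}
		val := true
		if strings.HasPrefix(f, "no") {
			f, val = f[2:], false
		}
		if f == flag {
			set = val
		}
	}
	return set
}

func main() {
	fmt.Println(experimentEnabled("arenas,noarenas", "arenas")) // false
	fmt.Println(experimentEnabled("loopvar", "loopvar"))        // true
	fmt.Println(experimentEnabled("loopvar,none", "loopvar"))   // false
}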
+var Sigquit = syscall.SIGQUIT diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go similarity index 89% rename from vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go rename to MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go index 7e638ec24..ff9437a36 100644 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go @@ -34,30 +34,16 @@ func GetLines(file *token.File) []int { lines []int _ []struct{} } - type tokenFile118 struct { - _ *token.FileSet // deleted in go1.19 - tokenFile119 - } - - type uP = unsafe.Pointer - switch unsafe.Sizeof(*file) { - case unsafe.Sizeof(tokenFile118{}): - var ptr *tokenFile118 - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines - case unsafe.Sizeof(tokenFile119{}): - var ptr *tokenFile119 - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines - - default: + if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) { panic("unexpected token.File size") } + var ptr *tokenFile119 + type uP = unsafe.Pointer + *(*uP)(uP(&ptr)) = uP(file) + ptr.mu.Lock() + defer ptr.mu.Unlock() + return ptr.lines } // AddExistingFiles adds the specified files to the FileSet if they diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/common.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/common.go deleted file mode 100644 index 25a1426d3..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/common.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package typeparams contains common utilities for writing tools that interact -// with generic Go code, as introduced with Go 1.18. -// -// Many of the types and functions in this package are proxies for the new APIs -// introduced in the standard library with Go 1.18. For example, the -// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec -// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go -// versions older than 1.18 these helpers are implemented as stubs, allowing -// users of this package to write code that handles generic constructs inline, -// even if the Go version being used to compile does not support generics. -// -// Additionally, this package contains common utilities for working with the -// new generic constructs, to supplement the standard library APIs. Notably, -// the StructuralTerms API computes a minimal representation of the structural -// restrictions on a type parameter. -// -// An external version of these APIs is available in the -// golang.org/x/exp/typeparams module. -package typeparams - -import ( - "go/ast" - "go/token" - "go/types" -) - -// UnpackIndexExpr extracts data from AST nodes that represent index -// expressions. -// -// For an ast.IndexExpr, the resulting indices slice will contain exactly one -// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable -// number of index expressions. -// -// For nodes that don't represent index expressions, the first return value of -// UnpackIndexExpr will be nil. 
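On Go 1.18 and later, the ast.IndexListExpr node that UnpackIndexExpr abstracts over is available directly in go/ast, which is why this shim can be removed. A minimal sketch of the equivalent switch; unpackIndex is an illustrative name, not the helper being deleted.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

func unpackIndex(n ast.Node) (x ast.Expr, indices []ast.Expr) {
	switch e := n.(type) {
	case *ast.IndexExpr: // F[int]
		return e.X, []ast.Expr{e.Index}
	case *ast.IndexListExpr: // F[int, string]
		return e.X, e.Indices
	}
	return nil, nil
}

func main() {
	expr, err := parser.ParseExpr("F[int, string]")
	if err != nil {
		panic(err)
	}
	x, indices := unpackIndex(expr)
	fmt.Printf("base %v with %d type arguments\n", x, len(indices))
}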
-func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { - switch e := n.(type) { - case *ast.IndexExpr: - return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack - case *IndexListExpr: - return e.X, e.Lbrack, e.Indices, e.Rbrack - } - return nil, token.NoPos, nil, token.NoPos -} - -// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on -// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 -// will panic. -func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { - switch len(indices) { - case 0: - panic("empty indices") - case 1: - return &ast.IndexExpr{ - X: x, - Lbrack: lbrack, - Index: indices[0], - Rbrack: rbrack, - } - default: - return &IndexListExpr{ - X: x, - Lbrack: lbrack, - Indices: indices, - Rbrack: rbrack, - } - } -} - -// IsTypeParam reports whether t is a type parameter. -func IsTypeParam(t types.Type) bool { - _, ok := t.(*TypeParam) - return ok -} - -// OriginMethod returns the origin method associated with the method fn. -// For methods on a non-generic receiver base type, this is just -// fn. However, for methods with a generic receiver, OriginMethod returns the -// corresponding method in the method set of the origin type. -// -// As a special case, if fn is not a method (has no receiver), OriginMethod -// returns fn. -func OriginMethod(fn *types.Func) *types.Func { - recv := fn.Type().(*types.Signature).Recv() - if recv == nil { - - return fn - } - base := recv.Type() - p, isPtr := base.(*types.Pointer) - if isPtr { - base = p.Elem() - } - named, isNamed := base.(*types.Named) - if !isNamed { - // Receiver is a *types.Interface. - return fn - } - if ForNamed(named).Len() == 0 { - // Receiver base has no type parameters, so we can avoid the lookup below. - return fn - } - orig := NamedTypeOrigin(named) - gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) - return gfn.(*types.Func) -} - -// GenericAssignableTo is a generalization of types.AssignableTo that -// implements the following rule for uninstantiated generic types: -// -// If V and T are generic named types, then V is considered assignable to T if, -// for every possible instantation of V[A_1, ..., A_N], the instantiation -// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. -// -// If T has structural constraints, they must be satisfied by V. -// -// For example, consider the following type declarations: -// -// type Interface[T any] interface { -// Accept(T) -// } -// -// type Container[T any] struct { -// Element T -// } -// -// func (c Container[T]) Accept(t T) { c.Element = t } -// -// In this case, GenericAssignableTo reports that instantiations of Container -// are assignable to the corresponding instantiation of Interface. -func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { - // If V and T are not both named, or do not have matching non-empty type - // parameter lists, fall back on types.AssignableTo. - - VN, Vnamed := V.(*types.Named) - TN, Tnamed := T.(*types.Named) - if !Vnamed || !Tnamed { - return types.AssignableTo(V, T) - } - - vtparams := ForNamed(VN) - ttparams := ForNamed(TN) - if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 { - return types.AssignableTo(V, T) - } - - // V and T have the same (non-zero) number of type params. Instantiate both - // with the type parameters of V. 
This must always succeed for V, and will - // succeed for T if and only if the type set of each type parameter of V is a - // subset of the type set of the corresponding type parameter of T, meaning - // that every instantiation of V corresponds to a valid instantiation of T. - - // Minor optimization: ensure we share a context across the two - // instantiations below. - if ctxt == nil { - ctxt = NewContext() - } - - var targs []types.Type - for i := 0; i < vtparams.Len(); i++ { - targs = append(targs, vtparams.At(i)) - } - - vinst, err := Instantiate(ctxt, V, targs, true) - if err != nil { - panic("type parameters should satisfy their own constraints") - } - - tinst, err := Instantiate(ctxt, T, targs, true) - if err != nil { - return false - } - - return types.AssignableTo(vinst, tinst) -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/coretype.go deleted file mode 100644 index 993135ec9..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeparams - -import ( - "go/types" -) - -// CoreType returns the core type of T or nil if T does not have a core type. -// -// See https://go.dev/ref/spec#Core_types for the definition of a core type. -func CoreType(T types.Type) types.Type { - U := T.Underlying() - if _, ok := U.(*types.Interface); !ok { - return U // for non-interface types, - } - - terms, err := _NormalTerms(U) - if len(terms) == 0 || err != nil { - // len(terms) -> empty type set of interface. - // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. - return nil // no core type. - } - - U = terms[0].Type().Underlying() - var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) - for identical = 1; identical < len(terms); identical++ { - if !types.Identical(U, terms[identical].Type().Underlying()) { - break - } - } - - if identical == len(terms) { - // https://go.dev/ref/spec#Core_types - // "There is a single type U which is the underlying type of all types in the type set of T" - return U - } - ch, ok := U.(*types.Chan) - if !ok { - return nil // no core type as identical < len(terms) and U is not a channel. - } - // https://go.dev/ref/spec#Core_types - // "the type chan E if T contains only bidirectional channels, or the type chan<- E or - // <-chan E depending on the direction of the directional channels present." - for chans := identical; chans < len(terms); chans++ { - curr, ok := terms[chans].Type().Underlying().(*types.Chan) - if !ok { - return nil - } - if !types.Identical(ch.Elem(), curr.Elem()) { - return nil // channel elements are not identical. - } - if ch.Dir() == types.SendRecv { - // ch is bidirectional. We can safely always use curr's direction. - ch = curr - } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { - // ch and curr are not bidirectional and not the same direction. - return nil - } - } - return ch -} - -// _NormalTerms returns a slice of terms representing the normalized structural -// type restrictions of a type, if any. -// -// For all types other than *types.TypeParam, *types.Interface, and -// *types.Union, this is just a single term with Tilde() == false and -// Type() == typ. 
For *types.TypeParam, *types.Interface, and *types.Union, see -// below. -// -// Structural type restrictions of a type parameter are created via -// non-interface types embedded in its constraint interface (directly, or via a -// chain of interface embeddings). For example, in the declaration type -// T[P interface{~int; m()}] int the structural restriction of the type -// parameter P is ~int. -// -// With interface embedding and unions, the specification of structural type -// restrictions may be arbitrarily complex. For example, consider the -// following: -// -// type A interface{ ~string|~[]byte } -// -// type B interface{ int|string } -// -// type C interface { ~string|~int } -// -// type T[P interface{ A|B; C }] int -// -// In this example, the structural type restriction of P is ~string|int: A|B -// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, -// which when intersected with C (~string|~int) yields ~string|int. -// -// _NormalTerms computes these expansions and reductions, producing a -// "normalized" form of the embeddings. A structural restriction is normalized -// if it is a single union containing no interface terms, and is minimal in the -// sense that removing any term changes the set of types satisfying the -// constraint. It is left as a proof for the reader that, modulo sorting, there -// is exactly one such normalized form. -// -// Because the minimal representation always takes this form, _NormalTerms -// returns a slice of tilde terms corresponding to the terms of the union in -// the normalized structural restriction. An error is returned if the type is -// invalid, exceeds complexity bounds, or has an empty type set. In the latter -// case, _NormalTerms returns ErrEmptyTypeSet. -// -// _NormalTerms makes no guarantees about the order of terms, except that it -// is deterministic. -func _NormalTerms(typ types.Type) ([]*Term, error) { - switch typ := typ.(type) { - case *TypeParam: - return StructuralTerms(typ) - case *Union: - return UnionTermSet(typ) - case *types.Interface: - return InterfaceTermSet(typ) - default: - return []*Term{NewTerm(false, typ)}, nil - } -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go deleted file mode 100644 index 18212390e..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = false diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go deleted file mode 100644 index d67148823..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.18 -// +build go1.18 - -package typeparams - -// Note: this constant is in a separate file as this is the only acceptable -// diff between the <1.18 API of this package and the 1.18 API. - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = true diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/normalize.go deleted file mode 100644 index 9c631b651..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeparams - -import ( - "errors" - "fmt" - "go/types" - "os" - "strings" -) - -//go:generate go run copytermlist.go - -const debug = false - -var ErrEmptyTypeSet = errors.New("empty type set") - -// StructuralTerms returns a slice of terms representing the normalized -// structural type restrictions of a type parameter, if any. -// -// Structural type restrictions of a type parameter are created via -// non-interface types embedded in its constraint interface (directly, or via a -// chain of interface embeddings). For example, in the declaration -// -// type T[P interface{~int; m()}] int -// -// the structural restriction of the type parameter P is ~int. -// -// With interface embedding and unions, the specification of structural type -// restrictions may be arbitrarily complex. For example, consider the -// following: -// -// type A interface{ ~string|~[]byte } -// -// type B interface{ int|string } -// -// type C interface { ~string|~int } -// -// type T[P interface{ A|B; C }] int -// -// In this example, the structural type restriction of P is ~string|int: A|B -// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, -// which when intersected with C (~string|~int) yields ~string|int. -// -// StructuralTerms computes these expansions and reductions, producing a -// "normalized" form of the embeddings. A structural restriction is normalized -// if it is a single union containing no interface terms, and is minimal in the -// sense that removing any term changes the set of types satisfying the -// constraint. It is left as a proof for the reader that, modulo sorting, there -// is exactly one such normalized form. -// -// Because the minimal representation always takes this form, StructuralTerms -// returns a slice of tilde terms corresponding to the terms of the union in -// the normalized structural restriction. An error is returned if the -// constraint interface is invalid, exceeds complexity bounds, or has an empty -// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. -// -// StructuralTerms makes no guarantees about the order of terms, except that it -// is deterministic. -func StructuralTerms(tparam *TypeParam) ([]*Term, error) { - constraint := tparam.Constraint() - if constraint == nil { - return nil, fmt.Errorf("%s has nil constraint", tparam) - } - iface, _ := constraint.Underlying().(*types.Interface) - if iface == nil { - return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) - } - return InterfaceTermSet(iface) -} - -// InterfaceTermSet computes the normalized terms for a constraint interface, -// returning an error if the term set cannot be computed or is empty. 
In the -// latter case, the error will be ErrEmptyTypeSet. -// -// See the documentation of StructuralTerms for more information on -// normalization. -func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { - return computeTermSet(iface) -} - -// UnionTermSet computes the normalized terms for a union, returning an error -// if the term set cannot be computed or is empty. In the latter case, the -// error will be ErrEmptyTypeSet. -// -// See the documentation of StructuralTerms for more information on -// normalization. -func UnionTermSet(union *Union) ([]*Term, error) { - return computeTermSet(union) -} - -func computeTermSet(typ types.Type) ([]*Term, error) { - tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) - if err != nil { - return nil, err - } - if tset.terms.isEmpty() { - return nil, ErrEmptyTypeSet - } - if tset.terms.isAll() { - return nil, nil - } - var terms []*Term - for _, term := range tset.terms { - terms = append(terms, NewTerm(term.tilde, term.typ)) - } - return terms, nil -} - -// A termSet holds the normalized set of terms for a given type. -// -// The name termSet is intentionally distinct from 'type set': a type set is -// all types that implement a type (and includes method restrictions), whereas -// a term set just represents the structural restrictions on a type. -type termSet struct { - complete bool - terms termlist -} - -func indentf(depth int, format string, args ...interface{}) { - fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) -} - -func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { - if t == nil { - panic("nil type") - } - - if debug { - indentf(depth, "%s", t.String()) - defer func() { - if err != nil { - indentf(depth, "=> %s", err) - } else { - indentf(depth, "=> %s", res.terms.String()) - } - }() - } - - const maxTermCount = 100 - if tset, ok := seen[t]; ok { - if !tset.complete { - return nil, fmt.Errorf("cycle detected in the declaration of %s", t) - } - return tset, nil - } - - // Mark the current type as seen to avoid infinite recursion. - tset := new(termSet) - defer func() { - tset.complete = true - }() - seen[t] = tset - - switch u := t.Underlying().(type) { - case *types.Interface: - // The term set of an interface is the intersection of the term sets of its - // embedded types. - tset.terms = allTermlist - for i := 0; i < u.NumEmbeddeds(); i++ { - embedded := u.EmbeddedType(i) - if _, ok := embedded.Underlying().(*TypeParam); ok { - return nil, fmt.Errorf("invalid embedded type %T", embedded) - } - tset2, err := computeTermSetInternal(embedded, seen, depth+1) - if err != nil { - return nil, err - } - tset.terms = tset.terms.intersect(tset2.terms) - } - case *Union: - // The term set of a union is the union of term sets of its terms. - tset.terms = nil - for i := 0; i < u.Len(); i++ { - t := u.Term(i) - var terms termlist - switch t.Type().Underlying().(type) { - case *types.Interface: - tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) - if err != nil { - return nil, err - } - terms = tset2.terms - case *TypeParam, *Union: - // A stand-alone type parameter or union is not permitted as union - // term. 
- return nil, fmt.Errorf("invalid union term %T", t) - default: - if t.Type() == types.Typ[types.Invalid] { - continue - } - terms = termlist{{t.Tilde(), t.Type()}} - } - tset.terms = tset.terms.union(terms) - if len(tset.terms) > maxTermCount { - return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) - } - } - case *TypeParam: - panic("unreachable") - default: - // For all other types, the term set is just a single non-tilde term - // holding the type itself. - if u != types.Typ[types.Invalid] { - tset.terms = termlist{{false, t}} - } - } - return tset, nil -} - -// under is a facade for the go/types internal function of the same name. It is -// used by typeterm.go. -func under(t types.Type) types.Type { - return t.Underlying() -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/termlist.go deleted file mode 100644 index 933106a23..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by copytermlist.go DO NOT EDIT. - -package typeparams - -import ( - "bytes" - "go/types" -) - -// A termlist represents the type set represented by the union -// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn. -// A termlist is in normal form if all terms are disjoint. -// termlist operations don't require the operands to be in -// normal form. -type termlist []*term - -// allTermlist represents the set of all types. -// It is in normal form. -var allTermlist = termlist{new(term)} - -// String prints the termlist exactly (without normalization). -func (xl termlist) String() string { - if len(xl) == 0 { - return "∅" - } - var buf bytes.Buffer - for i, x := range xl { - if i > 0 { - buf.WriteString(" ∪ ") - } - buf.WriteString(x.String()) - } - return buf.String() -} - -// isEmpty reports whether the termlist xl represents the empty set of types. -func (xl termlist) isEmpty() bool { - // If there's a non-nil term, the entire list is not empty. - // If the termlist is in normal form, this requires at most - // one iteration. - for _, x := range xl { - if x != nil { - return false - } - } - return true -} - -// isAll reports whether the termlist xl represents the set of all types. -func (xl termlist) isAll() bool { - // If there's a 𝓤 term, the entire list is 𝓤. - // If the termlist is in normal form, this requires at most - // one iteration. - for _, x := range xl { - if x != nil && x.typ == nil { - return true - } - } - return false -} - -// norm returns the normal form of xl. -func (xl termlist) norm() termlist { - // Quadratic algorithm, but good enough for now. - // TODO(gri) fix asymptotic performance - used := make([]bool, len(xl)) - var rl termlist - for i, xi := range xl { - if xi == nil || used[i] { - continue - } - for j := i + 1; j < len(xl); j++ { - xj := xl[j] - if xj == nil || used[j] { - continue - } - if u1, u2 := xi.union(xj); u2 == nil { - // If we encounter a 𝓤 term, the entire list is 𝓤. - // Exit early. - // (Note that this is not just an optimization; - // if we continue, we may end up with a 𝓤 term - // and other terms and the result would not be - // in normal form.) 
- if u1.typ == nil { - return allTermlist - } - xi = u1 - used[j] = true // xj is now unioned into xi - ignore it in future iterations - } - } - rl = append(rl, xi) - } - return rl -} - -// union returns the union xl ∪ yl. -func (xl termlist) union(yl termlist) termlist { - return append(xl, yl...).norm() -} - -// intersect returns the intersection xl ∩ yl. -func (xl termlist) intersect(yl termlist) termlist { - if xl.isEmpty() || yl.isEmpty() { - return nil - } - - // Quadratic algorithm, but good enough for now. - // TODO(gri) fix asymptotic performance - var rl termlist - for _, x := range xl { - for _, y := range yl { - if r := x.intersect(y); r != nil { - rl = append(rl, r) - } - } - } - return rl.norm() -} - -// equal reports whether xl and yl represent the same type set. -func (xl termlist) equal(yl termlist) bool { - // TODO(gri) this should be more efficient - return xl.subsetOf(yl) && yl.subsetOf(xl) -} - -// includes reports whether t ∈ xl. -func (xl termlist) includes(t types.Type) bool { - for _, x := range xl { - if x.includes(t) { - return true - } - } - return false -} - -// supersetOf reports whether y ⊆ xl. -func (xl termlist) supersetOf(y *term) bool { - for _, x := range xl { - if y.subsetOf(x) { - return true - } - } - return false -} - -// subsetOf reports whether xl ⊆ yl. -func (xl termlist) subsetOf(yl termlist) bool { - if yl.isEmpty() { - return xl.isEmpty() - } - - // each term x of xl must be a subset of yl - for _, x := range xl { - if !yl.supersetOf(x) { - return false // x is not a subset yl - } - } - return true -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go deleted file mode 100644 index b4788978f..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -import ( - "go/ast" - "go/token" - "go/types" -) - -func unsupported() { - panic("type parameters are unsupported at this go version") -} - -// IndexListExpr is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type IndexListExpr struct { - ast.Expr - X ast.Expr // expression - Lbrack token.Pos // position of "[" - Indices []ast.Expr // index expressions - Rbrack token.Pos // position of "]" -} - -// ForTypeSpec returns an empty field list, as type parameters on not supported -// at this Go version. -func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { - return nil -} - -// ForFuncType returns an empty field list, as type parameters are not -// supported at this Go version. -func ForFuncType(*ast.FuncType) *ast.FieldList { - return nil -} - -// TypeParam is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type TypeParam struct{ types.Type } - -func (*TypeParam) Index() int { unsupported(); return 0 } -func (*TypeParam) Constraint() types.Type { unsupported(); return nil } -func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } - -// TypeParamList is a placeholder for an empty type parameter list. 
-type TypeParamList struct{} - -func (*TypeParamList) Len() int { return 0 } -func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil } - -// TypeList is a placeholder for an empty type list. -type TypeList struct{} - -func (*TypeList) Len() int { return 0 } -func (*TypeList) At(int) types.Type { unsupported(); return nil } - -// NewTypeParam is unsupported at this Go version, and panics. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - unsupported() - return nil -} - -// SetTypeParamConstraint is unsupported at this Go version, and panics. -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - unsupported() -} - -// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or -// typeParams is non-empty. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - if len(recvTypeParams) != 0 || len(typeParams) != 0 { - panic("signatures cannot have type parameters at this Go version") - } - return types.NewSignature(recv, params, results, variadic) -} - -// ForSignature returns an empty slice. -func ForSignature(*types.Signature) *TypeParamList { - return nil -} - -// RecvTypeParams returns a nil slice. -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return nil -} - -// IsComparable returns false, as no interfaces are type-restricted at this Go -// version. -func IsComparable(*types.Interface) bool { - return false -} - -// IsMethodSet returns true, as no interfaces are type-restricted at this Go -// version. -func IsMethodSet(*types.Interface) bool { - return true -} - -// IsImplicit returns false, as no interfaces are implicit at this Go version. -func IsImplicit(*types.Interface) bool { - return false -} - -// MarkImplicit does nothing, because this Go version does not have implicit -// interfaces. -func MarkImplicit(*types.Interface) {} - -// ForNamed returns an empty type parameter list, as type parameters are not -// supported at this Go version. -func ForNamed(*types.Named) *TypeParamList { - return nil -} - -// SetForNamed panics if tparams is non-empty. -func SetForNamed(_ *types.Named, tparams []*TypeParam) { - if len(tparams) > 0 { - unsupported() - } -} - -// NamedTypeArgs returns nil. -func NamedTypeArgs(*types.Named) *TypeList { - return nil -} - -// NamedTypeOrigin is the identity method at this Go version. -func NamedTypeOrigin(named *types.Named) types.Type { - return named -} - -// Term holds information about a structural type restriction. -type Term struct { - tilde bool - typ types.Type -} - -func (m *Term) Tilde() bool { return m.tilde } -func (m *Term) Type() types.Type { return m.typ } -func (m *Term) String() string { - pre := "" - if m.tilde { - pre = "~" - } - return pre + m.typ.String() -} - -// NewTerm is unsupported at this Go version, and panics. -func NewTerm(tilde bool, typ types.Type) *Term { - return &Term{tilde, typ} -} - -// Union is a placeholder type, as type parameters are not supported at this Go -// version. Its methods panic on use. -type Union struct{ types.Type } - -func (*Union) Len() int { return 0 } -func (*Union) Term(i int) *Term { unsupported(); return nil } - -// NewUnion is unsupported at this Go version, and panics. -func NewUnion(terms []*Term) *Union { - unsupported() - return nil -} - -// InitInstanceInfo is a noop at this Go version. 
-func InitInstanceInfo(*types.Info) {} - -// Instance is a placeholder type, as type parameters are not supported at this -// Go version. -type Instance struct { - TypeArgs *TypeList - Type types.Type -} - -// GetInstances returns a nil map, as type parameters are not supported at this -// Go version. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil } - -// Context is a placeholder type, as type parameters are not supported at -// this Go version. -type Context struct{} - -// NewContext returns a placeholder Context instance. -func NewContext() *Context { - return &Context{} -} - -// Instantiate is unsupported on this Go version, and panics. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - unsupported() - return nil, nil -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go deleted file mode 100644 index 114a36b86..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -import ( - "go/ast" - "go/types" -) - -// IndexListExpr is an alias for ast.IndexListExpr. -type IndexListExpr = ast.IndexListExpr - -// ForTypeSpec returns n.TypeParams. -func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// ForFuncType returns n.TypeParams. -func ForFuncType(n *ast.FuncType) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// TypeParam is an alias for types.TypeParam -type TypeParam = types.TypeParam - -// TypeParamList is an alias for types.TypeParamList -type TypeParamList = types.TypeParamList - -// TypeList is an alias for types.TypeList -type TypeList = types.TypeList - -// NewTypeParam calls types.NewTypeParam. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - return types.NewTypeParam(name, constraint) -} - -// SetTypeParamConstraint calls tparam.SetConstraint(constraint). -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - tparam.SetConstraint(constraint) -} - -// NewSignatureType calls types.NewSignatureType. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic) -} - -// ForSignature returns sig.TypeParams() -func ForSignature(sig *types.Signature) *TypeParamList { - return sig.TypeParams() -} - -// RecvTypeParams returns sig.RecvTypeParams(). -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return sig.RecvTypeParams() -} - -// IsComparable calls iface.IsComparable(). -func IsComparable(iface *types.Interface) bool { - return iface.IsComparable() -} - -// IsMethodSet calls iface.IsMethodSet(). -func IsMethodSet(iface *types.Interface) bool { - return iface.IsMethodSet() -} - -// IsImplicit calls iface.IsImplicit(). -func IsImplicit(iface *types.Interface) bool { - return iface.IsImplicit() -} - -// MarkImplicit calls iface.MarkImplicit(). 
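The deleted typeparams_go117.go above and the go1.18 alias file shown here are the paired build-tag shims older x/tools used to paper over the type-parameter API; both are dropped from the vendor tree with the x/tools upgrade recorded in modules.txt later in this diff, and on the go1.22.4 toolchain these builds now use, the equivalent API lives in go/types itself. A minimal sketch, assuming only the standard library, of reading the same information the shims exposed as ForNamed and TypeParamList:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p
type Pair[K comparable, V any] struct {
	Key K
	Val V
}`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}
	named := pkg.Scope().Lookup("Pair").Type().(*types.Named)
	tparams := named.TypeParams() // *types.TypeParamList, what the shim's ForNamed wrapped
	fmt.Println(tparams.Len())    // 2
	for i := 0; i < tparams.Len(); i++ {
		fmt.Println(tparams.At(i).Obj().Name(), tparams.At(i).Constraint())
	}
}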
-func MarkImplicit(iface *types.Interface) { - iface.MarkImplicit() -} - -// ForNamed extracts the (possibly empty) type parameter object list from -// named. -func ForNamed(named *types.Named) *TypeParamList { - return named.TypeParams() -} - -// SetForNamed sets the type params tparams on n. Each tparam must be of -// dynamic type *types.TypeParam. -func SetForNamed(n *types.Named, tparams []*TypeParam) { - n.SetTypeParams(tparams) -} - -// NamedTypeArgs returns named.TypeArgs(). -func NamedTypeArgs(named *types.Named) *TypeList { - return named.TypeArgs() -} - -// NamedTypeOrigin returns named.Orig(). -func NamedTypeOrigin(named *types.Named) types.Type { - return named.Origin() -} - -// Term is an alias for types.Term. -type Term = types.Term - -// NewTerm calls types.NewTerm. -func NewTerm(tilde bool, typ types.Type) *Term { - return types.NewTerm(tilde, typ) -} - -// Union is an alias for types.Union -type Union = types.Union - -// NewUnion calls types.NewUnion. -func NewUnion(terms []*Term) *Union { - return types.NewUnion(terms) -} - -// InitInstanceInfo initializes info to record information about type and -// function instances. -func InitInstanceInfo(info *types.Info) { - info.Instances = make(map[*ast.Ident]types.Instance) -} - -// Instance is an alias for types.Instance. -type Instance = types.Instance - -// GetInstances returns info.Instances. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { - return info.Instances -} - -// Context is an alias for types.Context. -type Context = types.Context - -// NewContext calls types.NewContext. -func NewContext() *Context { - return types.NewContext() -} - -// Instantiate calls types.Instantiate. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - return types.Instantiate(ctxt, typ, targs, validate) -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/typeterm.go deleted file mode 100644 index 7ddee28d9..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typeparams/typeterm.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by copytermlist.go DO NOT EDIT. - -package typeparams - -import "go/types" - -// A term describes elementary type sets: -// -// ∅: (*term)(nil) == ∅ // set of no types (empty set) -// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) -// T: &term{false, T} == {T} // set of type T -// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t -// -type term struct { - tilde bool // valid if typ != nil - typ types.Type -} - -func (x *term) String() string { - switch { - case x == nil: - return "∅" - case x.typ == nil: - return "𝓤" - case x.tilde: - return "~" + x.typ.String() - default: - return x.typ.String() - } -} - -// equal reports whether x and y represent the same type set. -func (x *term) equal(y *term) bool { - // easy cases - switch { - case x == nil || y == nil: - return x == y - case x.typ == nil || y.typ == nil: - return x.typ == y.typ - } - // ∅ ⊂ x, y ⊂ 𝓤 - - return x.tilde == y.tilde && types.Identical(x.typ, y.typ) -} - -// union returns the union x ∪ y: zero, one, or two non-nil terms. 
-func (x *term) union(y *term) (_, _ *term) { - // easy cases - switch { - case x == nil && y == nil: - return nil, nil // ∅ ∪ ∅ == ∅ - case x == nil: - return y, nil // ∅ ∪ y == y - case y == nil: - return x, nil // x ∪ ∅ == x - case x.typ == nil: - return x, nil // 𝓤 ∪ y == 𝓤 - case y.typ == nil: - return y, nil // x ∪ 𝓤 == 𝓤 - } - // ∅ ⊂ x, y ⊂ 𝓤 - - if x.disjoint(y) { - return x, y // x ∪ y == (x, y) if x ∩ y == ∅ - } - // x.typ == y.typ - - // ~t ∪ ~t == ~t - // ~t ∪ T == ~t - // T ∪ ~t == ~t - // T ∪ T == T - if x.tilde || !y.tilde { - return x, nil - } - return y, nil -} - -// intersect returns the intersection x ∩ y. -func (x *term) intersect(y *term) *term { - // easy cases - switch { - case x == nil || y == nil: - return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅ - case x.typ == nil: - return y // 𝓤 ∩ y == y - case y.typ == nil: - return x // x ∩ 𝓤 == x - } - // ∅ ⊂ x, y ⊂ 𝓤 - - if x.disjoint(y) { - return nil // x ∩ y == ∅ if x ∩ y == ∅ - } - // x.typ == y.typ - - // ~t ∩ ~t == ~t - // ~t ∩ T == T - // T ∩ ~t == T - // T ∩ T == T - if !x.tilde || y.tilde { - return x - } - return y -} - -// includes reports whether t ∈ x. -func (x *term) includes(t types.Type) bool { - // easy cases - switch { - case x == nil: - return false // t ∈ ∅ == false - case x.typ == nil: - return true // t ∈ 𝓤 == true - } - // ∅ ⊂ x ⊂ 𝓤 - - u := t - if x.tilde { - u = under(u) - } - return types.Identical(x.typ, u) -} - -// subsetOf reports whether x ⊆ y. -func (x *term) subsetOf(y *term) bool { - // easy cases - switch { - case x == nil: - return true // ∅ ⊆ y == true - case y == nil: - return false // x ⊆ ∅ == false since x != ∅ - case y.typ == nil: - return true // x ⊆ 𝓤 == true - case x.typ == nil: - return false // 𝓤 ⊆ y == false since y != 𝓤 - } - // ∅ ⊂ x, y ⊂ 𝓤 - - if x.disjoint(y) { - return false // x ⊆ y == false if x ∩ y == ∅ - } - // x.typ == y.typ - - // ~t ⊆ ~t == true - // ~t ⊆ T == false - // T ⊆ ~t == true - // T ⊆ T == true - return !x.tilde || y.tilde -} - -// disjoint reports whether x ∩ y == ∅. -// x.typ and y.typ must not be nil. -func (x *term) disjoint(y *term) bool { - if debug && (x.typ == nil || y.typ == nil) { - panic("invalid argument(s)") - } - ux := x.typ - if y.tilde { - ux = under(ux) - } - uy := y.typ - if x.tilde { - uy = under(uy) - } - return !types.Identical(ux, uy) -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index d38ee3c27..834e05381 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -30,6 +30,12 @@ type ErrorCode int // convention that "bad" implies a problem with syntax, and "invalid" implies a // problem with types. +const ( + // InvalidSyntaxTree occurs if an invalid syntax tree is provided + // to the type checker. It should never happen. + InvalidSyntaxTree ErrorCode = -1 +) + const ( _ ErrorCode = iota @@ -153,15 +159,15 @@ const ( /* decls > var (+ other variable assignment codes) */ - // UntypedNil occurs when the predeclared (untyped) value nil is used to + // UntypedNilUse occurs when the predeclared (untyped) value nil is used to // initialize a variable declared without an explicit type. 
// // Example: // var x = nil - UntypedNil + UntypedNilUse // WrongAssignCount occurs when the number of values on the right-hand side - // of an assignment or or initialization expression does not match the number + // of an assignment or initialization expression does not match the number // of variables on the left-hand side. // // Example: @@ -1443,10 +1449,10 @@ const ( NotAGenericType // WrongTypeArgCount occurs when a type or function is instantiated with an - // incorrent number of type arguments, including when a generic type or + // incorrect number of type arguments, including when a generic type or // function is used without instantiation. // - // Errors inolving failed type inference are assigned other error codes. + // Errors involving failed type inference are assigned other error codes. // // Example: // type T[p any] int @@ -1523,4 +1529,32 @@ const ( // Example: // type T[P any] struct{ *P } MisplacedTypeParam + + // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with + // an argument that is not of slice type. It also occurs if it is used + // in a package compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.SliceData(x) + InvalidUnsafeSliceData + + // InvalidUnsafeString occurs when unsafe.String is called with + // a length argument that is not of integer type, negative, or + // out of bounds. It also occurs if it is used in a package + // compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var b [10]byte + // var _ = unsafe.String(&b[0], -1) + InvalidUnsafeString + + // InvalidUnsafeStringData occurs if it is used in a package + // compiled for a language version before go1.20. + _ // not used anymore + ) diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go index de90e9515..15ecf7c5d 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go @@ -8,6 +8,7 @@ func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. 
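The regenerated stringer output that follows has to accommodate InvalidSyntaxTree, which sits at -1 outside the contiguous run of positive codes, so the String method switches between two name tables. A self-contained sketch of that pattern with hypothetical codes (not the real ErrorCode values):

package main

import "fmt"

type code int

const invalidTree code = -1 // sentinel outside the contiguous range

const (
	_ code = iota
	blankName
	badImport
)

var names = [...]string{"BlankName", "BadImport"}

func (c code) String() string {
	switch {
	case c == invalidTree:
		return "InvalidTree"
	case 1 <= c && c <= code(len(names)):
		return names[c-1]
	default:
		return "code(" + fmt.Sprint(int(c)) + ")"
	}
}

func main() {
	fmt.Println(invalidTree, blankName, code(99)) // InvalidTree BlankName code(99)
}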
var x [1]struct{} + _ = x[InvalidSyntaxTree - -1] _ = x[Test-1] _ = x[BlankPkgName-2] _ = x[MismatchedPkgName-3] @@ -23,7 +24,7 @@ func _() { _ = x[InvalidConstInit-13] _ = x[InvalidConstVal-14] _ = x[InvalidConstType-15] - _ = x[UntypedNil-16] + _ = x[UntypedNilUse-16] _ = x[WrongAssignCount-17] _ = x[UnassignableOperand-18] _ = x[NoNewVar-19] @@ -152,16 +153,27 @@ func _() { _ = x[MisplacedConstraintIface-142] _ = x[InvalidMethodTypeParams-143] _ = x[MisplacedTypeParam-144] + _ = x[InvalidUnsafeSliceData-145] + _ = x[InvalidUnsafeString-146] } -const _ErrorCode_name = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParam" +const ( + _ErrorCode_name_0 = "InvalidSyntaxTree" + _ErrorCode_name_1 = 
"TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString" +) -var _ErrorCode_index = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 215, 231, 250, 258, 274, 292, 309, 327, 351, 359, 374, 390, 408, 425, 440, 447, 458, 481, 496, 508, 519, 534, 548, 563, 578, 591, 600, 614, 629, 640, 655, 664, 680, 700, 718, 737, 749, 768, 787, 803, 820, 839, 853, 864, 879, 892, 907, 923, 937, 953, 968, 985, 1003, 1018, 1028, 1038, 1055, 1077, 1091, 1105, 1125, 1143, 1163, 1181, 1204, 1220, 1235, 1248, 1258, 1270, 1281, 1295, 1308, 1319, 1329, 1344, 1355, 1366, 1379, 1395, 1412, 1436, 1453, 1468, 1478, 1487, 1500, 1516, 1532, 1543, 1558, 1574, 1588, 1604, 1618, 1635, 1655, 1668, 1684, 1698, 1715, 1732, 1749, 1764, 1778, 1792, 1803, 1815, 1828, 1845, 1858, 1869, 1882, 1894, 1903, 1910, 1922, 1938, 1956, 1974, 1989, 2006, 2025, 2039, 2059, 2071, 2095, 2118, 2136} +var ( + _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 
1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180} +) func (i ErrorCode) String() string { - i -= 1 - if i < 0 || i >= ErrorCode(len(_ErrorCode_index)-1) { - return "ErrorCode(" + strconv.FormatInt(int64(i+1), 10) + ")" + switch { + case i == -1: + return _ErrorCode_name_0 + case 1 <= i && i <= 146: + i -= 1 + return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]] + default: + return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")" } - return _ErrorCode_name[_ErrorCode_index[i]:_ErrorCode_index[i+1]] } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/recv.go new file mode 100644 index 000000000..fea7c8b75 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -0,0 +1,43 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" +) + +// ReceiverNamed returns the named type (if any) associated with the +// type of recv, which may be of the form N or *N, or aliases thereof. +// It also reports whether a Pointer was present. +func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { + t := recv.Type() + if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + isPtr = true + t = ptr.Elem() + } + named, _ = aliases.Unalias(t).(*types.Named) + return +} + +// Unpointer returns T given *T or an alias thereof. +// For all other types it is the identity function. +// It does not look at underlying types. +// The result may be an alias. +// +// Use this function to strip off the optional pointer on a receiver +// in a field or method selection, without losing the named type +// (which is needed to compute the method set). +// +// See also [typeparams.MustDeref], which removes one level of +// indirection from the type, regardless of named types (analogous to +// a LOAD instruction). +func Unpointer(t types.Type) types.Type { + if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/toonew.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/toonew.go new file mode 100644 index 000000000..cc86487ea --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/toonew.go @@ -0,0 +1,89 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + + "golang.org/x/tools/internal/stdlib" + "golang.org/x/tools/internal/versions" +) + +// TooNewStdSymbols computes the set of package-level symbols +// exported by pkg that are not available at the specified version. +// The result maps each symbol to its minimum version. +// +// The pkg is allowed to contain type errors. +func TooNewStdSymbols(pkg *types.Package, version string) map[types.Object]string { + disallowed := make(map[types.Object]string) + + // Pass 1: package-level symbols. 
+ symbols := stdlib.PackageSymbols[pkg.Path()] + for _, sym := range symbols { + symver := sym.Version.String() + if versions.Before(version, symver) { + switch sym.Kind { + case stdlib.Func, stdlib.Var, stdlib.Const, stdlib.Type: + disallowed[pkg.Scope().Lookup(sym.Name)] = symver + } + } + } + + // Pass 2: fields and methods. + // + // We allow fields and methods if their associated type is + // disallowed, as otherwise we would report false positives + // for compatibility shims. Consider: + // + // //go:build go1.22 + // type T struct { F std.Real } // correct new API + // + // //go:build !go1.22 + // type T struct { F fake } // shim + // type fake struct { ... } + // func (fake) M () {} + // + // These alternative declarations of T use either the std.Real + // type, introduced in go1.22, or a fake type, for the field + // F. (The fakery could be arbitrarily deep, involving more + // nested fields and methods than are shown here.) Clients + // that use the compatibility shim T will compile with any + // version of go, whether older or newer than go1.22, but only + // the newer version will use the std.Real implementation. + // + // Now consider a reference to method M in new(T).F.M() in a + // module that requires a minimum of go1.21. The analysis may + // occur using a version of Go higher than 1.21, selecting the + // first version of T, so the method M is Real.M. This would + // spuriously cause the analyzer to report a reference to a + // too-new symbol even though this expression compiles just + // fine (with the fake implementation) using go1.21. + for _, sym := range symbols { + symVersion := sym.Version.String() + if !versions.Before(version, symVersion) { + continue // allowed + } + + var obj types.Object + switch sym.Kind { + case stdlib.Field: + typename, name := sym.SplitField() + if t := pkg.Scope().Lookup(typename); t != nil && disallowed[t] == "" { + obj, _, _ = types.LookupFieldOrMethod(t.Type(), false, pkg, name) + } + + case stdlib.Method: + ptr, recvname, name := sym.SplitMethod() + if t := pkg.Scope().Lookup(recvname); t != nil && disallowed[t] == "" { + obj, _, _ = types.LookupFieldOrMethod(t.Type(), ptr, pkg, name) + } + } + if obj != nil { + disallowed[obj] = symVersion + } + } + + return disallowed +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/types.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/types.go index ce7d4351b..7c77c2fbc 100644 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -48,5 +48,3 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, } return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true } - -var SetGoVersion = func(conf *types.Config, version string) bool { return false } diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/types_118.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/types_118.go deleted file mode 100644 index a42b072a6..000000000 --- a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/typesinternal/types_118.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.18 -// +build go1.18 - -package typesinternal - -import ( - "go/types" -) - -func init() { - SetGoVersion = func(conf *types.Config, version string) bool { - conf.GoVersion = version - return true - } -} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/features.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/features.go new file mode 100644 index 000000000..b53f17861 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/features.go @@ -0,0 +1,43 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +// This file contains predicates for working with file versions to +// decide when a tool should consider a language feature enabled. + +// GoVersions that features in x/tools can be gated to. +const ( + Go1_18 = "go1.18" + Go1_19 = "go1.19" + Go1_20 = "go1.20" + Go1_21 = "go1.21" + Go1_22 = "go1.22" +) + +// Future is an invalid unknown Go version sometime in the future. +// Do not use directly with Compare. +const Future = "" + +// AtLeast reports whether the file version v comes after a Go release. +// +// Use this predicate to enable a behavior once a certain Go release +// has happened (and stays enabled in the future). +func AtLeast(v, release string) bool { + if v == Future { + return true // an unknown future version is always after y. + } + return Compare(Lang(v), Lang(release)) >= 0 +} + +// Before reports whether the file version v is strictly before a Go release. +// +// Use this predicate to disable a behavior once a certain Go release +// has happened (and stays enabled in the future). +func Before(v, release string) bool { + if v == Future { + return false // an unknown future version happens after y. + } + return Compare(Lang(v), Lang(release)) < 0 +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/gover.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/gover.go new file mode 100644 index 000000000..bbabcd22e --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/gover.go @@ -0,0 +1,172 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is a fork of internal/gover for use by x/tools until +// go1.21 and earlier are no longer supported by x/tools. + +package versions + +import "strings" + +// A gover is a parsed Go gover: major[.Minor[.Patch]][kind[pre]] +// The numbers are the original decimal strings to avoid integer overflows +// and since there is very little actual math. (Probably overflow doesn't matter in practice, +// but at the time this code was written, there was an existing test that used +// go1.99999999999, which does not fit in an int on 32-bit platforms. +// The "big decimal" representation avoids the problem entirely.) +type gover struct { + major string // decimal + minor string // decimal or "" + patch string // decimal or "" + kind string // "", "alpha", "beta", "rc" + pre string // decimal or "" +} + +// compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as toolchain versions. +// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21". +// Malformed versions compare less than well-formed versions and equal to each other. 
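The AtLeast and Before predicates above, together with the compare and lang helpers that follow, are a vendored stand-in kept only while x/tools still builds on pre-1.22 toolchains; on Go 1.22 and later (the version this change moves the builds to) the standard library's go/version package offers the same operations. A minimal sketch for orientation, assuming only the standard library:

package main

import (
	"fmt"
	"go/version"
)

func main() {
	fmt.Println(version.Lang("go1.21rc2"))             // go1.21
	fmt.Println(version.IsValid("1.21"))               // false: the "go" prefix is required
	fmt.Println(version.Compare("go1.21", "go1.21.0")) // -1: the language version sorts before its releases

	// The Before predicate above is essentially this, ignoring its special
	// handling of the empty Future value.
	before := func(v, release string) bool {
		return version.Compare(version.Lang(v), version.Lang(release)) < 0
	}
	fmt.Println(before("go1.21", "go1.22")) // true
}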
+// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0". +func compare(x, y string) int { + vx := parse(x) + vy := parse(y) + + if c := cmpInt(vx.major, vy.major); c != 0 { + return c + } + if c := cmpInt(vx.minor, vy.minor); c != 0 { + return c + } + if c := cmpInt(vx.patch, vy.patch); c != 0 { + return c + } + if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc + return c + } + if c := cmpInt(vx.pre, vy.pre); c != 0 { + return c + } + return 0 +} + +// lang returns the Go language version. For example, lang("1.2.3") == "1.2". +func lang(x string) string { + v := parse(x) + if v.minor == "" || v.major == "1" && v.minor == "0" { + return v.major + } + return v.major + "." + v.minor +} + +// isValid reports whether the version x is valid. +func isValid(x string) bool { + return parse(x) != gover{} +} + +// parse parses the Go version string x into a version. +// It returns the zero version if x is malformed. +func parse(x string) gover { + var v gover + + // Parse major version. + var ok bool + v.major, x, ok = cutInt(x) + if !ok { + return gover{} + } + if x == "" { + // Interpret "1" as "1.0.0". + v.minor = "0" + v.patch = "0" + return v + } + + // Parse . before minor version. + if x[0] != '.' { + return gover{} + } + + // Parse minor version. + v.minor, x, ok = cutInt(x[1:]) + if !ok { + return gover{} + } + if x == "" { + // Patch missing is same as "0" for older versions. + // Starting in Go 1.21, patch missing is different from explicit .0. + if cmpInt(v.minor, "21") < 0 { + v.patch = "0" + } + return v + } + + // Parse patch if present. + if x[0] == '.' { + v.patch, x, ok = cutInt(x[1:]) + if !ok || x != "" { + // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != ""). + // Allowing them would be a bit confusing because we already have: + // 1.21 < 1.21rc1 + // But a prerelease of a patch would have the opposite effect: + // 1.21.3rc1 < 1.21.3 + // We've never needed them before, so let's not start now. + return gover{} + } + return v + } + + // Parse prerelease. + i := 0 + for i < len(x) && (x[i] < '0' || '9' < x[i]) { + if x[i] < 'a' || 'z' < x[i] { + return gover{} + } + i++ + } + if i == 0 { + return gover{} + } + v.kind, x = x[:i], x[i:] + if x == "" { + return v + } + v.pre, x, ok = cutInt(x) + if !ok || x != "" { + return gover{} + } + + return v +} + +// cutInt scans the leading decimal number at the start of x to an integer +// and returns that value and the rest of the string. +func cutInt(x string) (n, rest string, ok bool) { + i := 0 + for i < len(x) && '0' <= x[i] && x[i] <= '9' { + i++ + } + if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero + return "", "", false + } + return x[:i], x[i:], true +} + +// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers. +// (Copied from golang.org/x/mod/semver's compareInt.) +func cmpInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/toolchain.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/toolchain.go new file mode 100644 index 000000000..377bf7a53 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/toolchain.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +// toolchain is maximum version (<1.22) that the go toolchain used +// to build the current tool is known to support. +// +// When a tool is built with >=1.22, the value of toolchain is unused. +// +// x/tools does not support building with go <1.18. So we take this +// as the minimum possible maximum. +var toolchain string = Go1_18 diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go new file mode 100644 index 000000000..f65beed9d --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package versions + +func init() { + if Compare(toolchain, Go1_19) < 0 { + toolchain = Go1_19 + } +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go new file mode 100644 index 000000000..1a9efa126 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 +// +build go1.20 + +package versions + +func init() { + if Compare(toolchain, Go1_20) < 0 { + toolchain = Go1_20 + } +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go new file mode 100644 index 000000000..b7ef216df --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 +// +build go1.21 + +package versions + +func init() { + if Compare(toolchain, Go1_21) < 0 { + toolchain = Go1_21 + } +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/types.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/types.go new file mode 100644 index 000000000..562eef21f --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/types.go @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import ( + "go/types" +) + +// GoVersion returns the Go version of the type package. +// It returns zero if no version can be determined. +func GoVersion(pkg *types.Package) string { + // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. 
+ if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { + return pkg.GoVersion() + } + return "" +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/types_go121.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/types_go121.go new file mode 100644 index 000000000..b4345d334 --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/types_go121.go @@ -0,0 +1,30 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package versions + +import ( + "go/ast" + "go/types" +) + +// FileVersion returns a language version (<=1.21) derived from runtime.Version() +// or an unknown future version. +func FileVersion(info *types.Info, file *ast.File) string { + // In x/tools built with Go <= 1.21, we do not have Info.FileVersions + // available. We use a go version derived from the toolchain used to + // compile the tool by default. + // This will be <= go1.21. We take this as the maximum version that + // this tool can support. + // + // There are no features currently in x/tools that need to tell fine grained + // differences for versions <1.22. + return toolchain +} + +// InitFileVersions is a noop when compiled with this Go version. +func InitFileVersions(*types.Info) {} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/types_go122.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/types_go122.go new file mode 100644 index 000000000..aac5db62c --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/types_go122.go @@ -0,0 +1,41 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package versions + +import ( + "go/ast" + "go/types" +) + +// FileVersion returns a file's Go version. +// The reported version is an unknown Future version if a +// version cannot be determined. +func FileVersion(info *types.Info, file *ast.File) string { + // In tools built with Go >= 1.22, the Go version of a file + // follow a cascades of sources: + // 1) types.Info.FileVersion, which follows the cascade: + // 1.a) file version (ast.File.GoVersion), + // 1.b) the package version (types.Config.GoVersion), or + // 2) is some unknown Future version. + // + // File versions require a valid package version to be provided to types + // in Config.GoVersion. Config.GoVersion is either from the package's module + // or the toolchain (go run). This value should be provided by go/packages + // or unitchecker.Config.GoVersion. + if v := info.FileVersions[file]; IsValid(v) { + return v + } + // Note: we could instead return runtime.Version() [if valid]. + // This would act as a max version on what a tool can support. + return Future +} + +// InitFileVersions initializes info to record Go versions for Go files. 
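A minimal sketch of where FileVersion's inputs come from on a go1.22+ toolchain, assuming only the standard library: the package version is supplied through types.Config.GoVersion (normally by go/packages or unitchecker.Config.GoVersion, as noted above), and go/types records a per-file entry in Info.FileVersions, the map that InitFileVersions allocates:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p

func F() {}
`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// InitFileVersions amounts to allocating this map before type checking.
	info := &types.Info{FileVersions: map[*ast.File]string{}}
	conf := types.Config{GoVersion: "go1.22"}
	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	// With no //go:build override in the file, the recorded entry is normally
	// the package version, and FileVersion above returns it because it is valid.
	fmt.Println(info.FileVersions[file]) // typically "go1.22"
}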
+func InitFileVersions(info *types.Info) { + info.FileVersions = make(map[*ast.File]string) +} diff --git a/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/versions.go b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/versions.go new file mode 100644 index 000000000..8d1f7453d --- /dev/null +++ b/MobileLibrary/go-mobile/vendor/golang.org/x/tools/internal/versions/versions.go @@ -0,0 +1,57 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import ( + "strings" +) + +// Note: If we use build tags to use go/versions when go >=1.22, +// we run into go.dev/issue/53737. Under some operations users would see an +// import of "go/versions" even if they would not compile the file. +// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include +// For this reason, this library just a clone of go/versions for the moment. + +// Lang returns the Go language version for version x. +// If x is not a valid version, Lang returns the empty string. +// For example: +// +// Lang("go1.21rc2") = "go1.21" +// Lang("go1.21.2") = "go1.21" +// Lang("go1.21") = "go1.21" +// Lang("go1") = "go1" +// Lang("bad") = "" +// Lang("1.21") = "" +func Lang(x string) string { + v := lang(stripGo(x)) + if v == "" { + return "" + } + return x[:2+len(v)] // "go"+v without allocation +} + +// Compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as Go versions. +// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". +// Invalid versions, including the empty string, compare less than +// valid versions and equal to each other. +// The language version "go1.21" compares less than the +// release candidate and eventual releases "go1.21rc1" and "go1.21.0". +// Custom toolchain suffixes are ignored during comparison: +// "go1.21.0" and "go1.21.0-bigcorp" are equal. +func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) } + +// IsValid reports whether the version x is valid. +func IsValid(x string) bool { return isValid(stripGo(x)) } + +// stripGo converts from a "go1.21" version to a "1.21" version. +// If v does not start with "go", stripGo returns the empty string (a known invalid version). +func stripGo(v string) string { + v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix. 
+ if len(v) < 2 || v[:2] != "go" { + return "" + } + return v[2:] +} diff --git a/MobileLibrary/go-mobile/vendor/modules.txt b/MobileLibrary/go-mobile/vendor/modules.txt index 7a6122e92..7aa3c1312 100644 --- a/MobileLibrary/go-mobile/vendor/modules.txt +++ b/MobileLibrary/go-mobile/vendor/modules.txt @@ -1,5 +1,5 @@ -# golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56 -## explicit; go 1.12 +# golang.org/x/exp/shiny v0.0.0-20230817173708-d852ddb80c63 +## explicit; go 1.18 golang.org/x/exp/shiny/driver/gldriver golang.org/x/exp/shiny/driver/internal/drawer golang.org/x/exp/shiny/driver/internal/errscreen @@ -8,42 +8,43 @@ golang.org/x/exp/shiny/driver/internal/lifecycler golang.org/x/exp/shiny/driver/internal/win32 golang.org/x/exp/shiny/driver/internal/x11key golang.org/x/exp/shiny/screen -# golang.org/x/image v0.0.0-20190802002840-cff245a6509b -## explicit; go 1.12 +# golang.org/x/image v0.17.0 +## explicit; go 1.18 golang.org/x/image/draw golang.org/x/image/math/f64 -# golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 -## explicit; go 1.17 +# golang.org/x/mod v0.18.0 +## explicit; go 1.18 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde -## explicit +# golang.org/x/sync v0.7.0 +## explicit; go 1.18 golang.org/x/sync/errgroup -# golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f -## explicit; go 1.17 -golang.org/x/sys/execabs -golang.org/x/sys/internal/unsafeheader -golang.org/x/sys/windows -# golang.org/x/tools v0.1.12 +# golang.org/x/sys v0.21.0 ## explicit; go 1.18 +golang.org/x/sys/windows +# golang.org/x/tools v0.22.0 +## explicit; go 1.19 golang.org/x/tools/go/expect golang.org/x/tools/go/gcexportdata -golang.org/x/tools/go/internal/gcimporter golang.org/x/tools/go/internal/packagesdriver -golang.org/x/tools/go/internal/pkgbits golang.org/x/tools/go/packages golang.org/x/tools/go/packages/packagestest +golang.org/x/tools/go/types/objectpath +golang.org/x/tools/internal/aliases golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand -golang.org/x/tools/internal/lsp/bug +golang.org/x/tools/internal/goroot golang.org/x/tools/internal/packagesinternal +golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/proxydir -golang.org/x/tools/internal/span +golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/testenv -golang.org/x/tools/internal/typeparams +golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typesinternal +golang.org/x/tools/internal/versions diff --git a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h index 7aa5b1d4e..9106665db 100644 --- a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h +++ b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h @@ -299,12 +299,31 @@ WWAN or vice versa or VPN state changed */ - (void)onApplicationParameters:(NSDictionary * _Nonnull)parameters; +/*! + Called when tunnel-core emits a message to be displayed to the in-proxy operator + @param message The operator message received. + */ +- (void)onInproxyOperatorMessage:(NSString * _Nonnull)message; +/*! + Called when tunnel-core reports in-proxy usage statistics + By default onInproxyProxyActivity is disabled. 
Enable it by setting + EmitInproxyProxyActivity to true in the Psiphon config. + @param connectingClients Number of clients connecting to the proxy. + @param connectedClients Number of clients currently connected to the proxy. + @param bytesUp Bytes uploaded through the proxy since the last report. + @param bytesDown Bytes downloaded through the proxy since the last report. + */ +- (void)onInproxyProxyActivity:(int)connectingClients + connectedClients:(int)connectedClients + bytesUp:(long)bytesUp + bytesDown:(long)bytesDown; /*! Called when tunnel-core reports connected server region information @param region The server region received. */ - (void)onConnectedServerRegion:(NSString * _Nonnull)region; + @end /*! diff --git a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m index f8c3b3c4e..268e0fc6d 100644 --- a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m +++ b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m @@ -1174,6 +1174,33 @@ - (void)handlePsiphonNotice:(NSString * _Nonnull)noticeJSON { }); } } + else if ([noticeType isEqualToString:@"InproxyOperatorMessage"]) { + id message = [notice valueForKeyPath:@"data.message"]; + if (![message isKindOfClass:[NSString class]]) { + [self logMessage:[NSString stringWithFormat: @"InproxyOperatorMessage notice missing data.message: %@", noticeJSON]]; + return; + } + if ([self.tunneledAppDelegate respondsToSelector:@selector(onInproxyOperatorMessage:)]) { + dispatch_sync(self->callbackQueue, ^{ + [self.tunneledAppDelegate onInproxyOperatorMessage:message]; + }); + } + } + else if ([noticeType isEqualToString:@"InproxyProxyActivity"]) { + id connectingClients = [notice valueForKeyPath:@"data.connectingClients"]; + id connectedClients = [notice valueForKeyPath:@"data.connectedClients"]; + id bytesUp = [notice valueForKeyPath:@"data.bytesUp"]; + id bytesDown = [notice valueForKeyPath:@"data.bytesDown"]; + if (![connectingClients isKindOfClass:[NSNumber class]] || ![connectedClients isKindOfClass:[NSNumber class]] || ![bytesUp isKindOfClass:[NSNumber class]] || ![bytesDown isKindOfClass:[NSNumber class]]) { + [self logMessage:[NSString stringWithFormat: @"InproxyProxyActivity notice has invalid data types: %@", noticeJSON]]; + return; + } + if ([self.tunneledAppDelegate respondsToSelector:@selector(onInproxyProxyActivity:connectedClients:bytesUp:bytesDown:)]) { + dispatch_sync(self->callbackQueue, ^{ + [self.tunneledAppDelegate onInproxyProxyActivity:[connectingClients intValue] connectedClients:[connectedClients intValue] bytesUp:[bytesUp longValue] bytesDown:[bytesDown longValue]]; + }); + } + } else if ([noticeType isEqualToString:@"ConnectedServerRegion"]) { id region = [notice valueForKeyPath:@"data.serverRegion"]; if (![region isKindOfClass:[NSString class]]) { diff --git a/MobileLibrary/iOS/build-psiphon-framework.sh b/MobileLibrary/iOS/build-psiphon-framework.sh index 37e346358..d3a6a6ebb 100755 --- a/MobileLibrary/iOS/build-psiphon-framework.sh +++ b/MobileLibrary/iOS/build-psiphon-framework.sh @@ -17,7 +17,7 @@ set -e -u -x if [ -z ${1+x} ]; then BUILD_TAGS=""; else BUILD_TAGS="$1"; fi # Modify this value as we use newer Go versions. 
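The handlePsiphonNotice additions above pick the in-proxy fields out of tunnel-core's notice JSON. For reference, a sketch of the payload shapes those key paths imply, written in Go since that is tunnel-core's language; the struct and field names are illustrative only, derived from the data.* keys the handler reads and the usual noticeType/data notice envelope:

package main

import (
	"encoding/json"
	"fmt"
)

// notice is the envelope shape the handler reads a noticeType and data from.
type notice struct {
	NoticeType string          `json:"noticeType"`
	Data       json.RawMessage `json:"data"`
}

// inproxyProxyActivity mirrors the data.* keys read for InproxyProxyActivity.
type inproxyProxyActivity struct {
	ConnectingClients int   `json:"connectingClients"`
	ConnectedClients  int   `json:"connectedClients"`
	BytesUp           int64 `json:"bytesUp"`
	BytesDown         int64 `json:"bytesDown"`
}

func main() {
	// Sample values for illustration only.
	line := []byte(`{"noticeType":"InproxyProxyActivity","data":{"connectingClients":1,"connectedClients":3,"bytesUp":1024,"bytesDown":4096}}`)
	var n notice
	if err := json.Unmarshal(line, &n); err != nil {
		panic(err)
	}
	if n.NoticeType == "InproxyProxyActivity" {
		var a inproxyProxyActivity
		if err := json.Unmarshal(n.Data, &a); err != nil {
			panic(err)
		}
		fmt.Printf("connecting=%d connected=%d up=%d down=%d\n",
			a.ConnectingClients, a.ConnectedClients, a.BytesUp, a.BytesDown)
	}
}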
-GO_VERSION_REQUIRED="1.21.9" +GO_VERSION_REQUIRED="1.22.4" # At this time, psiphon-tunnel-core doesn't support modules export GO111MODULE=off diff --git a/MobileLibrary/psi/psi.go b/MobileLibrary/psi/psi.go index 896e96b54..180f15aa4 100644 --- a/MobileLibrary/psi/psi.go +++ b/MobileLibrary/psi/psi.go @@ -488,7 +488,7 @@ func GetPacketTunnelMTU() int { // If called before Start, log notices will emit to stderr. func WriteRuntimeProfiles(outputDirectory string, cpuSampleDurationSeconds, blockSampleDurationSeconds int) { common.WriteRuntimeProfiles( - psiphon.NoticeCommonLogger(), + psiphon.NoticeCommonLogger(false), outputDirectory, "", cpuSampleDurationSeconds, diff --git a/README.md b/README.md index b18719456..5f8c7fe6b 100644 --- a/README.md +++ b/README.md @@ -165,55 +165,5 @@ go: added github.com/Psiphon-Labs/psiphon-tunnel-core v1.0.11-0.20240424194431-3 Acknowledgements -------------------------------------------------------------------------------- -Psiphon Tunnel Core uses: - -* [Go](https://golang.org) -* [agl/ed25519](https://github.com/agl/ed25519) -* [AndreasBriese/bbloom](https://github.com/AndreasBriese/bbloom) -* [aristanetworks/goarista/monotime](https://github.com/aristanetworks/goarista) -* [armon/go-proxyproto](https://github.com/armon/go-proxyproto) -* [armon/go-socks](https://github.com/armon/go-socks5) -* [bifurcation/mint](https://github.com/bifurcation/mint) -* [boltdb/bolt](https://github.com/boltdb/bolt) -* [cheekybits/genny/generic](https://github.com/cheekybits/genny/generic) -* [codahale/sss](https://github.com/codahale/sss) -* [cognusion/go-cache-lru](https://github.com/cognusion/go-cache-lru) -* [creack/goselect](https://github.com/creack/goselect) -* [davecgh/go-spew/spew](https://github.com/davecgh/go-spew/spew) -* [deckarep/golang-set](https://github.com/deckarep/golang-set) -* [dgraph-io/badger](https://github.com/dgraph-io/badger) -* [dgryski/go-farm](https://github.com/dgryski/go-farm) -* [elazarl/goproxy](https://github.com/elazarl/goproxy) -* [florianl/go-nfqueue](https://github.com/florianl/go-nfqueue) -* [gobwas/glob](https://github.com/gobwas/glob) -* [golang/protobuf](https://github.com/golang/protobuf) -* [google/gopacket](https://github.com/google/gopacket) -* [grafov/m3u8](https://github.com/grafov/m3u8) -* [hashicorp/golang-lru](https://github.com/hashicorp/golang-lru) -* [juju/ratelimit](https://github.com/juju/ratelimit) -* [kardianos/osext](https://github.com/kardianos/osext) -* [groupcache/lru]("github.com/golang/groupcache/lru") -* [lucas-clemente/quic-go](https://github.com/lucas-clemente/quic-go) -* [marusama/semaphore](https://github.com/marusama/semaphore) -* [mdlayher/netlink)](https://github.com/mdlayher/netlink) -* [miekg/dns](https://github.com/miekg/dns) -* [mitchellh/panicwrap](https://github.com/mitchellh/panicwrap) -* [oschwald/maxminddb-golang](https://github.com/oschwald/maxminddb-golang) -* [patrickmn/go-cache](https://github.com/patrickmn/go-cache) -* [pkg/errors](https://github.com/pkg/errors) -* [pmezard/go-difflib](https://github.com/pmezard/go-difflib) -* [refraction-networking/gotapdance](https://github.com/refraction-networking/gotapdance) -* [refraction-networking/utls](https://github.com/refraction-networking/utls) -* [ryanuber/go-glob](https://github.com/ryanuber/go-glob) -* [sergeyfrolov/bsbuffer](https://github.com/sergeyfrolov/bsbuffer) -* 
[sirupsen/logrus](https://github.com/sirupsen/logrus) -* [stretchr/testify](https://github.com/stretchr/testify) -* [syndtr/gocapability/capability](https://github.com/syndtr/gocapability/capability) -* [ThomsonReutersEikon/go-ntlm](https://github.com/ThomsonReutersEikon/go-ntlm) -* [wader/filtertransport](https://github.com/wader/filtertransport) -* [Yawning/chacha20](https://github.com/Yawning/chacha20) -* [Yawning/goptlib](https://github.com/Yawning/goptlib) -* [yawning/obfs4](https://gitlab.com/yawning/obfs4) -* [zach-klippenstein/goregen](https://github.com/zach-klippenstein/goregen) -* [zap](https://go.uber.org/zap) +Psiphon Tunnel Core uses the following Go modules: https://github.com/Psiphon-Labs/psiphon-tunnel-core/blob/master/go.mod diff --git a/Server/Dockerfile-binary-builder b/Server/Dockerfile-binary-builder index 3cf656743..ce369eec0 100644 --- a/Server/Dockerfile-binary-builder +++ b/Server/Dockerfile-binary-builder @@ -1,6 +1,6 @@ FROM alpine:3.18.4 -ENV GOLANG_VERSION 1.21.9 +ENV GOLANG_VERSION 1.22.4 ENV GOLANG_SRC_URL https://golang.org/dl/go$GOLANG_VERSION.src.tar.gz RUN set -ex \ diff --git a/Server/main.go b/Server/main.go index 46053ef85..a92fb5629 100644 --- a/Server/main.go +++ b/Server/main.go @@ -166,8 +166,6 @@ func main() { &server.GenerateConfigParams{ LogFilename: generateLogFilename, ServerIPAddress: serverIPaddress, - EnableSSHAPIRequests: true, - WebServerPort: generateWebServerPort, TunnelProtocolPorts: tunnelProtocolPorts, TrafficRulesConfigFilename: generateTrafficRulesConfigFilename, OSLConfigFilename: generateOSLConfigFilename, diff --git a/go.mod b/go.mod index 9a6c87f30..bb184c77a 100644 --- a/go.mod +++ b/go.mod @@ -2,11 +2,35 @@ module github.com/Psiphon-Labs/psiphon-tunnel-core go 1.21 +// The following replace is required only when the build tag +// PSIPHON_ENABLE_REFRACTION_NETWORKING is specified. + replace gitlab.com/yawning/obfs4.git => github.com/jmwample/obfs4 v0.0.0-20230725223418-2d2e5b4a16ba -replace github.com/pion/dtls/v2 => github.com/mingyech/dtls/v2 v2.0.0 +// When this is the main module, github.com/pion/dtls/v2, used by +// psiphon/common/inproxy via pion/webrtc, is replaced with a fork +// which adds support for optional DTLS ClientHello randomization. +// This fork is currently based on https://github.com/pion/dtls v2.2.7. +// +// This fork also includes the mingyech/dtls Conjure customizations. +// +// In addition, ice/v2 and webrtc/v3 are replaced by forks, based on +// github.com/pion/ice/v2 v2.3.24 and github.com/pion/webrtc/v3 v3.2.40 +// respectively, containing Psiphon customizations. See comments in +// psiphon/common/inproxy/newWebRTCConn for details. +// +// The following replaces are required only when the build tags +// PSIPHON_ENABLE_REFRACTION_NETWORKING (dtls/v2 only) or +// PSIPHON_ENABLE_INPROXY are specified. 
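The go.mod comment above ties the dtls, ice, and webrtc replace directives to specific build tags. For readers unfamiliar with tag-gated builds, the following file sketches the standard Go pattern that such tags rely on; the file name, package, and contents are hypothetical illustrations, not files from this repository.

// Illustrative sketch only: code guarded by a //go:build constraint is
// compiled only when the matching tag (here PSIPHON_ENABLE_INPROXY) is
// supplied, e.g. via go build -tags "PSIPHON_ENABLE_INPROXY". Without the
// tag, this file is excluded and its imports are not linked.

//go:build PSIPHON_ENABLE_INPROXY

package example

const inproxyEnabled = true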
+ +replace github.com/pion/dtls/v2 => ./replace/dtls + +replace github.com/pion/ice/v2 => ./replace/ice + +replace github.com/pion/webrtc/v3 => ./replace/webrtc require ( + filippo.io/edwards25519 v1.1.0 github.com/Psiphon-Inc/rotate-safe-writer v0.0.0-20210303140923-464a7a37606e github.com/Psiphon-Labs/bolt v0.0.0-20200624191537-23cedaef7ad7 github.com/Psiphon-Labs/consistent v0.0.0-20240322131436-20aaa4e05737 @@ -15,6 +39,7 @@ require ( github.com/Psiphon-Labs/quic-go v0.0.0-20240424181006-45545f5e1536 github.com/armon/go-proxyproto v0.0.0-20180202201750-5b7edb60ff5f github.com/bifurcation/mint v0.0.0-20180306135233-198357931e61 + github.com/bits-and-blooms/bloom/v3 v3.6.0 github.com/cespare/xxhash v1.1.0 github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9 github.com/cognusion/go-cache-lru v0.0.0-20170419142635-f73e2280ecea @@ -23,8 +48,11 @@ require ( github.com/elazarl/goproxy v0.0.0-20200809112317-0581fc3aee2d github.com/elazarl/goproxy/ext v0.0.0-20200809112317-0581fc3aee2d github.com/florianl/go-nfqueue v1.1.1-0.20200829120558-a2f196e98ab0 + github.com/flynn/noise v1.0.1-0.20220214164934-d803f5c4b0f4 + github.com/fxamacker/cbor/v2 v2.5.0 + github.com/gammazero/deque v0.2.1 github.com/gobwas/glob v0.2.4-0.20180402141543-f00a7392b439 - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/google/gopacket v1.1.19 github.com/grafov/m3u8 v0.0.0-20171211212457-6ab8f28ed427 github.com/marusama/semaphore v0.0.0-20171214154724-565ffd8e868a @@ -32,64 +60,90 @@ require ( github.com/mitchellh/panicwrap v0.0.0-20170106182340-fce601fe5557 github.com/oschwald/maxminddb-golang v1.12.0 github.com/patrickmn/go-cache v2.1.0+incompatible - github.com/pion/sctp v1.8.8 + github.com/pion/datachannel v1.5.5 + github.com/pion/dtls/v2 v2.2.7 + github.com/pion/ice/v2 v2.3.24 + github.com/pion/logging v0.2.2 + github.com/pion/sctp v1.8.16 + github.com/pion/sdp/v3 v3.0.9 + github.com/pion/stun v0.6.1 + github.com/pion/transport/v2 v2.2.4 + github.com/pion/webrtc/v3 v3.2.40 github.com/refraction-networking/conjure v0.7.11-0.20240130155008-c8df96195ab2 github.com/refraction-networking/gotapdance v1.7.10 github.com/refraction-networking/utls v1.3.3 github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 + github.com/shirou/gopsutil/v4 v4.24.5 github.com/sirupsen/logrus v1.9.3 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 github.com/wader/filtertransport v0.0.0-20200316221534-bdd9e61eee78 + github.com/wlynxg/anet v0.0.1 golang.org/x/crypto v0.22.0 golang.org/x/net v0.24.0 - golang.org/x/sync v0.2.0 - golang.org/x/sys v0.19.0 + golang.org/x/sync v0.3.0 + golang.org/x/sys v0.20.0 golang.org/x/term v0.19.0 golang.org/x/time v0.5.0 + golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b + tailscale.com v1.40.0 ) require ( filippo.io/bigmod v0.0.1 // indirect filippo.io/keygen v0.0.0-20230306160926-5201437acf8e // indirect - github.com/AndreasBriese/bbloom v0.0.0-20170702084017-28f7e881ca57 // indirect - github.com/BurntSushi/toml v1.3.2 // indirect + github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect + github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 // indirect github.com/andybalholm/brotli v1.0.5 // indirect + 
github.com/bits-and-blooms/bitset v1.10.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dchest/siphash v1.2.3 // indirect - github.com/dgryski/go-farm v0.0.0-20180109070241-2de33835d102 // indirect - github.com/flynn/noise v1.0.0 // indirect + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/gaukas/godicttls v0.0.4 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect - github.com/josharian/native v1.0.0 // indirect + github.com/google/uuid v1.3.1 // indirect + github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect + github.com/jsimonetti/rtnetlink v1.1.2-0.20220408201609-d380b505068b // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/klauspost/compress v1.16.7 // indirect github.com/libp2p/go-reuseport v0.4.0 // indirect - github.com/mdlayher/netlink v1.4.2-0.20210930205308-a81a8c23d40a // indirect - github.com/mdlayher/socket v0.0.0-20210624160740-9dbe287ded84 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/mdlayher/netlink v1.7.1 // indirect + github.com/mdlayher/socket v0.4.0 // indirect github.com/mroth/weightedrand v1.0.0 // indirect - github.com/onsi/ginkgo/v2 v2.9.5 // indirect + github.com/onsi/ginkgo/v2 v2.12.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pion/dtls/v2 v2.2.7 // indirect - github.com/pion/logging v0.2.2 // indirect + github.com/pion/interceptor v0.1.25 // indirect + github.com/pion/mdns v0.0.12 // indirect github.com/pion/randutil v0.1.0 // indirect - github.com/pion/stun v0.6.1 // indirect - github.com/pion/transport/v2 v2.2.3 // indirect + github.com/pion/rtcp v1.2.12 // indirect + github.com/pion/rtp v1.8.5 // indirect + github.com/pion/srtp/v2 v2.0.18 // indirect + github.com/pion/turn/v2 v2.1.3 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/quic-go/qpack v0.4.0 // indirect github.com/refraction-networking/ed25519 v0.1.2 // indirect github.com/refraction-networking/obfs4 v0.1.2 // indirect github.com/sergeyfrolov/bsbuffer v0.0.0-20180903213811-94e85abb8507 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib v1.5.0 // indirect + go.uber.org/mock v0.4.0 // indirect + go4.org/mem v0.0.0-20210711025021-927187094b94 // indirect golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect - golang.org/x/mod v0.10.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.9.1 // indirect + golang.org/x/tools v0.12.0 // indirect + golang.zx2c4.com/wireguard/windows v0.5.3 // indirect google.golang.org/protobuf 
v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - honnef.co/go/tools v0.2.1 // indirect ) diff --git a/go.sum b/go.sum index a29224c8b..2407fb76d 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,11 @@ filippo.io/bigmod v0.0.1 h1:OaEqDr3gEbofpnHbGqZweSL/bLMhy1pb54puiCDeuOA= filippo.io/bigmod v0.0.1/go.mod h1:KyzqAbH7bRH6MOuOF1TPfUjvLoi0mRF2bIyD2ouRNQI= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= filippo.io/keygen v0.0.0-20230306160926-5201437acf8e h1:+xwUCyMiCWKWsI0RowhzB4sngpUdMHgU6lLuWJCX5Dg= filippo.io/keygen v0.0.0-20230306160926-5201437acf8e/go.mod h1:ZGSiF/b2hd6MRghF/cid0vXw8pXykRTmIu+JSPw/NCQ= -github.com/AndreasBriese/bbloom v0.0.0-20170702084017-28f7e881ca57 h1:CVuXDbdzPW0XCNYTldy5dQues57geAs+vfwz3FTTpy8= -github.com/AndreasBriese/bbloom v0.0.0-20170702084017-28f7e881ca57/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= @@ -17,22 +18,22 @@ github.com/Psiphon-Labs/consistent v0.0.0-20240322131436-20aaa4e05737 h1:QTMy7Uc github.com/Psiphon-Labs/consistent v0.0.0-20240322131436-20aaa4e05737/go.mod h1:Enj/Gszv2zCbuRbHbabmNvfO9EM+5kmaGj8CyjwNPlY= github.com/Psiphon-Labs/goptlib v0.0.0-20200406165125-c0e32a7a3464 h1:VmnMMMheFXwLV0noxYhbJbLmkV4iaVW3xNnj6xcCNHo= github.com/Psiphon-Labs/goptlib v0.0.0-20200406165125-c0e32a7a3464/go.mod h1:Pe5BqN2DdIdChorAXl6bDaQd/wghpCleJfid2NoSli0= -github.com/Psiphon-Labs/psiphon-tls v0.0.0-20240305020009-09f917290799 h1:dHFQz6jeIr2RdtlioyGIdJw2UfKF7G+g7GYnQxhbgrk= -github.com/Psiphon-Labs/psiphon-tls v0.0.0-20240305020009-09f917290799/go.mod h1:ECTyVpleBW9oR/iHi185js4Fs7YD5T8A6tujOUzltxs= github.com/Psiphon-Labs/psiphon-tls v0.0.0-20240424193802-52b2602ec60c h1:+SEszyxW7yu+smufzSlAszj/WmOYJ054DJjb5jllulc= github.com/Psiphon-Labs/psiphon-tls v0.0.0-20240424193802-52b2602ec60c/go.mod h1:AaKKoshr8RI1LZTheeNDtNuZ39qNVPWVK4uir2c2XIs= -github.com/Psiphon-Labs/quic-go v0.0.0-20240305203241-7c4a760d03cc h1:o9jpHz1Vuum0oasqBX4kKB8VQrR+VJzEJsBg6XAz5YU= -github.com/Psiphon-Labs/quic-go v0.0.0-20240305203241-7c4a760d03cc/go.mod h1:1gvBCJ18gsMqvZXkPkq0u9/BQKvjNS5RFWwF5uLl2Ys= -github.com/Psiphon-Labs/quic-go v0.0.0-20240424180206-cfb2699b1a70 h1:xe7qczglTNDPIbQgCh2Wb/nw81uFbAMHzekkCfQc/04= -github.com/Psiphon-Labs/quic-go v0.0.0-20240424180206-cfb2699b1a70/go.mod h1:Dcp6EsbioehAvUSMTMbqFV6Z+q+IApml2Q3r8eXQS5M= github.com/Psiphon-Labs/quic-go v0.0.0-20240424181006-45545f5e1536 h1:pM5ex1QufkHV8lDR6Tc1Crk1bW5lYZjrFIJGZNBWE9k= github.com/Psiphon-Labs/quic-go v0.0.0-20240424181006-45545f5e1536/go.mod h1:2MTiPsgoOqWs3Bo6Xr3ElMBX6zzfjd3YkDFpQJLwHdQ= +github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= +github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= 
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/armon/go-proxyproto v0.0.0-20180202201750-5b7edb60ff5f h1:SaJ6yqg936TshyeFZqQE+N+9hYkIeL9AMr7S4voCl10= github.com/armon/go-proxyproto v0.0.0-20180202201750-5b7edb60ff5f/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= github.com/bifurcation/mint v0.0.0-20180306135233-198357931e61 h1:BU+NxuoaYPIvvp8NNkNlLr8aA0utGyuunf4Q3LJ0bh0= github.com/bifurcation/mint v0.0.0-20180306135233-198357931e61/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= +github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= +github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bloom/v3 v3.6.0 h1:dTU0OVLJSoOhz9m68FTXMFfA39nR8U/nTCs1zb26mOI= +github.com/bits-and-blooms/bloom/v3 v3.6.0/go.mod h1:VKlUSvp0lFIYqxJjzdnSsZEw4iHb1kOL2tfHTgyJBHg= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= @@ -42,10 +43,11 @@ github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlR github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.8.1 h1:bLSSEbBLqGPXxls55pGr5qWZaTqcmfDJHhou7t254ao= +github.com/cilium/ebpf v0.8.1/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk= github.com/cognusion/go-cache-lru v0.0.0-20170419142635-f73e2280ecea h1:9C2rdYRp8Vzwhm3sbFX0yYfB+70zKFRjn7cnPCucHSw= github.com/cognusion/go-cache-lru v0.0.0-20170419142635-f73e2280ecea/go.mod h1:MdyNkAe06D7xmJsf+MsLvbZKYNXuOHLKJrvw+x4LlcQ= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -55,8 +57,8 @@ github.com/deckarep/golang-set v0.0.0-20171013212420-1d4478f51bed h1:njG8LmGD6JC github.com/deckarep/golang-set v0.0.0-20171013212420-1d4478f51bed/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= github.com/dgraph-io/badger v1.5.4-0.20180815194500-3a87f6d9c273 h1:45qZ7jowabqhyi3l9Ervox4dhQvLGB5BJPdC8w0a77k= github.com/dgraph-io/badger v1.5.4-0.20180815194500-3a87f6d9c273/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= -github.com/dgryski/go-farm v0.0.0-20180109070241-2de33835d102 h1:afESQBXJEnj3fu+34X//E8Wg3nEbMJxJkwSc0tPePK0= -github.com/dgryski/go-farm v0.0.0-20180109070241-2de33835d102/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm 
v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/elazarl/goproxy v0.0.0-20200809112317-0581fc3aee2d h1:rtM8HsT3NG37YPjz8sYSbUSdElP9lUsQENYzJDZDUBE= @@ -66,100 +68,96 @@ github.com/elazarl/goproxy/ext v0.0.0-20200809112317-0581fc3aee2d h1:st1tmvy+4du github.com/elazarl/goproxy/ext v0.0.0-20200809112317-0581fc3aee2d/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= github.com/florianl/go-nfqueue v1.1.1-0.20200829120558-a2f196e98ab0 h1:7ZJyJV4KiWBijCCzUPvVaqxsDxO36+KD0XKBdEN3I+8= github.com/florianl/go-nfqueue v1.1.1-0.20200829120558-a2f196e98ab0/go.mod h1:2z3Tfqwv2ueuK6h563xUHRcCh1mv38wS9EjiWiesk84= -github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= -github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/flynn/noise v1.0.1-0.20220214164934-d803f5c4b0f4 h1:6pcIWmKkQZdpPjs/pD9OLt0NwftBozNE0Nm5zMCG2C4= +github.com/flynn/noise v1.0.1-0.20220214164934-d803f5c4b0f4/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= +github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= +github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/gaukas/godicttls v0.0.4 h1:NlRaXb3J6hAnTmWdsEKb9bcSBD6BvcIjdGdeb0zfXbk= github.com/gaukas/godicttls v0.0.4/go.mod h1:l6EenT4TLWgTdwslVb4sEMOCf7Bv0JAK67deKr9/NCI= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobwas/glob v0.2.4-0.20180402141543-f00a7392b439 h1:T6zlOdzrYuHf6HUKujm9bzkzbZ5Iv/xf6rs8BHZDpoI= github.com/gobwas/glob v0.2.4-0.20180402141543-f00a7392b439/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da 
h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grafov/m3u8 v0.0.0-20171211212457-6ab8f28ed427 h1:xh96CCAZTX8LJPFoOVRgTwZbn2DvJl8fyCyivohhSIg= github.com/grafov/m3u8 v0.0.0-20171211212457-6ab8f28ed427/go.mod h1:PdjzaU/pJUo4jTIn2rcgMFs+HqBGl/sPJLr8BI0Xq/I= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= -github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/josharian/native v1.0.0 h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk= github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= +github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= -github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= 
-github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR77jAZG3Y3bsb8hF6fHJbFoyFukLFOkQ98S0pQz3xw= -github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs= -github.com/jsimonetti/rtnetlink v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA= -github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U= -github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo= -github.com/jsimonetti/rtnetlink v0.0.0-20210721205614-4cc3c1489576 h1:dH/k0qzR1oouF25AoMwH6FXOr16zV4WZFcYnZGpqro0= -github.com/jsimonetti/rtnetlink v0.0.0-20210721205614-4cc3c1489576/go.mod h1:qdKhcKUxYn3/QvneOvPWXXMPqktEBHnCW98wUTA3rmA= +github.com/jsimonetti/rtnetlink v1.1.2-0.20220408201609-d380b505068b h1:Yws7RV6kZr2O7PPdT+RkbSmmOponA8i/1DuGHe8BRsM= +github.com/jsimonetti/rtnetlink v1.1.2-0.20220408201609-d380b505068b/go.mod h1:TzDCVOZKUa79z6iXbbXqhtAflVgUKaFkZ21M5tK5tzY= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/marusama/semaphore v0.0.0-20171214154724-565ffd8e868a h1:6SRny9FLB1eWasPyDUqBQnMi9NhXU01XIlB0ao89YoI= github.com/marusama/semaphore v0.0.0-20171214154724-565ffd8e868a/go.mod h1:TmeOqAKoDinfPfSohs14CO3VcEf7o+Bem6JiNe05yrQ= -github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43 h1:WgyLFv10Ov49JAQI/ZLUkCZ7VJS3r74hwFIGXJsgZlY= -github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= -github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= -github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= 
github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= -github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= -github.com/mdlayher/netlink v1.2.0/go.mod h1:kwVW1io0AZy9A1E2YYgaD4Cj+C+GPkU6klXCMzIJ9p8= -github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= -github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= -github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys= -github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= -github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q= -github.com/mdlayher/netlink v1.4.2-0.20210930205308-a81a8c23d40a h1:yk5OmRew64lWdeNanQ3l0hDgUt1E8MfipPhh/GO9Tuw= -github.com/mdlayher/netlink v1.4.2-0.20210930205308-a81a8c23d40a/go.mod h1:qw8F9IVzxa0GpqhVAfOw8DNyo7ec/jxI6bPWPEg1MV4= -github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= -github.com/mdlayher/socket v0.0.0-20210624160740-9dbe287ded84 h1:L1jnQ6o+K3M574eez7eTxbsia6H1SfJaVpaXY33L37Q= -github.com/mdlayher/socket v0.0.0-20210624160740-9dbe287ded84/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= +github.com/mdlayher/netlink v1.6.0/go.mod h1:0o3PlBmGst1xve7wQ7j/hwpNaFaH4qCRyWCdcZk8/vA= +github.com/mdlayher/netlink v1.7.1 h1:FdUaT/e33HjEXagwELR8R3/KL1Fq5x3G5jgHLp/BTmg= +github.com/mdlayher/netlink v1.7.1/go.mod h1:nKO5CSjE/DJjVhk/TNp6vCE1ktVxEA8VEh8drhZzxsQ= +github.com/mdlayher/socket v0.1.1/go.mod h1:mYV5YIZAfHh4dzDVzI8x8tWLWCliuX8Mon5Awbj+qDs= +github.com/mdlayher/socket v0.4.0 h1:280wsy40IC9M9q1uPGcLBwXpcTQDtoGwVt+BNoITxIw= +github.com/mdlayher/socket v0.4.0/go.mod h1:xxFqz5GRCUN3UEOm9CZqEJsAbe1C8OwSK46NlmWuVoc= github.com/miekg/dns v1.1.44-0.20210804161652-ab67aa642300 h1:cpzamikkKRyu3TZF14CsVFf/CmhlrqZ+7P9aVZYtXz8= github.com/miekg/dns v1.1.44-0.20210804161652-ab67aa642300/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/mingyech/dtls/v2 v2.0.0 h1:RKN1CjBs8wct3JnrcRmqnfH3BO0ocYLEhmg62uaHq+A= -github.com/mingyech/dtls/v2 v2.0.0/go.mod h1:Jvjs/Mzb6dWhNbhnobeR2HC8nmjF5AaLezvpVo+PQ1U= github.com/mitchellh/panicwrap v0.0.0-20170106182340-fce601fe5557 h1:w1QuuAA2km2Hax+EPamrq5ZRBeaNv2vsjvgB4an0zoU= github.com/mitchellh/panicwrap v0.0.0-20170106182340-fce601fe5557/go.mod h1:QuAqW7/z+iv6aWFJdrA8kCbsF0OOJVKCICqTcYBexuY= github.com/mroth/weightedrand v1.0.0 h1:V8JeHChvl2MP1sAoXq4brElOcza+jxLkRuwvtQu8L3E= github.com/mroth/weightedrand v1.0.0/go.mod h1:3p2SIcC8al1YMzGhAIoXD+r9olo/g/cdJgAD905gyNE= -github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= -github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.12.0 h1:UIVDowFPwpg6yMUpPjGkYvf06K3RAiJXUhCxEwQVHRI= +github.com/onsi/ginkgo/v2 v2.12.0/go.mod h1:ZNEzXISYlqpb8S36iN71ifqLi3vVD1rVJGvWRCJOUpQ= +github.com/onsi/gomega v1.27.10 
h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/oschwald/geoip2-golang v1.9.0 h1:uvD3O6fXAXs+usU+UGExshpdP13GAqp4GBrzN7IgKZc= github.com/oschwald/geoip2-golang v1.9.0/go.mod h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y= github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs= @@ -170,22 +168,50 @@ github.com/pebbe/zmq4 v1.2.10 h1:wQkqRZ3CZeABIeidr3e8uQZMMH5YAykA/WN0L5zkd1c= github.com/pebbe/zmq4 v1.2.10/go.mod h1:nqnPueOapVhE2wItZ0uOErngczsJdLOGkebMxaO8r48= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pion/datachannel v1.5.5 h1:10ef4kwdjije+M9d7Xm9im2Y3O6A6ccQb0zcqZcJew8= +github.com/pion/datachannel v1.5.5/go.mod h1:iMz+lECmfdCMqFRhXhcA/219B0SQlbpoR2V118yimL0= +github.com/pion/interceptor v0.1.25 h1:pwY9r7P6ToQ3+IF0bajN0xmk/fNw/suTgaTdlwTDmhc= +github.com/pion/interceptor v0.1.25/go.mod h1:wkbPYAak5zKsfpVDYMtEfWEy8D4zL+rpxCxPImLOg3Y= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= +github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= -github.com/pion/sctp v1.8.8 h1:5EdnnKI4gpyR1a1TwbiS/wxEgcUWBHsc7ILAjARJB+U= -github.com/pion/sctp v1.8.8/go.mod h1:igF9nZBrjh5AtmKc7U30jXltsFHicFCXSmWA2GWRaWs= +github.com/pion/rtcp v1.2.10/go.mod h1:ztfEwXZNLGyF1oQDttz/ZKIBaeeg/oWbRYqzBM9TL1I= +github.com/pion/rtcp v1.2.12 h1:bKWiX93XKgDZENEXCijvHRU/wRifm6JV5DGcH6twtSM= +github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= +github.com/pion/rtp v1.8.2/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/rtp v1.8.5 h1:uYzINfaK+9yWs7r537z/Rc1SvT8ILjBcmDOpJcTB+OU= +github.com/pion/rtp v1.8.5/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/sctp v1.8.5/go.mod h1:SUFFfDpViyKejTAdwD1d/HQsCu+V/40cCs2nZIvC3s0= +github.com/pion/sctp v1.8.16 h1:PKrMs+o9EMLRvFfXq59WFsC+V8mN1wnKzqrv+3D/gYY= +github.com/pion/sctp v1.8.16/go.mod h1:P6PbDVA++OJMrVNg2AL3XtYHV4uD6dvfyOovCgMs0PE= +github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= +github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= +github.com/pion/srtp/v2 v2.0.18 h1:vKpAXfawO9RtTRKZJbG4y0v1b11NZxQnxRl85kGuUlo= +github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/transport v0.14.1 h1:XSM6olwW+o8J4SCmOBb/BpwZypkHeyM0PGFCxNQBr40= +github.com/pion/transport v0.14.1/go.mod h1:4tGmbk00NeYA3rUa9+n+dzCCoKkcy3YlYb99Jn2fNnI= github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.2-0.20230802201558-f2dffd80896b/go.mod 
h1:OJg3ojoBJopjEeECq2yJdXH9YVrUJ1uQ++NjXLOUorc= -github.com/pion/transport/v2 v2.2.3 h1:XcOE3/x41HOSKbl1BfyY1TF1dERx7lVvlMCbXU7kfvA= +github.com/pion/transport/v2 v2.2.2/go.mod h1:OJg3ojoBJopjEeECq2yJdXH9YVrUJ1uQ++NjXLOUorc= github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.4 h1:41JJK6DZQYSeVLxILA2+F4ZkKb4Xd/tFJZRFZQ9QAlo= +github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= +github.com/pion/transport/v3 v3.0.2 h1:r+40RJR25S9w3jbA6/5uEPTzcdn7ncyU44RWCbHkLg4= +github.com/pion/transport/v3 v3.0.2/go.mod h1:nIToODoOlb5If2jF9y2Igfx3PFYWfuXi37m0IlWa/D0= +github.com/pion/turn/v2 v2.1.3 h1:pYxTVWG2gpC97opdRc5IGsQ1lJ9O/IlNhkzj7MMrGAA= +github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/refraction-networking/conjure v0.7.11-0.20240130155008-c8df96195ab2 h1:m2ZH6WV69otVmBpWbk8et3MypHFsjcYXTNrknQKS/PY= @@ -199,10 +225,19 @@ github.com/refraction-networking/obfs4 v0.1.2/go.mod h1:wAl/+gWiLsrcykJA3nKJHx89 github.com/refraction-networking/utls v1.3.3 h1:f/TBLX7KBciRyFH3bwupp+CE4fzoYKCirhdRcC490sw= github.com/refraction-networking/utls v1.3.3/go.mod h1:DlecWW1LMlMJu+9qpzzQqdHDT/C2LAe03EdpLUz/RL8= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 h1:7YvPJVmEeFHR1Tj9sZEYsmarJEQfMVYpd/Vyy/A8dqE= github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sergeyfrolov/bsbuffer v0.0.0-20180903213811-94e85abb8507 h1:ML7ZNtcln5UBo5Wv7RIv9Xg3Pr5VuRCWLFXEwda54Y4= github.com/sergeyfrolov/bsbuffer v0.0.0-20180903213811-94e85abb8507/go.mod h1:DbI1gxrXI2jRGw7XGEUZQOOMd6PsnKzRrCKabvvMrwM= +github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= +github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod 
h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= @@ -210,127 +245,134 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 h1:zLV6q4e8Jv9EHjNg/iHfzwDkCve6Ua5jCygptrtXHvI= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= +github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= +github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= github.com/wader/filtertransport v0.0.0-20200316221534-bdd9e61eee78 h1:9sreu9e9KOihf2Y0NbpyfWhd1XFDcL4GTkPYL4IvMrg= github.com/wader/filtertransport v0.0.0-20200316221534-bdd9e61eee78/go.mod h1:HazXTRLhXFyq80TQp7PUXi6BKE6mS+ydEdzEqNBKopQ= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/wlynxg/anet v0.0.1 h1:VbkEEgHxPSrRQSiyRd0pmrbcEQAEU2TTb8fb4DmSYoQ= +github.com/wlynxg/anet v0.0.1/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 
+github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib v1.5.0 h1:rzdY78Ox2T+VlXcxGxELF+6VyUXlZBhmRqZu5etLm+c= gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib v1.5.0/go.mod h1:70bhd4JKW/+1HLfm+TMrgHJsUHG4coelMWwiVEJ2gAg= -go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo= -go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go4.org/mem v0.0.0-20210711025021-927187094b94 h1:OAAkygi2Js191AJP1Ds42MhJRgeofeKGjuoUqNp1QC4= +go4.org/mem v0.0.0-20210711025021-927187094b94/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20221205204356-47842c84f3db h1:D/cFflL63o2KSLJIwjlcIt8PR064j/xsmdEJL/YvY/o= golang.org/x/exp v0.0.0-20221205204356-47842c84f3db/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= @@ -342,15 +384,18 @@ golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= 
-golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= -golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b h1:J1CaxgLerRR5lgx3wnr6L04cJFbWoceSK9JWBdglINo= +golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b/go.mod h1:tqur9LnfstdR9ep2LaJT4lFUl0EjlHtge+gAjmsHUG4= +golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE= +golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= @@ -358,11 +403,13 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.2.1 h1:/EPr//+UMMXwMTkXvCCoaJDq8cpjMO80Ou+L4PDo2mY= -honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +tailscale.com v1.40.0 h1:3guThaC1cCSHQ2nAk0ChQ8D5CCQmfwcQFTUcfpBrKPE= +tailscale.com v1.40.0/go.mod h1:j5vekUD4eLhLpHl/tNBps25strCOBXyiKUsdR1HhMq8= diff --git a/psiphon/TCPConn.go b/psiphon/TCPConn.go index e356f8e81..bbf924677 100644 --- a/psiphon/TCPConn.go +++ b/psiphon/TCPConn.go @@ -106,7 +106,7 @@ func DialTCP( func proxiedTcpDial( ctx context.Context, addr string, config *DialConfig) (net.Conn, error) { - interruptConns := common.NewConns() + interruptConns := common.NewConns[net.Conn]() // Note: using interruptConns to interrupt a proxy dial assumes // that the underlying proxy code will immediately exit with an @@ -259,6 +259,7 @@ func tcpDial(ctx context.Context, addr string, config *DialConfig) (net.Conn, er // Controller.establishCandidateGenerator will retry a candidate // tunnel 
server dials. + // Don't shuffle or otherwise mutate the slice returned by ResolveIP. permutedIndexes := prng.Perm(len(ipAddrs)) lastErr := errors.TraceNew("unknown error") diff --git a/psiphon/UDPConn.go b/psiphon/UDPConn.go index 1dedd1765..a757254f2 100644 --- a/psiphon/UDPConn.go +++ b/psiphon/UDPConn.go @@ -118,12 +118,12 @@ func NewUDPConn( // "udp4" or "udp6" was specified, so pick from either IPv4 or IPv6 // candidates. - prng.Shuffle(len(ipAddrs), func(i, j int) { - ipAddrs[i], ipAddrs[j] = ipAddrs[j], ipAddrs[i] - }) - for _, nextIPAddr := range ipAddrs { - if (network == "udp6") == (nextIPAddr.To4() == nil) { - ipAddr = nextIPAddr + // Don't shuffle or otherwise mutate the slice returned by ResolveIP. + permutedIndexes := prng.Perm(len(ipAddrs)) + + for _, i := range permutedIndexes { + if (network == "udp6") == (ipAddrs[i].To4() == nil) { + ipAddr = ipAddrs[i] break } } @@ -132,6 +132,9 @@ func NewUDPConn( } } + // When configured, attempt to synthesize IPv6 addresses from + // an IPv4 addresses for compatibility on DNS64/NAT64 networks. + // If synthesize fails, try the original addresses. if config.IPv6Synthesizer != nil { if ipAddr.To4() != nil { synthesizedIPAddress := config.IPv6Synthesizer.IPv6Synthesize(ipAddr.String()) @@ -152,14 +155,6 @@ func NewUDPConn( setAdditionalSocketOptions(socketFD) - if config.BPFProgramInstructions != nil { - err := setSocketBPF(config.BPFProgramInstructions, socketFD) - if err != nil { - controlErr = errors.Tracef("setSocketBPF failed: %s", err) - return - } - } - if config.DeviceBinder != nil { _, err := config.DeviceBinder.BindToDevice(socketFD) if err != nil { diff --git a/psiphon/common/accesscontrol/accesscontrol.go b/psiphon/common/accesscontrol/accesscontrol.go index a431b8e27..6aa1d3696 100644 --- a/psiphon/common/accesscontrol/accesscontrol.go +++ b/psiphon/common/accesscontrol/accesscontrol.go @@ -33,16 +33,15 @@ // An authorization is represented in JSON, which is then base64-encoded // for transport: // -// { -// "Authorization" : { -// "ID" : , -// "AccessType" : , -// "Expires" : -// }, -// "SigningKeyID" : , -// "Signature" : -// } -// +// { +// "Authorization" : { +// "ID" : , +// "AccessType" : , +// "Expires" : +// }, +// "SigningKeyID" : , +// "Signature" : +// } package accesscontrol import ( @@ -56,6 +55,7 @@ import ( "time" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/fxamacker/cbor/v2" "golang.org/x/crypto/hkdf" ) @@ -287,7 +287,6 @@ func VerifyAuthorization( } var auth Authorization - err = json.Unmarshal(signedAuth.Authorization, &auth) if err != nil { return nil, errors.Trace(err) @@ -311,3 +310,99 @@ func VerifyAuthorization( return &auth, nil } + +type packedAuthorization struct { + ID []byte `cbor:"1,keyasint,omitempty"` + AccessType string `cbor:"2,keyasint,omitempty"` + Expires time.Time `cbor:"3,keyasint,omitempty"` + SigningKeyID []byte `cbor:"4,keyasint,omitempty"` + Signature []byte `cbor:"5,keyasint,omitempty"` +} + +// PackAuthorizations re-encodes a list of authorizations using the more +// compact encoding that is used in protocol.EncodePackedAPIParameters. +func PackAuthorizations( + auths []string, + cborEncoding cbor.EncMode) ([]byte, error) { + + // Note: not using protocol.CBOREncoding directly due to import cycle. 
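	// Editorial sketch (illustrative only, not part of this change): a caller
	// would typically round-trip authorizations through the compact encoding,
	// for example with the protocol package's CBOR encoding mode:
	//
	//	packed, err := accesscontrol.PackAuthorizations(auths, protocol.CBOREncoding)
	//	if err != nil { /* handle error */ }
	//	unpacked, err := accesscontrol.UnpackAuthorizations(packed)
	//
	// unpacked is expected to contain the same authorizations as auths,
	// although the re-marshaled JSON/base64 strings are not guaranteed to be
	// byte-identical to the original encodings.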
+ + packedAuths := make([]packedAuthorization, len(auths)) + + for i, authBase64 := range auths { + + authJSON, err := base64.StdEncoding.DecodeString(authBase64) + if err != nil { + return nil, errors.Trace(err) + } + + var signedAuth signedAuthorization + err = json.Unmarshal(authJSON, &signedAuth) + if err != nil { + return nil, errors.Trace(err) + } + + var auth Authorization + err = json.Unmarshal(signedAuth.Authorization, &auth) + if err != nil { + return nil, errors.Trace(err) + } + + packedAuths[i] = packedAuthorization{ + ID: auth.ID, + AccessType: auth.AccessType, + Expires: auth.Expires, + SigningKeyID: signedAuth.SigningKeyID, + Signature: signedAuth.Signature, + } + } + + packedAuthsCBOR, err := cborEncoding.Marshal(packedAuths) + if err != nil { + return nil, errors.Trace(err) + } + + return packedAuthsCBOR, nil +} + +// UnpackAuthorizations re-encodes a list of authorizations encoded with +// PackAuthorizations back to the standard, IssueAuthorization encoding. +func UnpackAuthorizations(packedAuthsCBOR []byte) ([]string, error) { + + var packedAuths []packedAuthorization + err := cbor.Unmarshal(packedAuthsCBOR, &packedAuths) + if err != nil { + return nil, errors.Trace(err) + } + + auths := make([]string, len(packedAuths)) + + for i, packedAuth := range packedAuths { + + auth := Authorization{ + ID: packedAuth.ID, + AccessType: packedAuth.AccessType, + Expires: packedAuth.Expires, + } + + authJSON, err := json.Marshal(&auth) + if err != nil { + return nil, errors.Trace(err) + } + + signedAuth := signedAuthorization{ + Authorization: authJSON, + SigningKeyID: packedAuth.SigningKeyID, + Signature: packedAuth.Signature, + } + + signedAuthJSON, err := json.Marshal(&signedAuth) + if err != nil { + return nil, errors.Trace(err) + } + + auths[i] = base64.StdEncoding.EncodeToString(signedAuthJSON) + } + + return auths, nil +} diff --git a/psiphon/common/api.go b/psiphon/common/api.go old mode 100755 new mode 100644 diff --git a/psiphon/common/certificate.go b/psiphon/common/certificate.go index aa131872e..d66c45112 100644 --- a/psiphon/common/certificate.go +++ b/psiphon/common/certificate.go @@ -23,8 +23,10 @@ import ( "crypto/rand" "crypto/rsa" "crypto/sha1" + "crypto/sha256" "crypto/x509" "crypto/x509/pkix" + "encoding/base64" "encoding/pem" "math/big" "time" @@ -33,7 +35,9 @@ import ( ) // GenerateWebServerCertificate creates a self-signed web server certificate, -// using the specified host name (commonName). +// using the specified host name. The host name is used as the subject common +// name and a SAN DNS name. +// // This is primarily intended for use by MeekServer to generate on-the-fly, // self-signed TLS certificates for fronted HTTPS mode. In this case, the nature // of the certificate is non-circumvention; it only has to be acceptable to the @@ -46,27 +50,31 @@ import ( // Psiphon web server certificates for test/example configurations. If these Psiphon // web server certificates are used in production, the same caveats about // fingerprints apply. -func GenerateWebServerCertificate(commonName string) (string, string, error) { +// +// The verification pin return value is a hash of the certificate public key +// which is compatible with FrontingSpec.VerifyPins, and is intended for use +// in testing. 
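// Editorial sketch (not part of this change): a test could check a dialed TLS
// connection against the returned pin by recomputing the same SHA-256 digest
// over the peer leaf certificate's SubjectPublicKeyInfo:
//
//	certificate, privateKey, pin, err := common.GenerateWebServerCertificate("www.example.com")
//	// ... dial a TLS connection to a server using certificate/privateKey ...
//	peerCert := tlsConn.ConnectionState().PeerCertificates[0]
//	digest := sha256.Sum256(peerCert.RawSubjectPublicKeyInfo)
//	matched := base64.StdEncoding.EncodeToString(digest[:]) == pin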
+func GenerateWebServerCertificate(hostname string) (string, string, string, error) { // Based on https://golang.org/src/crypto/tls/generate_cert.go // TODO: use other key types: anti-fingerprint by varying params rsaKey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { - return "", "", errors.Trace(err) + return "", "", "", errors.Trace(err) } // Validity period is 1 or 2 years, starting 1 to 6 months ago. validityPeriodYears := 1 delta, err := rand.Int(rand.Reader, big.NewInt(2)) if err != nil { - return "", "", errors.Trace(err) + return "", "", "", errors.Trace(err) } validityPeriodYears += int(delta.Int64()) retroactiveMonths := 1 delta, err = rand.Int(rand.Reader, big.NewInt(6)) if err != nil { - return "", "", errors.Trace(err) + return "", "", "", errors.Trace(err) } retroactiveMonths += int(delta.Int64()) notBefore := time.Now().Truncate(time.Hour).UTC().AddDate(0, -retroactiveMonths, 0) @@ -75,24 +83,27 @@ func GenerateWebServerCertificate(commonName string) (string, string, error) { serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { - return "", "", errors.Trace(err) + return "", "", "", errors.Trace(err) } publicKeyBytes, err := x509.MarshalPKIXPublicKey(rsaKey.Public()) if err != nil { - return "", "", errors.Trace(err) + return "", "", "", errors.Trace(err) } // as per RFC3280 sec. 4.2.1.2 subjectKeyID := sha1.Sum(publicKeyBytes) var subject pkix.Name - if commonName != "" { - subject = pkix.Name{CommonName: commonName} + var dnsNames []string + if hostname != "" { + subject = pkix.Name{CommonName: hostname} + dnsNames = []string{hostname} } template := x509.Certificate{ SerialNumber: serialNumber, Subject: subject, + DNSNames: dnsNames, NotBefore: notBefore, NotAfter: notAfter, KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, @@ -111,8 +122,15 @@ func GenerateWebServerCertificate(commonName string) (string, string, error) { rsaKey.Public(), rsaKey) if err != nil { - return "", "", errors.Trace(err) + return "", "", "", errors.Trace(err) + } + + cert, err := x509.ParseCertificate(derCert) + if err != nil { + return "", "", "", errors.Trace(err) } + digest := sha256.Sum256(cert.RawSubjectPublicKeyInfo) + pin := base64.StdEncoding.EncodeToString(digest[:]) webServerCertificate := pem.EncodeToMemory( &pem.Block{ @@ -128,5 +146,5 @@ func GenerateWebServerCertificate(commonName string) (string, string, error) { }, ) - return string(webServerCertificate), string(webServerPrivateKey), nil + return string(webServerCertificate), string(webServerPrivateKey), pin, nil } diff --git a/psiphon/common/certificate_test.go b/psiphon/common/certificate_test.go index 6eac64ed9..6315e17f4 100644 --- a/psiphon/common/certificate_test.go +++ b/psiphon/common/certificate_test.go @@ -26,7 +26,9 @@ import ( func TestGenerateWebServerCertificate(t *testing.T) { - certificate, privateKey, err := GenerateWebServerCertificate("www.example.com") + // Note: pin is tested in psiphon.testTLSDialerCompatibility + + certificate, privateKey, _, err := GenerateWebServerCertificate("www.example.com") if err != nil { t.Errorf("GenerateWebServerCertificate failed: %s", err) } diff --git a/psiphon/common/crypto/ssh/handshake.go b/psiphon/common/crypto/ssh/handshake.go index 5fcf96f0f..f54e39f47 100644 --- a/psiphon/common/crypto/ssh/handshake.go +++ b/psiphon/common/crypto/ssh/handshake.go @@ -16,6 +16,7 @@ import ( // [Psiphon] + 
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" ) @@ -467,6 +468,12 @@ const ( kexStrictServer = "kex-strict-s-v00@openssh.com" ) +// [Psiphon] +// For testing only. Enables testing support for legacy clients, which have +// only the legacy algorithm lists and no weak-MAC or new-server-algos logic. +// Not safe for concurrent access. +var testLegacyClient = false + // sendKexInit sends a key change message. func (t *handshakeTransport) sendKexInit() error { t.mu.Lock() @@ -550,8 +557,8 @@ func (t *handshakeTransport) sendKexInit() error { // its KEX using the specified seed; deterministically adjust own // randomized KEX to ensure negotiation succeeds. // - // When NoEncryptThenMACHash is specified, do not use Encrypt-then-MAC has - // algorithms. + // When NoEncryptThenMACHash is specified, do not use Encrypt-then-MAC + // hash algorithms. // // Limitations: // @@ -632,6 +639,59 @@ func (t *handshakeTransport) sendKexInit() error { return list } + avoid := func(PRNG *prng.PRNG, list, avoidList, addList []string) []string { + + // Avoid negotiating items in avoidList, by moving a non-avoid + // item to the front of the list; either by swapping with a + // later, non-avoid item, or inserting a new item. + + if len(list) < 1 { + return list + } + if !common.Contains(avoidList, list[0]) { + // The first item isn't on the avoid list. + return list + } + for i := 1; i < len(list); i++ { + if !common.Contains(avoidList, list[i]) { + // Swap with a later, existing non-avoid item. + list[0], list[i] = list[i], list[0] + return list + } + } + for _, item := range permute(PRNG, addList) { + if !common.Contains(avoidList, item) { + // Insert a randomly selected non-avoid item. + return append([]string{item}, list...) + } + } + // Can't avoid. + return list + } + + addSome := func(PRNG *prng.PRNG, list, addList []string) []string { + newList := list + for _, item := range addList { + if PRNG.FlipCoin() { + index := PRNG.Range(0, len(newList)) + newList = append( + newList[:index], + append([]string{item}, newList[index:]...)...) + } + } + return newList + } + + toFront := func(list []string, item string) []string { + for index, existingItem := range list { + if existingItem == item { + list[0], list[index] = list[index], list[0] + return list + } + } + return append([]string{item}, list...) + } + firstKexAlgo := func(kexAlgos []string) (string, bool) { for _, kexAlgo := range kexAlgos { switch kexAlgo { @@ -662,10 +722,9 @@ func (t *handshakeTransport) sendKexInit() error { // server's algorithms; (b) random truncation by the server doesn't // select only new algorithms unknown to existing clients. // - // TODO: add a versioning mechanism, such as a SSHv2 capability, to - // allow for servers with new algorithm lists, where older clients - // won't try to connect to these servers, and new clients know to use - // non-legacy lists in the PeerKEXPRNGSeed mechanism. + // New algorithms are then randomly inserted only after the legacy + // lists are processed in legacy PRNG state order. 
+ legacyServerKexAlgos := []string{ kexAlgoCurve25519SHA256LibSSH, kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, @@ -681,9 +740,11 @@ func (t *handshakeTransport) sendKexInit() error { "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", } legacyServerNoEncryptThenMACs := []string{ - "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96"} - - isServer := len(t.hostKeys) > 0 + "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", + } + if t.config.NoEncryptThenMACHash { + legacyServerMACs = legacyServerNoEncryptThenMACs + } PRNG := prng.NewPRNGWithSeed(t.config.KEXPRNGSeed) @@ -691,95 +752,163 @@ func (t *handshakeTransport) sendKexInit() error { startingCiphers := msg.CiphersClientServer startingMACs := msg.MACsClientServer - if isServer { + // testLegacyClient: legacy clients are older clients which start with + // the same algorithm lists as legacyServer and have neither the + // newServer-algorithm nor the weak-MAC KEX prediction logic. + + if isServer || testLegacyClient { startingKexAlgos = legacyServerKexAlgos startingCiphers = legacyServerCiphers startingMACs = legacyServerMACs + if t.config.NoEncryptThenMACHash { + startingMACs = legacyServerNoEncryptThenMACs + } } - msg.KexAlgos = selectKexAlgos(PRNG, startingKexAlgos) + kexAlgos := selectKexAlgos(PRNG, startingKexAlgos) ciphers := truncate(PRNG, permute(PRNG, startingCiphers)) - msg.CiphersClientServer = ciphers - msg.CiphersServerClient = ciphers MACs := truncate(PRNG, permute(PRNG, startingMACs)) - msg.MACsClientServer = MACs - msg.MACsServerClient = MACs + var hostKeyAlgos []string if isServer { - msg.ServerHostKeyAlgos = permute(PRNG, msg.ServerHostKeyAlgos) + hostKeyAlgos = permute(PRNG, msg.ServerHostKeyAlgos) } else { // Must offer KeyAlgoRSA to Psiphon server. - msg.ServerHostKeyAlgos = retain( + hostKeyAlgos = retain( PRNG, truncate(PRNG, permute(PRNG, msg.ServerHostKeyAlgos)), KeyAlgoRSA) } - if !isServer && t.config.PeerKEXPRNGSeed != nil { + // To ensure compatibility with server KEX prediction in legacy + // clients, all preceeding PRNG operations must be performed in the + // given order, and all before the following operations. - // Generate the peer KEX and make adjustments if negotiation would - // fail. This assumes that PeerKEXPRNGSeed remains static (in - // Psiphon, the peer is the server and PeerKEXPRNGSeed is derived - // from the server entry); and that the PRNG is invoked in the - // exact same order on the peer (i.e., the code block immediately - // above is what the peer runs); and that the peer sets - // NoEncryptThenMACHash in the same cases. + // Avoid negotiating weak MAC algorithms. Servers will ensure that no + // weakMACs are the highest priority item. Clients will make + // adjustments after predicting the server KEX. - PeerPRNG := prng.NewPRNGWithSeed(t.config.PeerKEXPRNGSeed) + weakMACs := []string{"hmac-sha1-96"} + if isServer { + MACs = avoid(PRNG, MACs, weakMACs, startingMACs) + } + + // Randomly insert new algorithms. For servers, the preceeding legacy + // operations will ensure selection of at least one legacy algorithm + // of each type, ensuring compatibility with legacy clients. 
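	// Editorial example (illustrative only, not part of this change): the
	// avoid helper above promotes a non-avoided item to the front, e.g.
	//
	//	avoid(PRNG,
	//		[]string{"hmac-sha1-96", "hmac-sha2-256"}, // list
	//		[]string{"hmac-sha1-96"},                  // avoidList
	//		startingMACs)                              // addList
	//	// => []string{"hmac-sha2-256", "hmac-sha1-96"}
	//
	// by swapping in a later, non-avoided item; only when list contains no
	// non-avoided item does it insert one selected from addList at the front.
	// The addSome helper below inserts each new algorithm at a random
	// position, with an independent coin flip per item.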
+ + newServerKexAlgos := []string{ + kexAlgoCurve25519SHA256, kexAlgoDH16SHA512, + "kex-strict-s-v00@openssh.com", + } + newServerCiphers := []string{ + gcm256CipherID, + } + newServerMACs := []string{ + "hmac-sha2-512-etm@openssh.com", "hmac-sha2-512", + } + newServerNoEncryptThenMACs := []string{ + "hmac-sha2-512", + } + if t.config.NoEncryptThenMACHash { + newServerMACs = newServerNoEncryptThenMACs + } + + if isServer { + kexAlgos = addSome(PRNG, kexAlgos, newServerKexAlgos) + ciphers = addSome(PRNG, ciphers, newServerCiphers) + MACs = addSome(PRNG, MACs, newServerMACs) + } + + msg.KexAlgos = kexAlgos + msg.CiphersClientServer = ciphers + msg.CiphersServerClient = ciphers + msg.MACsClientServer = MACs + msg.MACsServerClient = MACs + msg.ServerHostKeyAlgos = hostKeyAlgos + + if !isServer && t.config.PeerKEXPRNGSeed != nil { + + // Generate the server KEX and make adjustments if negotiation + // would fail. This assumes that PeerKEXPRNGSeed remains static + // (in Psiphon, the peer is the server and PeerKEXPRNGSeed is + // derived from the server entry); and that the PRNG is invoked + // in the exact same order on the server (i.e., the code block + // immediately above is what the peer runs); and that the server + // sets NoEncryptThenMACHash in the same cases. + // // Note that only the client sends "ext-info-c" // and "kex-strict-c-v00@openssh.com" and only the server // sends "kex-strict-s-v00@openssh.com", so these will never // match and do not need to be filtered out before findCommon. - // - // The following assumes that the server always starts with the - // default preferredKexAlgos along with - // "kex-strict-s-v00@openssh.com" appended before randomizing. - - serverKexAlgos := append( - append([]string(nil), preferredKexAlgos...), - "kex-strict-s-v00@openssh.com") - serverCiphers := preferredCiphers - serverMACS := supportedMACs - serverNoEncryptThenMACs := noEncryptThenMACs - - // Switch to using the legacy algorithms that the server currently - // downgrades to (see comment above). - // - // TODO: for servers without legacy backwards compatibility - // concerns, skip the following lines. - serverKexAlgos = legacyServerKexAlgos - serverCiphers = legacyServerCiphers - serverMACS = legacyServerMACs - serverNoEncryptThenMACs = legacyServerNoEncryptThenMACs - serverKexAlgos = selectKexAlgos(PeerPRNG, serverKexAlgos) + PeerPRNG := prng.NewPRNGWithSeed(t.config.PeerKEXPRNGSeed) + + startingKexAlgos := legacyServerKexAlgos + startingCiphers := legacyServerCiphers + startingMACs := legacyServerMACs + if t.config.NoEncryptThenMACHash { + startingMACs = legacyServerNoEncryptThenMACs + } + + // The server populates msg.ServerHostKeyAlgos based on the host + // key type, which, for Psiphon servers, is "ssh-rsa", so + // algorithmsForKeyFormat("ssh-rsa") predicts the server + // msg.ServerHostKeyAlgos value. + startingHostKeyAlgos := algorithmsForKeyFormat("ssh-rsa") + + serverKexAlgos := selectKexAlgos(PeerPRNG, startingKexAlgos) + serverCiphers := truncate(PeerPRNG, permute(PeerPRNG, startingCiphers)) + serverMACs := truncate(PeerPRNG, permute(PeerPRNG, startingMACs)) + + if !testLegacyClient { + + // This value is not used, but the identical PRNG operation must be + // performed in order to predict the PeerPRNG state. 
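	// (Editorial note: discarding the permute result below still advances
	// PeerPRNG by the same draws the server consumes when it permutes
	// msg.ServerHostKeyAlgos, keeping the client's predicted PRNG stream
	// aligned with the server's for the avoid/addSome operations that
	// follow.)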
+ _ = permute(PeerPRNG, startingHostKeyAlgos) + + serverMACs = avoid(PeerPRNG, serverMACs, weakMACs, startingMACs) + + serverKexAlgos = addSome(PeerPRNG, serverKexAlgos, newServerKexAlgos) + serverCiphers = addSome(PeerPRNG, serverCiphers, newServerCiphers) + serverMACs = addSome(PeerPRNG, serverMACs, newServerMACs) + } + + // Adjust to ensure compatibility with the server KEX. if _, err := findCommon("", msg.KexAlgos, serverKexAlgos); err != nil { if kexAlgo, ok := firstKexAlgo(serverKexAlgos); ok { - msg.KexAlgos = retain(PRNG, msg.KexAlgos, kexAlgo) + kexAlgos = retain(PRNG, msg.KexAlgos, kexAlgo) } } - serverCiphers = truncate(PeerPRNG, permute(PeerPRNG, serverCiphers)) if _, err := findCommon("", ciphers, serverCiphers); err != nil { ciphers = retain(PRNG, ciphers, serverCiphers[0]) - msg.CiphersClientServer = ciphers - msg.CiphersServerClient = ciphers } - if t.config.NoEncryptThenMACHash { - serverMACS = serverNoEncryptThenMACs + if _, err := findCommon("", MACs, serverMACs); err != nil { + MACs = retain(PRNG, MACs, serverMACs[0]) } - serverMACS = truncate(PeerPRNG, permute(PeerPRNG, serverMACS)) - if _, err := findCommon("", MACs, serverMACS); err != nil { - MACs = retain(PRNG, MACs, serverMACS[0]) - msg.MACsClientServer = MACs - msg.MACsServerClient = MACs + // Avoid negotiating weak MAC algorithms. + // + // Legacy clients, without this logic, may still select only weak + // MACs or predict only weak MACs for the server KEX. + + commonMAC, _ := findCommon("", MACs, serverMACs) + if common.Contains(weakMACs, commonMAC) { + // serverMACs[0] is not in weakMACs. + MACs = toFront(MACs, serverMACs[0]) } + + msg.KexAlgos = kexAlgos + msg.CiphersClientServer = ciphers + msg.CiphersServerClient = ciphers + msg.MACsClientServer = MACs + msg.MACsServerClient = MACs } // Offer "zlib@openssh.com", which is offered by OpenSSH. 
Compression diff --git a/psiphon/common/crypto/ssh/randomized_kex_test.go b/psiphon/common/crypto/ssh/randomized_kex_test.go index bec716954..83742a767 100644 --- a/psiphon/common/crypto/ssh/randomized_kex_test.go +++ b/psiphon/common/crypto/ssh/randomized_kex_test.go @@ -33,15 +33,31 @@ import ( ) func TestRandomizedSSHKEXes(t *testing.T) { + err := runTestRandomizedSSHKEXes(false) + if err != nil { + t.Errorf("runTestRandomizedSSHKEXes failed: %s", err) + return + } +} + +func TestLegacyRandomizedSSHKEXes(t *testing.T) { + err := runTestRandomizedSSHKEXes(true) + if err != nil { + t.Errorf("runTestRandomizedSSHKEXes failed: %s", err) + return + } +} + +func runTestRandomizedSSHKEXes(legacyClient bool) error { rsaKey, err := rsa.GenerateKey(rand.Reader, 4096) if err != nil { - t.Fatalf("rsa.GenerateKey failed: %s", err) + return errors.Trace(err) } signer, err := NewSignerFromKey(rsaKey) if err != nil { - t.Fatalf("NewSignerFromKey failed: %s", err) + return errors.Trace(err) } publicKey := signer.PublicKey() @@ -49,6 +65,11 @@ func TestRandomizedSSHKEXes(t *testing.T) { username := "username" password := "password" + testLegacyClient = legacyClient + defer func() { + testLegacyClient = false + }() + for _, doPeerKEXPRNGSeed := range []bool{true, false} { failed := false @@ -57,17 +78,17 @@ func TestRandomizedSSHKEXes(t *testing.T) { clientSeed, err := prng.NewSeed() if err != nil { - t.Fatalf("prng.NewSeed failed: %s", err) + return errors.Trace(err) } serverSeed, err := prng.NewSeed() if err != nil { - t.Fatalf("prng.NewSeed failed: %s", err) + return errors.Trace(err) } clientConn, serverConn, err := netPipe() if err != nil { - t.Fatalf("netPipe failed: %s", err) + return errors.Trace(err) } testGroup, _ := errgroup.WithContext(context.Background()) @@ -102,6 +123,23 @@ func TestRandomizedSSHKEXes(t *testing.T) { return errors.Trace(err) } + if !legacyClient { + // Ensure weak MAC is not negotiated + for _, p := range []packetCipher{ + clientSSHConn.(*connection).transport.conn.(*transport).reader.packetCipher, + clientSSHConn.(*connection).transport.conn.(*transport).writer.packetCipher} { + switch c := p.(type) { + case *gcmCipher, *chacha20Poly1305Cipher: + // No weak MAC. + case *streamPacketCipher: + // The only weak MAC, "hmac-sha1-96", is also the only truncatingMAC. + if _, ok := c.mac.(truncatingMAC); ok { + return errors.TraceNew("weak MAC negotiated") + } + } + } + } + clientSSHConn.Close() clientConn.Close() return nil @@ -140,8 +178,7 @@ func TestRandomizedSSHKEXes(t *testing.T) { // Expect no failure to negotiates when setting PeerKEXPRNGSeed. if doPeerKEXPRNGSeed { - t.Fatalf("goroutine failed: %s", err) - + return errors.Tracef("unexpected failure to negotiate: %v", err) } else { failed = true break @@ -151,7 +188,8 @@ func TestRandomizedSSHKEXes(t *testing.T) { // Expect at least one failure to negotiate when not setting PeerKEXPRNGSeed. 
if !doPeerKEXPRNGSeed && !failed { - t.Fatalf("unexpected success") + return errors.TraceNew("unexpected success") } } + return nil } diff --git a/psiphon/common/fragmentor/fragmentor.go b/psiphon/common/fragmentor/fragmentor.go index ff86cbd72..29a902f02 100644 --- a/psiphon/common/fragmentor/fragmentor.go +++ b/psiphon/common/fragmentor/fragmentor.go @@ -195,22 +195,28 @@ func (c *Conn) GetMetrics() common.LogFields { logFields := make(common.LogFields) - if c.bytesFragmented == 0 { - return logFields - } + if c.bytesFragmented > 0 { - var prefix string - if c.config.isUpstream { - prefix = "upstream_" - } else { - prefix = "downstream_" + var prefix string + if c.config.isUpstream { + prefix = "upstream_" + } else { + prefix = "downstream_" + } + + logFields[prefix+"bytes_fragmented"] = c.bytesFragmented + logFields[prefix+"min_bytes_written"] = c.minBytesWritten + logFields[prefix+"max_bytes_written"] = c.maxBytesWritten + logFields[prefix+"min_delayed"] = int(c.minDelayed / time.Microsecond) + logFields[prefix+"max_delayed"] = int(c.maxDelayed / time.Microsecond) } - logFields[prefix+"bytes_fragmented"] = c.bytesFragmented - logFields[prefix+"min_bytes_written"] = c.minBytesWritten - logFields[prefix+"max_bytes_written"] = c.maxBytesWritten - logFields[prefix+"min_delayed"] = int(c.minDelayed / time.Microsecond) - logFields[prefix+"max_delayed"] = int(c.maxDelayed / time.Microsecond) + // Include metrics, such as inproxy and fragmentor metrics, from the + // underlying dial conn. + underlyingMetrics, ok := c.Conn.(common.MetricsSource) + if ok { + logFields.Add(underlyingMetrics.GetMetrics()) + } return logFields } diff --git a/psiphon/common/fragmentor/fragmentor_test.go b/psiphon/common/fragmentor/fragmentor_test.go index d477fa352..d3f188ed0 100644 --- a/psiphon/common/fragmentor/fragmentor_test.go +++ b/psiphon/common/fragmentor/fragmentor_test.go @@ -62,7 +62,7 @@ func TestFragmentor(t *testing.T) { if err != nil { t.Fatalf("parameters.NewParameters failed: %s", err) } - _, err = params.Set("", false, map[string]interface{}{ + _, err = params.Set("", 0, map[string]interface{}{ "FragmentorProbability": 1.0, "FragmentorLimitProtocols": protocol.TunnelProtocols{tunnelProtocol}, "FragmentorMinTotalBytes": bytesToFragment, diff --git a/psiphon/common/inproxy/api.go b/psiphon/common/inproxy/api.go new file mode 100644 index 000000000..be66626d1 --- /dev/null +++ b/psiphon/common/inproxy/api.go @@ -0,0 +1,868 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see .
+ * + */ + +package inproxy + +import ( + "crypto/rand" + "crypto/subtle" + "encoding/base64" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" +) + +const ( + ProxyProtocolVersion1 = 1 + MaxCompartmentIDs = 10 +) + +// ID is a unique identifier used to identify inproxy connections and actors. +type ID [32]byte + +// MakeID generates a new ID using crypto/rand. +func MakeID() (ID, error) { + var id ID + for { + _, err := rand.Read(id[:]) + if err != nil { + return id, errors.Trace(err) + } + if !id.Zero() { + return id, nil + } + } +} + +// IDFromString returns an ID given its string encoding. +func IDFromString(s string) (ID, error) { + var id ID + return id, errors.Trace(fromBase64String(s, id[:])) +} + +func fromBase64String(s string, b []byte) error { + value, err := base64.RawStdEncoding.DecodeString(s) + if err != nil { + return errors.Trace(err) + } + if len(value) != len(b) { + return errors.TraceNew("invalid length") + } + copy(b, value) + return nil +} + +// IDsFromStrings returns a list of IDs given a list of string encodings. +func IDsFromStrings(strs []string) ([]ID, error) { + var ids []ID + for _, str := range strs { + id, err := IDFromString(str) + if err != nil { + return nil, errors.Trace(err) + } + ids = append(ids, id) + } + return ids, nil +} + +// MarshalText emits IDs as base64. +func (id ID) MarshalText() ([]byte, error) { + return []byte(id.String()), nil +} + +// String emits IDs as base64. +func (id ID) String() string { + return base64.RawStdEncoding.EncodeToString([]byte(id[:])) +} + +// Equal indicates whether two IDs are equal. It uses a constant time +// comparison. +func (id ID) Equal(x ID) bool { + return subtle.ConstantTimeCompare(id[:], x[:]) == 1 +} + +// Zero indicates whether the ID is the zero value. +func (id ID) Zero() bool { + var zero ID + return id.Equal(zero) +} + +// HaveCommonIDs indicates whether two lists of IDs have a common entry. +func HaveCommonIDs(a, b []ID) bool { + for _, x := range a { + for _, y := range b { + // Each comparison is constant time, but the number of comparisons + // varies and might leak the size of a list. + if x.Equal(y) { + return true + } + } + } + return false +} + +// NetworkType is the type of a network, such as WiFi or Mobile. This enum is +// used for compact API message encoding. +type NetworkType int32 + +const ( + NetworkTypeUnknown NetworkType = iota + NetworkTypeWiFi + NetworkTypeMobile +) + +// NetworkProtocol is an Internet protocol, such as TCP or UDP. This enum is +// used for compact API message encoding. +type NetworkProtocol int32 + +const ( + NetworkProtocolTCP NetworkProtocol = iota + NetworkProtocolUDP +) + +// NetworkProtocolFromString converts a "net" package network protocol string +// value to a NetworkProtocol. +func NetworkProtocolFromString(networkProtocol string) (NetworkProtocol, error) { + switch networkProtocol { + case "tcp": + return NetworkProtocolTCP, nil + case "udp": + return NetworkProtocolUDP, nil + } + var p NetworkProtocol + return p, errors.Tracef("unknown network protocol: %s", networkProtocol) +} + +// String converts a NetworkProtocol to a "net" package network protocol string. +func (p NetworkProtocol) String() string { + switch p { + case NetworkProtocolTCP: + return "tcp" + case NetworkProtocolUDP: + return "udp" + } + // This case will cause net dials to fail. 
+ return "" +} + +// IsStream indicates if the NetworkProtocol is stream-oriented (e.g., TCP) +// and not packet-oriented (e.g., UDP). +func (p NetworkProtocol) IsStream() bool { + switch p { + case NetworkProtocolTCP: + return true + case NetworkProtocolUDP: + return false + } + return false +} + +// ProxyMetrics are network topology and resource metrics provided by a +// proxy to a broker. The broker uses this information when matching proxies +// and clients. +type ProxyMetrics struct { + BaseAPIParameters protocol.PackedAPIParameters `cbor:"1,keyasint,omitempty"` + ProxyProtocolVersion int32 `cbor:"2,keyasint,omitempty"` + NATType NATType `cbor:"3,keyasint,omitempty"` + PortMappingTypes PortMappingTypes `cbor:"4,keyasint,omitempty"` + MaxClients int32 `cbor:"6,keyasint,omitempty"` + ConnectingClients int32 `cbor:"7,keyasint,omitempty"` + ConnectedClients int32 `cbor:"8,keyasint,omitempty"` + LimitUpstreamBytesPerSecond int64 `cbor:"9,keyasint,omitempty"` + LimitDownstreamBytesPerSecond int64 `cbor:"10,keyasint,omitempty"` + PeakUpstreamBytesPerSecond int64 `cbor:"11,keyasint,omitempty"` + PeakDownstreamBytesPerSecond int64 `cbor:"12,keyasint,omitempty"` +} + +// ClientMetrics are network topology metrics provided by a client to a +// broker. The broker uses this information when matching proxies and +// clients. +type ClientMetrics struct { + BaseAPIParameters protocol.PackedAPIParameters `cbor:"1,keyasint,omitempty"` + ProxyProtocolVersion int32 `cbor:"2,keyasint,omitempty"` + NATType NATType `cbor:"3,keyasint,omitempty"` + PortMappingTypes PortMappingTypes `cbor:"4,keyasint,omitempty"` +} + +// ProxyAnnounceRequest is an API request sent from a proxy to a broker, +// announcing that it is available for a client connection. Proxies send one +// ProxyAnnounceRequest for each available client connection. The broker will +// match the proxy with a client and return WebRTC connection information +// in the response. +// +// PersonalCompartmentIDs limits the clients to those that supply one of the +// specified compartment IDs; personal compartment IDs are distributed from +// proxy operators to client users out-of-band and provide optional access +// control. +// +// The proxy's session public key is an implicit and cryptographically +// verified proxy ID. +type ProxyAnnounceRequest struct { + PersonalCompartmentIDs []ID `cbor:"1,keyasint,omitempty"` + Metrics *ProxyMetrics `cbor:"2,keyasint,omitempty"` +} + +// WebRTCSessionDescription is compatible with pion/webrtc.SessionDescription +// and facilitates the PSIPHON_ENABLE_INPROXY build tag exclusion of pion +// dependencies. +type WebRTCSessionDescription struct { + Type int `cbor:"1,keyasint,omitempty"` + SDP string `cbor:"2,keyasint,omitempty"` +} + +// TODO: send ProxyAnnounceRequest/ClientOfferRequest.Metrics only with the +// first request in a session and cache. + +// ProxyAnnounceResponse returns the connection information for a matched +// client. To establish a WebRTC connection, the proxy uses the client's +// offer SDP to create its own answer SDP and send that to the broker in a +// subsequent ProxyAnswerRequest. The ConnectionID is a unique identifier for +// this single connection and must be relayed back in the ProxyAnswerRequest. +// +// ClientRootObfuscationSecret is generated (or replayed) by the client and +// sent to the proxy and used to drive obfuscation operations. +// +// DestinationAddress is the dial address for the Psiphon server the proxy is +// to relay client traffic with.
The broker validates that the dial address +// corresponds to a valid Psiphon server. +// +// OperatorMessageJSON is an optional message bundle to be forwarded to the +// user interface for display to the user; for example, to alert the proxy +// operator of configuration issue; the JSON schema is not defined here. +type ProxyAnnounceResponse struct { + OperatorMessageJSON string `cbor:"1,keyasint,omitempty"` + TacticsPayload []byte `cbor:"2,keyasint,omitempty"` + Limited bool `cbor:"3,keyasint,omitempty"` + NoMatch bool `cbor:"4,keyasint,omitempty"` + ConnectionID ID `cbor:"5,keyasint,omitempty"` + ClientProxyProtocolVersion int32 `cbor:"6,keyasint,omitempty"` + ClientOfferSDP WebRTCSessionDescription `cbor:"7,keyasint,omitempty"` + ClientRootObfuscationSecret ObfuscationSecret `cbor:"8,keyasint,omitempty"` + DoDTLSRandomization bool `cbor:"9,keyasint,omitempty"` + TrafficShapingParameters *DataChannelTrafficShapingParameters `cbor:"10,keyasint,omitempty"` + NetworkProtocol NetworkProtocol `cbor:"11,keyasint,omitempty"` + DestinationAddress string `cbor:"12,keyasint,omitempty"` +} + +// ClientOfferRequest is an API request sent from a client to a broker, +// requesting a proxy connection. The client sends its WebRTC offer SDP with +// this request. +// +// Clients specify known compartment IDs and are matched with proxies in those +// compartments. CommonCompartmentIDs are comparment IDs managed by Psiphon +// and revealed through tactics or bundled with server lists. +// PersonalCompartmentIDs are compartment IDs shared privately between users, +// out-of-band. +// +// ClientRootObfuscationSecret is generated (or replayed) by the client and +// sent to the proxy and used to drive obfuscation operations. +// +// To specify the Psiphon server it wishes to proxy to, the client sends the +// full, digitally signed Psiphon server entry to the broker and also the +// specific dial address that it has selected for that server. The broker +// validates the server entry signature, the server in-proxy capability, and +// that the dial address corresponds to the network protocol, IP address or +// domain, and destination port for a valid Psiphon tunnel protocol run by +// the specified server entry. +type ClientOfferRequest struct { + Metrics *ClientMetrics `cbor:"1,keyasint,omitempty"` + CommonCompartmentIDs []ID `cbor:"2,keyasint,omitempty"` + PersonalCompartmentIDs []ID `cbor:"3,keyasint,omitempty"` + ClientOfferSDP WebRTCSessionDescription `cbor:"4,keyasint,omitempty"` + ICECandidateTypes ICECandidateTypes `cbor:"5,keyasint,omitempty"` + ClientRootObfuscationSecret ObfuscationSecret `cbor:"6,keyasint,omitempty"` + DoDTLSRandomization bool `cbor:"7,keyasint,omitempty"` + TrafficShapingParameters *DataChannelTrafficShapingParameters `cbor:"8,keyasint,omitempty"` + PackedDestinationServerEntry []byte `cbor:"9,keyasint,omitempty"` + NetworkProtocol NetworkProtocol `cbor:"10,keyasint,omitempty"` + DestinationAddress string `cbor:"11,keyasint,omitempty"` +} + +// DataChannelTrafficShapingParameters specifies a data channel traffic +// shaping configuration, including random padding and decoy messages. +// Clients determine their own traffic shaping configuration, and generate +// and send a configuration for the peer proxy to use. 
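// Editorial example (not part of this change): a configuration that passes
// Validate below keeps each Min at or below its Max and within the
// sanity-check caps (at most 100 messages and 16384 bytes), for example:
//
//	&DataChannelTrafficShapingParameters{
//		MinPaddedMessages: 0, MaxPaddedMessages: 10,
//		MinPaddingSize: 0, MaxPaddingSize: 256,
//		MinDecoyMessages: 0, MaxDecoyMessages: 5,
//		MinDecoySize: 0, MaxDecoySize: 1024,
//		DecoyMessageProbability: 0.5,
//	}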
+type DataChannelTrafficShapingParameters struct { + MinPaddedMessages int `cbor:"1,keyasint,omitempty"` + MaxPaddedMessages int `cbor:"2,keyasint,omitempty"` + MinPaddingSize int `cbor:"3,keyasint,omitempty"` + MaxPaddingSize int `cbor:"4,keyasint,omitempty"` + MinDecoyMessages int `cbor:"5,keyasint,omitempty"` + MaxDecoyMessages int `cbor:"6,keyasint,omitempty"` + MinDecoySize int `cbor:"7,keyasint,omitempty"` + MaxDecoySize int `cbor:"8,keyasint,omitempty"` + DecoyMessageProbability float64 `cbor:"9,keyasint,omitempty"` +} + +// ClientOfferResponse returns the connecting information for a matched proxy. +// The proxy's WebRTC SDP is an answer to the offer sent in +// ClientOfferRequest and is used to begin dialing the WebRTC connection. +// +// Once the client completes its connection to the Psiphon server, it must +// relay a BrokerServerReport to the server on behalf of the broker. This +// relay is conducted within a secure session. First, the client sends +// RelayPacketToServer to the server. Then the client relays any responses to +// the broker using ClientRelayedPacketRequests and continues to relay using +// ClientRelayedPacketRequests until complete. ConnectionID identifies this +// connection and its relayed BrokerServerReport. +type ClientOfferResponse struct { + Limited bool `cbor:"1,keyasint,omitempty"` + NoMatch bool `cbor:"2,keyasint,omitempty"` + ConnectionID ID `cbor:"3,keyasint,omitempty"` + SelectedProxyProtocolVersion int32 `cbor:"4,keyasint,omitempty"` + ProxyAnswerSDP WebRTCSessionDescription `cbor:"5,keyasint,omitempty"` + RelayPacketToServer []byte `cbor:"6,keyasint,omitempty"` +} + +// TODO: Encode SDPs using CBOR without field names, simliar to packed metrics? + +// ProxyAnswerRequest is an API request sent from a proxy to a broker, +// following ProxyAnnounceResponse, with the WebRTC answer SDP corresponding +// to the client offer SDP received in ProxyAnnounceResponse. ConnectionID +// identifies the connection begun in ProxyAnnounceResponse. +// +// If the proxy was unable to establish an answer SDP or failed for some other +// reason, it should still send ProxyAnswerRequest with AnswerError +// populated; the broker will signal the client to abort this connection. +type ProxyAnswerRequest struct { + ConnectionID ID `cbor:"1,keyasint,omitempty"` + SelectedProxyProtocolVersion int32 `cbor:"2,keyasint,omitempty"` + ProxyAnswerSDP WebRTCSessionDescription `cbor:"3,keyasint,omitempty"` + ICECandidateTypes ICECandidateTypes `cbor:"4,keyasint,omitempty"` + AnswerError string `cbor:"5,keyasint,omitempty"` +} + +// ProxyAnswerResponse is the acknowledgement for a ProxyAnswerRequest. +type ProxyAnswerResponse struct { +} + +// ClientRelayedPacketRequest is an API request sent from a client to a +// broker, relaying a secure session packet from the Psiphon server to the +// broker. This relay is a continuation of the broker/server exchange begun +// with ClientOfferResponse.RelayPacketToServer. PacketFromServer is the next +// packet from the server. +// +// When a broker attempts to use an existing session which has expired on the +// server, the packet from the server may contain a signed reset session +// token, which is used to automatically reset and start establishing a new +// session before relaying the payload. +type ClientRelayedPacketRequest struct { + ConnectionID ID `cbor:"1,keyasint,omitempty"` + PacketFromServer []byte `cbor:"2,keyasint,omitempty"` +} + +// ClientRelayedPacketResponse returns the next packet from the broker to the +// server. 
When PacketToServer is empty, the broker/server exchange is done +// and the client stops relaying packets. +type ClientRelayedPacketResponse struct { + PacketToServer []byte `cbor:"1,keyasint,omitempty"` +} + +// BrokerServerReport is a one-way API call sent from a broker to a +// Psiphon server. This delivers, to the server, information that neither the +// client nor the proxy is trusted to report. ProxyID is the proxy ID to be +// logged with server_tunnel to attribute traffic to a specific proxy. +// ClientIP is the original client IP as seen by the broker; this is the IP +// value to be used in GeoIP-related operations including traffic rules, +// tactics, and OSL progress. ProxyIP is the proxy IP as seen by the broker; +// this value should match the Psiphon server's observed client IP. +// Additional fields are metrics to be logged with server_tunnel. +// +// Using a one-way message here means that, once a broker/server session is +// established, the entire relay can be encapsulated in a single additional +// field sent in the Psiphon API handshake. This minimizes observable and +// potentially fingerprintable traffic flows as the client does not need to +// relay any further session packets before starting the tunnel. The +// trade-off is that the broker doesn't get an indication from the server +// that the message was accepted or rejected and cannot directly, in real time, +// log any tunnel error associated with the server rejecting the message, or +// log that the relay was completed successfully. These events can be logged +// on the server and logs reconciled using the in-proxy Connection ID. +type BrokerServerReport struct { + ProxyID ID `cbor:"1,keyasint,omitempty"` + ConnectionID ID `cbor:"2,keyasint,omitempty"` + MatchedCommonCompartments bool `cbor:"3,keyasint,omitempty"` + MatchedPersonalCompartments bool `cbor:"4,keyasint,omitempty"` + ProxyNATType NATType `cbor:"5,keyasint,omitempty"` + ProxyPortMappingTypes PortMappingTypes `cbor:"6,keyasint,omitempty"` + ClientNATType NATType `cbor:"7,keyasint,omitempty"` + ClientPortMappingTypes PortMappingTypes `cbor:"8,keyasint,omitempty"` + ClientIP string `cbor:"9,keyasint,omitempty"` + ProxyIP string `cbor:"10,keyasint,omitempty"` +} + +// GetNetworkType extracts the network_type from base API metrics and returns +// a corresponding NetworkType. This is the one base metric that is used in +// the broker logic, and not simply logged. +func GetNetworkType(packedBaseParams protocol.PackedAPIParameters) NetworkType { + baseNetworkType, ok := packedBaseParams.GetNetworkType() + if !ok { + return NetworkTypeUnknown + } + switch baseNetworkType { + case "WIFI": + return NetworkTypeWiFi + case "MOBILE": + return NetworkTypeMobile + } + return NetworkTypeUnknown +} + +// Sanity check values. +const ( + maxICECandidateTypes = 10 + maxPortMappingTypes = 10 + + maxPaddedMessages = 100 + maxPaddingSize = 16384 + maxDecoyMessages = 100 + maxDecoySize = 16384 +) + +// ValidateAndGetParametersAndLogFields validates the ProxyMetrics and returns +// Psiphon API parameters for processing and common.LogFields for logging.
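// Editorial sketch (not part of this change): a broker request handler is
// expected to call this with the validator and formatter callbacks supplied
// in its configuration, for example:
//
//	baseParams, logFields, err := request.Metrics.ValidateAndGetParametersAndLogFields(
//		config.APIParameterValidator,
//		config.APIParameterLogFieldFormatter,
//		proxyGeoIPData)
//	if err != nil {
//		// reject the announcement request
//	}
//
// where config and proxyGeoIPData are assumed names for the hosting broker's
// configuration and the GeoIP data resolved for the announcing proxy.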
+func (metrics *ProxyMetrics) ValidateAndGetParametersAndLogFields( + baseAPIParameterValidator common.APIParameterValidator, + formatter common.APIParameterLogFieldFormatter, + geoIPData common.GeoIPData) (common.APIParameters, common.LogFields, error) { + + if metrics.BaseAPIParameters == nil { + return nil, nil, errors.TraceNew("missing base API parameters") + } + + baseParams, err := protocol.DecodePackedAPIParameters(metrics.BaseAPIParameters) + if err != nil { + return nil, nil, errors.Trace(err) + } + + err = baseAPIParameterValidator(baseParams) + if err != nil { + return nil, nil, errors.Trace(err) + } + + if metrics.ProxyProtocolVersion != ProxyProtocolVersion1 { + return nil, nil, errors.Tracef("invalid proxy protocol version: %v", metrics.ProxyProtocolVersion) + } + + if !metrics.NATType.IsValid() { + return nil, nil, errors.Tracef("invalid NAT type: %v", metrics.NATType) + } + + if len(metrics.PortMappingTypes) > maxPortMappingTypes { + return nil, nil, errors.Tracef("invalid portmapping types length: %d", len(metrics.PortMappingTypes)) + } + + if !metrics.PortMappingTypes.IsValid() { + return nil, nil, errors.Tracef("invalid portmapping types: %v", metrics.PortMappingTypes) + } + + logFields := formatter(geoIPData, baseParams) + + logFields["proxy_protocol_version"] = metrics.ProxyProtocolVersion + logFields["nat_type"] = metrics.NATType + logFields["port_mapping_types"] = metrics.PortMappingTypes + logFields["max_clients"] = metrics.MaxClients + logFields["connecting_clients"] = metrics.ConnectingClients + logFields["connected_clients"] = metrics.ConnectedClients + logFields["limit_upstream_bytes_per_second"] = metrics.LimitUpstreamBytesPerSecond + logFields["limit_downstream_bytes_per_second"] = metrics.LimitDownstreamBytesPerSecond + logFields["peak_upstream_bytes_per_second"] = metrics.PeakUpstreamBytesPerSecond + logFields["peak_downstream_bytes_per_second"] = metrics.PeakDownstreamBytesPerSecond + + return baseParams, logFields, nil +} + +// ValidateAndGetLogFields validates the ClientMetrics and returns +// common.LogFields for logging. 
+func (metrics *ClientMetrics) ValidateAndGetLogFields( + baseAPIParameterValidator common.APIParameterValidator, + formatter common.APIParameterLogFieldFormatter, + geoIPData common.GeoIPData) (common.LogFields, error) { + + if metrics.BaseAPIParameters == nil { + return nil, errors.TraceNew("missing base API parameters") + } + + baseParams, err := protocol.DecodePackedAPIParameters(metrics.BaseAPIParameters) + if err != nil { + return nil, errors.Trace(err) + } + + err = baseAPIParameterValidator(baseParams) + if err != nil { + return nil, errors.Trace(err) + } + + if metrics.ProxyProtocolVersion != ProxyProtocolVersion1 { + return nil, errors.Tracef("invalid proxy protocol version: %v", metrics.ProxyProtocolVersion) + } + + if !metrics.NATType.IsValid() { + return nil, errors.Tracef("invalid NAT type: %v", metrics.NATType) + } + + if len(metrics.PortMappingTypes) > maxPortMappingTypes { + return nil, errors.Tracef("invalid portmapping types length: %d", len(metrics.PortMappingTypes)) + } + + if !metrics.PortMappingTypes.IsValid() { + return nil, errors.Tracef("invalid portmapping types: %v", metrics.PortMappingTypes) + } + + logFields := formatter(geoIPData, baseParams) + + logFields["proxy_protocol_version"] = metrics.ProxyProtocolVersion + logFields["nat_type"] = metrics.NATType + logFields["port_mapping_types"] = metrics.PortMappingTypes + + return logFields, nil +} + +// ValidateAndGetParametersAndLogFields validates the ProxyAnnounceRequest and +// returns Psiphon API parameters for processing and common.LogFields for +// logging. +func (request *ProxyAnnounceRequest) ValidateAndGetParametersAndLogFields( + maxCompartmentIDs int, + baseAPIParameterValidator common.APIParameterValidator, + formatter common.APIParameterLogFieldFormatter, + geoIPData common.GeoIPData) (common.APIParameters, common.LogFields, error) { + + if len(request.PersonalCompartmentIDs) > maxCompartmentIDs { + return nil, nil, errors.Tracef("invalid compartment IDs length: %d", len(request.PersonalCompartmentIDs)) + } + + if request.Metrics == nil { + return nil, nil, errors.TraceNew("missing metrics") + } + + apiParams, logFields, err := request.Metrics.ValidateAndGetParametersAndLogFields( + baseAPIParameterValidator, formatter, geoIPData) + if err != nil { + return nil, nil, errors.Trace(err) + } + + // PersonalCompartmentIDs are user-generated and shared out-of-band; + // values are not logged since they may link users. + + hasPersonalCompartmentIDs := len(request.PersonalCompartmentIDs) > 0 + + logFields["has_personal_compartment_ids"] = hasPersonalCompartmentIDs + + return apiParams, logFields, nil +} + +// ValidateAndGetLogFields validates the ClientOfferRequest and returns +// common.LogFields for logging. +func (request *ClientOfferRequest) ValidateAndGetLogFields( + maxCompartmentIDs int, + lookupGeoIP LookupGeoIP, + baseAPIParameterValidator common.APIParameterValidator, + formatter common.APIParameterLogFieldFormatter, + geoIPData common.GeoIPData) ([]byte, common.LogFields, error) { + + if len(request.CommonCompartmentIDs) > maxCompartmentIDs { + return nil, nil, errors.Tracef( + "invalid compartment IDs length: %d", len(request.CommonCompartmentIDs)) + } + + if len(request.PersonalCompartmentIDs) > maxCompartmentIDs { + return nil, nil, errors.Tracef( + "invalid compartment IDs length: %d", len(request.PersonalCompartmentIDs)) + } + + // The client offer SDP may contain no ICE candidates. 
+ errorOnNoCandidates := false + + // Client offer SDP candidate addresses must match the country and ASN of + // the client. Don't facilitate connections to arbitrary destinations. + filteredSDP, sdpMetrics, err := filterSDPAddresses( + []byte(request.ClientOfferSDP.SDP), errorOnNoCandidates, lookupGeoIP, geoIPData) + if err != nil { + return nil, nil, errors.Trace(err) + } + + // The client's self-reported ICECandidateTypes are used instead of the + // candidate types that can be derived from the SDP, since port mapping + // types are edited into the SDP in a way that makes them + // indistinguishable from host candidate types. + + if !request.ICECandidateTypes.IsValid() { + return nil, nil, errors.Tracef( + "invalid ICE candidate types: %v", request.ICECandidateTypes) + } + + if request.Metrics == nil { + return nil, nil, errors.TraceNew("missing metrics") + } + + logFields, err := request.Metrics.ValidateAndGetLogFields( + baseAPIParameterValidator, formatter, geoIPData) + if err != nil { + return nil, nil, errors.Trace(err) + } + + if request.TrafficShapingParameters != nil { + err := request.TrafficShapingParameters.Validate() + if err != nil { + return nil, nil, errors.Trace(err) + } + } + + // CommonCompartmentIDs are generated and managed and are a form of + // obfuscation secret, so are not logged. PersonalCompartmentIDs are + // user-generated and shared out-of-band; values are not logged since + // they may link users. + + hasCommonCompartmentIDs := len(request.CommonCompartmentIDs) > 0 + hasPersonalCompartmentIDs := len(request.PersonalCompartmentIDs) > 0 + + logFields["has_common_compartment_ids"] = hasCommonCompartmentIDs + logFields["has_personal_compartment_ids"] = hasPersonalCompartmentIDs + logFields["ice_candidate_types"] = request.ICECandidateTypes + logFields["has_IPv6"] = sdpMetrics.hasIPv6 + logFields["filtered_ice_candidates"] = sdpMetrics.filteredICECandidates + + return filteredSDP, logFields, nil +} + +// Validate validates the that client has not specified excess traffic shaping +// padding or decoy traffic. +func (params *DataChannelTrafficShapingParameters) Validate() error { + + if params.MinPaddedMessages < 0 || + params.MinPaddedMessages > params.MaxPaddedMessages || + params.MaxPaddedMessages > maxPaddedMessages { + return errors.TraceNew("invalid padded messages") + } + + if params.MinPaddingSize < 0 || + params.MinPaddingSize > params.MaxPaddingSize || + params.MaxPaddingSize > maxPaddingSize { + return errors.TraceNew("invalid padding size") + } + + if params.MinDecoyMessages < 0 || + params.MinDecoyMessages > params.MaxDecoyMessages || + params.MaxDecoyMessages > maxDecoyMessages { + return errors.TraceNew("invalid decoy messages") + } + + if params.MinDecoySize < 0 || + params.MinDecoySize > params.MaxDecoySize || + params.MaxDecoySize > maxDecoySize { + return errors.TraceNew("invalid decoy size") + } + + return nil +} + +// ValidateAndGetLogFields validates the ProxyAnswerRequest and returns +// common.LogFields for logging. +func (request *ProxyAnswerRequest) ValidateAndGetLogFields( + lookupGeoIP LookupGeoIP, + baseAPIParameterValidator common.APIParameterValidator, + formatter common.APIParameterLogFieldFormatter, + geoIPData common.GeoIPData) ([]byte, common.LogFields, error) { + + // The proxy answer SDP must contain at least one ICE candidate. + errorOnNoCandidates := true + + // Proxy answer SDP candidate addresses must match the country and ASN of + // the proxy. Don't facilitate connections to arbitrary destinations. 
+ filteredSDP, sdpMetrics, err := filterSDPAddresses( + []byte(request.ProxyAnswerSDP.SDP), errorOnNoCandidates, lookupGeoIP, geoIPData) + if err != nil { + return nil, nil, errors.Trace(err) + } + + // The proxy's self-reported ICECandidateTypes are used instead of the + // candidate types that can be derived from the SDP, since port mapping + // types are edited into the SDP in a way that makes them + // indistinguishable from host candidate types. + + if !request.ICECandidateTypes.IsValid() { + return nil, nil, errors.Tracef( + "invalid ICE candidate types: %v", request.ICECandidateTypes) + } + + if request.SelectedProxyProtocolVersion != ProxyProtocolVersion1 { + return nil, nil, errors.Tracef( + "invalid select proxy protocol version: %v", request.SelectedProxyProtocolVersion) + } + + logFields := formatter(geoIPData, common.APIParameters{}) + + logFields["connection_id"] = request.ConnectionID + logFields["ice_candidate_types"] = request.ICECandidateTypes + logFields["has_IPv6"] = sdpMetrics.hasIPv6 + logFields["filtered_ice_candidates"] = sdpMetrics.filteredICECandidates + logFields["answer_error"] = request.AnswerError + + return filteredSDP, logFields, nil +} + +// ValidateAndGetLogFields validates the ClientRelayedPacketRequest and returns +// common.LogFields for logging. +func (request *ClientRelayedPacketRequest) ValidateAndGetLogFields( + baseAPIParameterValidator common.APIParameterValidator, + formatter common.APIParameterLogFieldFormatter, + geoIPData common.GeoIPData) (common.LogFields, error) { + + logFields := formatter(geoIPData, common.APIParameters{}) + + logFields["connection_id"] = request.ConnectionID + + return logFields, nil +} + +// ValidateAndGetLogFields validates the BrokerServerReport and returns +// common.LogFields for logging. +func (request *BrokerServerReport) ValidateAndGetLogFields() (common.LogFields, error) { + + if !request.ProxyNATType.IsValid() { + return nil, errors.Tracef("invalid proxy NAT type: %v", request.ProxyNATType) + } + + if !request.ProxyPortMappingTypes.IsValid() { + return nil, errors.Tracef("invalid proxy portmapping types: %v", request.ProxyPortMappingTypes) + } + + if !request.ClientNATType.IsValid() { + return nil, errors.Tracef("invalid client NAT type: %v", request.ClientNATType) + } + + if !request.ClientPortMappingTypes.IsValid() { + return nil, errors.Tracef("invalid client portmapping types: %v", request.ClientPortMappingTypes) + } + + // Neither ClientIP nor ProxyIP is logged. 
+ + logFields := common.LogFields{} + + logFields["proxy_id"] = request.ProxyID + logFields["connection_id"] = request.ConnectionID + logFields["matched_common_compartments"] = request.MatchedCommonCompartments + logFields["matched_personal_compartments"] = request.MatchedPersonalCompartments + logFields["proxy_nat_type"] = request.ProxyNATType + logFields["proxy_port_mapping_types"] = request.ProxyPortMappingTypes + logFields["client_nat_type"] = request.ClientNATType + logFields["client_port_mapping_types"] = request.ClientPortMappingTypes + + return logFields, nil +} + +func MarshalProxyAnnounceRequest(request *ProxyAnnounceRequest) ([]byte, error) { + payload, err := marshalRecord(request, recordTypeAPIProxyAnnounceRequest) + return payload, errors.Trace(err) +} + +func UnmarshalProxyAnnounceRequest(payload []byte) (*ProxyAnnounceRequest, error) { + var request *ProxyAnnounceRequest + err := unmarshalRecord(recordTypeAPIProxyAnnounceRequest, payload, &request) + return request, errors.Trace(err) +} + +func MarshalProxyAnnounceResponse(response *ProxyAnnounceResponse) ([]byte, error) { + payload, err := marshalRecord(response, recordTypeAPIProxyAnnounceResponse) + return payload, errors.Trace(err) +} + +func UnmarshalProxyAnnounceResponse(payload []byte) (*ProxyAnnounceResponse, error) { + var response *ProxyAnnounceResponse + err := unmarshalRecord(recordTypeAPIProxyAnnounceResponse, payload, &response) + return response, errors.Trace(err) +} + +func MarshalProxyAnswerRequest(request *ProxyAnswerRequest) ([]byte, error) { + payload, err := marshalRecord(request, recordTypeAPIProxyAnswerRequest) + return payload, errors.Trace(err) +} + +func UnmarshalProxyAnswerRequest(payload []byte) (*ProxyAnswerRequest, error) { + var request *ProxyAnswerRequest + err := unmarshalRecord(recordTypeAPIProxyAnswerRequest, payload, &request) + return request, errors.Trace(err) +} + +func MarshalProxyAnswerResponse(response *ProxyAnswerResponse) ([]byte, error) { + payload, err := marshalRecord(response, recordTypeAPIProxyAnswerResponse) + return payload, errors.Trace(err) +} + +func UnmarshalProxyAnswerResponse(payload []byte) (*ProxyAnswerResponse, error) { + var response *ProxyAnswerResponse + err := unmarshalRecord(recordTypeAPIProxyAnswerResponse, payload, &response) + return response, errors.Trace(err) +} + +func MarshalClientOfferRequest(request *ClientOfferRequest) ([]byte, error) { + payload, err := marshalRecord(request, recordTypeAPIClientOfferRequest) + return payload, errors.Trace(err) +} + +func UnmarshalClientOfferRequest(payload []byte) (*ClientOfferRequest, error) { + var request *ClientOfferRequest + err := unmarshalRecord(recordTypeAPIClientOfferRequest, payload, &request) + return request, errors.Trace(err) +} + +func MarshalClientOfferResponse(response *ClientOfferResponse) ([]byte, error) { + payload, err := marshalRecord(response, recordTypeAPIClientOfferResponse) + return payload, errors.Trace(err) +} + +func UnmarshalClientOfferResponse(payload []byte) (*ClientOfferResponse, error) { + var response *ClientOfferResponse + err := unmarshalRecord(recordTypeAPIClientOfferResponse, payload, &response) + return response, errors.Trace(err) +} + +func MarshalClientRelayedPacketRequest(request *ClientRelayedPacketRequest) ([]byte, error) { + payload, err := marshalRecord(request, recordTypeAPIClientRelayedPacketRequest) + return payload, errors.Trace(err) +} + +func UnmarshalClientRelayedPacketRequest(payload []byte) (*ClientRelayedPacketRequest, error) { + var request
*ClientRelayedPacketRequest + err := unmarshalRecord(recordTypeAPIClientRelayedPacketRequest, payload, &request) + return request, errors.Trace(err) +} + +func MarshalClientRelayedPacketResponse(response *ClientRelayedPacketResponse) ([]byte, error) { + payload, err := marshalRecord(response, recordTypeAPIClientRelayedPacketResponse) + return payload, errors.Trace(err) +} + +func UnmarshalClientRelayedPacketResponse(payload []byte) (*ClientRelayedPacketResponse, error) { + var response *ClientRelayedPacketResponse + err := unmarshalRecord(recordTypeAPIClientRelayedPacketResponse, payload, &response) + return response, errors.Trace(err) +} + +func MarshalBrokerServerReport(request *BrokerServerReport) ([]byte, error) { + payload, err := marshalRecord(request, recordTypeAPIBrokerServerReport) + return payload, errors.Trace(err) +} + +func UnmarshalBrokerServerReport(payload []byte) (*BrokerServerReport, error) { + var request *BrokerServerReport + err := unmarshalRecord(recordTypeAPIBrokerServerReport, payload, &request) + return request, errors.Trace(err) +} diff --git a/psiphon/common/inproxy/broker.go b/psiphon/common/inproxy/broker.go new file mode 100644 index 000000000..722cd963b --- /dev/null +++ b/psiphon/common/inproxy/broker.go @@ -0,0 +1,1466 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "context" + std_errors "errors" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/Psiphon-Labs/consistent" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" + "github.com/cespare/xxhash" + lrucache "github.com/cognusion/go-cache-lru" + "github.com/fxamacker/cbor/v2" +) + +const ( + + // BrokerMaxRequestBodySize is the maximum request size, that should be + // enforced by the provided broker transport. + BrokerMaxRequestBodySize = 65536 + + // BrokerEndPointName is the standard name for referencing an endpoint + // that services broker requests. + BrokerEndPointName = "inproxy-broker" + + brokerProxyAnnounceTimeout = 2 * time.Minute + brokerClientOfferTimeout = 10 * time.Second + brokerPendingServerReportsTTL = 60 * time.Second + brokerPendingServerReportsMaxSize = 100000 + brokerMetricName = "inproxy_broker" +) + +// LookupGeoIP is a callback for providing GeoIP lookup service. +type LookupGeoIP func(IP string) common.GeoIPData + +// ExtendTransportTimeout is a callback that extends the timeout for a +// server-side broker transport handler, facilitating request-specific +// timeouts including long-polling for proxy announcements. +type ExtendTransportTimeout func(timeout time.Duration) + +// GetTactics is a callback which returns the appropriate tactics for the +// specified client/proxy GeoIP data and API parameters. 
+type GetTactics func(common.GeoIPData, common.APIParameters) ([]byte, string, error) + +// Broker is the in-proxy broker component, which matches clients and proxies +// and provides WebRTC signaling functionalty. +// +// Both clients and proxies send requests to the broker to obtain matches and +// exchange WebRTC SDPs. Broker does not implement a transport or obfuscation +// layer; instead that is provided by the HandleSessionPacket caller. A +// typical implementation would provide a domain fronted web server which +// runs a Broker and calls Broker.HandleSessionPacket to handle web requests +// encapsulating secure session packets. +type Broker struct { + config *BrokerConfig + initiatorSessions *InitiatorSessions + responderSessions *ResponderSessions + matcher *Matcher + pendingServerReports *lrucache.Cache + + commonCompartmentsMutex sync.Mutex + commonCompartments *consistent.Consistent + + proxyAnnounceTimeout int64 + clientOfferTimeout int64 + pendingServerReportsTTL int64 + + maxCompartmentIDs int64 +} + +// BrokerConfig specifies the configuration for a Broker. +type BrokerConfig struct { + + // Logger is used to log events. + Logger common.Logger + + // CommonCompartmentIDs is a list of common compartment IDs to apply to + // proxies that announce without personal compartment ID. Common + // compartment IDs are managed by Psiphon and distributed to clients via + // tactics or embedded in OSLs. Clients must supply a valid compartment + // ID to match with a proxy. + // + // A BrokerConfig must supply at least one compartment ID, or + // SetCompartmentIDs must be called with at least one compartment ID + // before calling Start. + // + // When only one, single common compartment ID is configured, it can serve + // as an (obfuscation) secret that clients must obtain, via tactics, to + // enable in-proxy participation. + CommonCompartmentIDs []ID + + // AllowProxy is a callback which can indicate whether a proxy with the + // given GeoIP data is allowed to match with common compartment ID + // clients. Proxies with personal compartment IDs are always allowed. + AllowProxy func(common.GeoIPData) bool + + // AllowClient is a callback which can indicate whether a client with the + // given GeoIP data is allowed to match with common compartment ID + // proxies. Clients are always allowed to match based on personal + // compartment ID. + AllowClient func(common.GeoIPData) bool + + // AllowDomainFrontedDestinations is a callback which can indicate whether + // a client with the given GeoIP data is allowed to specify a proxied + // destination for a domain fronted protocol. When false, only direct + // address destinations are allowed. + // + // While tactics may may be set to instruct clients to use only direct + // server tunnel protocols, with IP address destinations, this callback + // adds server-side enforcement. + AllowDomainFrontedDestinations func(common.GeoIPData) bool + + // LookupGeoIP provides GeoIP lookup service. + LookupGeoIP LookupGeoIP + + // APIParameterValidator is a callback that validates base API metrics. + APIParameterValidator common.APIParameterValidator + + // APIParameterValidator is a callback that formats base API metrics. + APIParameterLogFieldFormatter common.APIParameterLogFieldFormatter + + // GetTactics provides a tactics lookup service. + GetTactics GetTactics + + // IsValidServerEntryTag is a callback which checks if the specified + // server entry tag is on the list of valid and active Psiphon server + // entry tags. 
+ IsValidServerEntryTag func(serverEntryTag string) bool + + // PrivateKey is the broker's secure session long term private key. + PrivateKey SessionPrivateKey + + // ObfuscationRootSecret broker's secure session long term obfuscation key. + ObfuscationRootSecret ObfuscationSecret + + // ServerEntrySignaturePublicKey is the key used to verify Psiphon server + // entry signatures. + ServerEntrySignaturePublicKey string + + // These timeout parameters may be used to override defaults. + ProxyAnnounceTimeout time.Duration + ClientOfferTimeout time.Duration + PendingServerReportsTTL time.Duration + + // Announcement queue limit configuration. + MatcherAnnouncementLimitEntryCount int + MatcherAnnouncementRateLimitQuantity int + MatcherAnnouncementRateLimitInterval time.Duration + MatcherAnnouncementNonlimitedProxyIDs []ID + + // Offer queue limit configuration. + MatcherOfferLimitEntryCount int + MatcherOfferRateLimitQuantity int + MatcherOfferRateLimitInterval time.Duration + + // MaxCompartmentIDs specifies the maximum number of compartment IDs that + // can be included, per list, in one request. If 0, the value + // MaxCompartmentIDs is used. + MaxCompartmentIDs int +} + +// NewBroker initializes a new Broker. +func NewBroker(config *BrokerConfig) (*Broker, error) { + + // initiatorSessions are secure sessions initiated by the broker and used + // to send BrokerServerReports to servers. The servers will be + // configured to establish sessions only with brokers with specified + // public keys. + + initiatorSessions := NewInitiatorSessions(config.PrivateKey) + + // responderSessions are secure sessions initiated by clients and proxies + // and used to send requests to the broker. Clients and proxies are + // configured to establish sessions only with specified broker public keys. 
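+	// For illustration only (hypothetical host wiring, not part of this
+	// change): a host constructs and runs a Broker roughly as follows, with
+	// the logger, callbacks, and key material supplied by the host:
+	//
+	//	broker, err := inproxy.NewBroker(&inproxy.BrokerConfig{
+	//		Logger:                         logger,
+	//		CommonCompartmentIDs:           commonCompartmentIDs,
+	//		AllowProxy:                     allowProxy,
+	//		AllowClient:                    allowClient,
+	//		AllowDomainFrontedDestinations: allowDomainFronted,
+	//		LookupGeoIP:                    lookupGeoIP,
+	//		APIParameterValidator:          validator,
+	//		APIParameterLogFieldFormatter:  formatter,
+	//		GetTactics:                     getTactics,
+	//		IsValidServerEntryTag:          isValidServerEntryTag,
+	//		PrivateKey:                     brokerPrivateKey,
+	//		ObfuscationRootSecret:          obfuscationRootSecret,
+	//		ServerEntrySignaturePublicKey:  signaturePublicKey,
+	//	})
+	//	if err != nil {
+	//		return errors.Trace(err)
+	//	}
+	//	if err := broker.Start(); err != nil {
+	//		return errors.Trace(err)
+	//	}
+	//	defer broker.Stop()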
+ + responderSessions, err := NewResponderSessions( + config.PrivateKey, config.ObfuscationRootSecret) + if err != nil { + return nil, errors.Trace(err) + } + + b := &Broker{ + config: config, + initiatorSessions: initiatorSessions, + responderSessions: responderSessions, + matcher: NewMatcher(&MatcherConfig{ + Logger: config.Logger, + + AnnouncementLimitEntryCount: config.MatcherAnnouncementLimitEntryCount, + AnnouncementRateLimitQuantity: config.MatcherAnnouncementRateLimitQuantity, + AnnouncementRateLimitInterval: config.MatcherAnnouncementRateLimitInterval, + AnnouncementNonlimitedProxyIDs: config.MatcherAnnouncementNonlimitedProxyIDs, + OfferLimitEntryCount: config.MatcherOfferLimitEntryCount, + OfferRateLimitQuantity: config.MatcherOfferRateLimitQuantity, + OfferRateLimitInterval: config.MatcherOfferRateLimitInterval, + }), + + proxyAnnounceTimeout: int64(config.ProxyAnnounceTimeout), + clientOfferTimeout: int64(config.ClientOfferTimeout), + pendingServerReportsTTL: int64(config.PendingServerReportsTTL), + + maxCompartmentIDs: int64(common.ValueOrDefault(config.MaxCompartmentIDs, MaxCompartmentIDs)), + } + + b.pendingServerReports = lrucache.NewWithLRU( + common.ValueOrDefault(config.PendingServerReportsTTL, brokerPendingServerReportsTTL), + 1*time.Minute, + brokerPendingServerReportsMaxSize) + + if len(config.CommonCompartmentIDs) > 0 { + err = b.initializeCommonCompartmentIDHashing(config.CommonCompartmentIDs) + if err != nil { + return nil, errors.Trace(err) + } + } + + return b, nil +} + +func (b *Broker) Start() error { + + if !b.isCommonCompartmentIDHashingInitialized() { + return errors.TraceNew("missing common compartment IDs") + } + + return errors.Trace(b.matcher.Start()) +} + +func (b *Broker) Stop() { + b.matcher.Stop() +} + +// SetCommonCompartmentIDs sets a new list of common compartment IDs, +// replacing the previous configuration. +func (b *Broker) SetCommonCompartmentIDs(commonCompartmentIDs []ID) error { + + // TODO: initializeCommonCompartmentIDHashing is called regardless whether + // commonCompartmentIDs changes the previous configuration. To avoid the + // overhead of consistent hashing initialization in + // initializeCommonCompartmentIDHashing, add a mechanism to first quickly + // check for changes? + + return errors.Trace(b.initializeCommonCompartmentIDHashing(commonCompartmentIDs)) +} + +// SetTimeouts sets new timeout values, replacing the previous configuration. +// New timeout values do not apply to currently active announcement or offer +// requests. +func (b *Broker) SetTimeouts( + proxyAnnounceTimeout time.Duration, + clientOfferTimeout time.Duration, + pendingServerReportsTTL time.Duration) { + + atomic.StoreInt64(&b.proxyAnnounceTimeout, int64(proxyAnnounceTimeout)) + atomic.StoreInt64(&b.clientOfferTimeout, int64(clientOfferTimeout)) + atomic.StoreInt64(&b.pendingServerReportsTTL, int64(pendingServerReportsTTL)) +} + +// SetLimits sets new queue limit values, replacing the previous +// configuration. New limits are only partially applied to existing queue +// states; see Matcher.SetLimits. 
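+//
+// For illustration only (not part of this change), a host that periodically
+// reloads tactics might reapply values roughly as follows; the hostTactics
+// type and its fields are hypothetical placeholders:
+//
+//	func applyBrokerTactics(b *Broker, t *hostTactics) error {
+//		b.SetTimeouts(
+//			t.ProxyAnnounceTimeout,
+//			t.ClientOfferTimeout,
+//			t.PendingServerReportsTTL)
+//		b.SetLimits(
+//			t.AnnouncementLimitEntryCount,
+//			t.AnnouncementRateLimitQuantity,
+//			t.AnnouncementRateLimitInterval,
+//			t.AnnouncementNonlimitedProxyIDs,
+//			t.OfferLimitEntryCount,
+//			t.OfferRateLimitQuantity,
+//			t.OfferRateLimitInterval,
+//			t.MaxCompartmentIDs)
+//		return errors.Trace(b.SetCommonCompartmentIDs(t.CommonCompartmentIDs))
+//	}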
+func (b *Broker) SetLimits( + matcherAnnouncementLimitEntryCount int, + matcherAnnouncementRateLimitQuantity int, + matcherAnnouncementRateLimitInterval time.Duration, + matcherAnnouncementNonlimitedProxyIDs []ID, + matcherOfferLimitEntryCount int, + matcherOfferRateLimitQuantity int, + matcherOfferRateLimitInterval time.Duration, + maxCompartmentIDs int) { + + b.matcher.SetLimits( + matcherAnnouncementLimitEntryCount, + matcherAnnouncementRateLimitQuantity, + matcherAnnouncementRateLimitInterval, + matcherAnnouncementNonlimitedProxyIDs, + matcherOfferLimitEntryCount, + matcherOfferRateLimitQuantity, + matcherOfferRateLimitInterval) + + atomic.StoreInt64( + &b.maxCompartmentIDs, + int64(common.ValueOrDefault(maxCompartmentIDs, MaxCompartmentIDs))) +} + +// HandleSessionPacket handles a session packet from a client or proxy and +// provides a response packet. The packet is part of a secure session and may +// be a session handshake message, an expired session reset token, or a +// session-wrapped request payload. Request payloads are routed to API +// request endpoints. +// +// The caller is expected to provide a transport obfuscation layer, such as +// domain fronted HTTPs. The session has an obfuscation layer that ensures +// that packets are fully random, randomly padded, and cannot be replayed. +// This makes session packets suitable to embed as plaintext in some +// transports. +// +// The caller is responsible for rate limiting and enforcing timeouts and +// maximum payload size checks. +// +// Secure sessions support multiplexing concurrent requests, as long as the +// provided transport, for example HTTP/2, supports this as well. +// +// The input ctx should be canceled if the client/proxy disconnects from the +// transport while HandleSessionPacket is running, since long-polling proxy +// announcement requests will otherwise remain blocked until eventual +// timeout; net/http does this. +// +// When HandleSessionPacket returns an error, the transport provider should +// apply anti-probing mechanisms, as the client/proxy may be a prober or +// scanner. +func (b *Broker) HandleSessionPacket( + ctx context.Context, + extendTransportTimeout ExtendTransportTimeout, + transportLogFields common.LogFields, + brokerClientIP string, + geoIPData common.GeoIPData, + inPacket []byte) ([]byte, error) { + + // handleUnwrappedRequest handles requests after session unwrapping. + // responderSessions.HandlePacket handles both session establishment and + // request unwrapping, and invokes handleUnwrappedRequest once a session + // is established and a valid request unwrapped. 
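+	// For illustration only (hypothetical host code, not part of this
+	// change): a domain fronted web server hosting the broker might bridge
+	// HTTP requests to HandleSessionPacket roughly as follows; the GeoIP
+	// lookup and write-deadline extension are placeholders for host-specific
+	// wiring.
+	//
+	//	func brokerHTTPHandler(broker *inproxy.Broker, lookupGeoIP inproxy.LookupGeoIP) http.Handler {
+	//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	//			body := http.MaxBytesReader(w, r.Body, inproxy.BrokerMaxRequestBodySize)
+	//			inPacket, err := io.ReadAll(body)
+	//			if err != nil {
+	//				http.Error(w, "", http.StatusNotFound)
+	//				return
+	//			}
+	//			clientIP, _, _ := net.SplitHostPort(r.RemoteAddr)
+	//			extendTimeout := func(timeout time.Duration) {
+	//				// e.g., extend the response write deadline by timeout.
+	//			}
+	//			outPacket, err := broker.HandleSessionPacket(
+	//				r.Context(), extendTimeout, nil, clientIP, lookupGeoIP(clientIP), inPacket)
+	//			if err != nil {
+	//				// Apply anti-probing behavior for invalid requests.
+	//				http.Error(w, "", http.StatusNotFound)
+	//				return
+	//			}
+	//			w.Write(outPacket)
+	//		})
+	//	}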
+ + handleUnwrappedRequest := func(initiatorID ID, unwrappedRequestPayload []byte) ([]byte, error) { + + recordType, err := peekRecordPreambleType(unwrappedRequestPayload) + if err != nil { + return nil, errors.Trace(err) + } + + var responsePayload []byte + + switch recordType { + case recordTypeAPIProxyAnnounceRequest: + responsePayload, err = b.handleProxyAnnounce( + ctx, + extendTransportTimeout, + transportLogFields, + brokerClientIP, + geoIPData, + initiatorID, + unwrappedRequestPayload) + if err != nil { + return nil, errors.Trace(err) + } + case recordTypeAPIProxyAnswerRequest: + responsePayload, err = b.handleProxyAnswer( + ctx, + extendTransportTimeout, + transportLogFields, + brokerClientIP, + geoIPData, + initiatorID, + unwrappedRequestPayload) + if err != nil { + return nil, errors.Trace(err) + } + case recordTypeAPIClientOfferRequest: + responsePayload, err = b.handleClientOffer( + ctx, + extendTransportTimeout, + transportLogFields, + brokerClientIP, + geoIPData, + initiatorID, + unwrappedRequestPayload) + if err != nil { + return nil, errors.Trace(err) + } + case recordTypeAPIClientRelayedPacketRequest: + responsePayload, err = b.handleClientRelayedPacket( + ctx, + extendTransportTimeout, + transportLogFields, + geoIPData, + initiatorID, + unwrappedRequestPayload) + if err != nil { + return nil, errors.Trace(err) + } + default: + return nil, errors.Tracef("unexpected API record type %v", recordType) + } + + return responsePayload, nil + + } + + // HandlePacket returns both a packet and an error in the expired session + // reset token case. Log the error here, clear it, and return the + // packetto be relayed back to the broker client. + + outPacket, err := b.responderSessions.HandlePacket( + inPacket, handleUnwrappedRequest) + if err != nil { + if outPacket == nil { + return nil, errors.Trace(err) + } + b.config.Logger.WithTraceFields(common.LogFields{"error": err}).Warning( + "HandlePacket returned packet and error") + } + return outPacket, nil +} + +// handleProxyAnnounce receives a proxy announcement, awaits a matching +// client, and returns the client offer in the response. handleProxyAnnounce +// has a long timeout so this request can idle until a matching client +// arrives. +func (b *Broker) handleProxyAnnounce( + ctx context.Context, + extendTransportTimeout ExtendTransportTimeout, + transportLogFields common.LogFields, + proxyIP string, + geoIPData common.GeoIPData, + initiatorID ID, + requestPayload []byte) (retResponse []byte, retErr error) { + + startTime := time.Now() + + var logFields common.LogFields + var newTacticsTag string + var clientOffer *MatchOffer + var matchMetrics *MatchMetrics + var timedOut bool + var limitedErr error + + // As a future enhancement, a broker could initiate its own test + // connection to the proxy to verify its effectiveness, including + // simulating a symmetric NAT client. + + // Each announcement represents availability for a single client matching. + // Proxies with multiple client availability will send multiple requests. + // + // The announcement request and response could be extended to allow the + // proxy to specify availability for multiple clients in the request, and + // multiple client offers returned in the response. + // + // If, as we expect, proxies run on home ISPs have limited upstream + // bandwidth, they will support only a couple of concurrent clients, and + // the simple single-client-announcment model may be sufficient. 
Also, if + // the transport is HTTP/2, multiple requests can be multiplexed over a + // single connection (and session) in any case. + + // The proxy ID is an implicit parameter: it's the proxy's session public + // key. As part of the session handshake, the proxy has proven that it + // has the corresponding private key. Proxy IDs are logged to attribute + // traffic to a specific proxy. + + proxyID := initiatorID + + // Generate a connection ID. This ID is used to associate proxy + // announcments, client offers, and proxy answers, as well as associating + // Psiphon tunnels with in-proxy pairings. + connectionID, err := MakeID() + if err != nil { + return nil, errors.Trace(err) + } + + // Always log the outcome. + defer func() { + if logFields == nil { + logFields = b.config.APIParameterLogFieldFormatter(geoIPData, nil) + } + logFields["broker_event"] = "proxy-announce" + logFields["proxy_id"] = proxyID + logFields["elapsed_time"] = time.Since(startTime) / time.Millisecond + logFields["connection_id"] = connectionID + if newTacticsTag != "" { + logFields["new_tactics_tag"] = newTacticsTag + } + if clientOffer != nil { + // Log the target Psiphon server ID (diagnostic ID). The presence + // of this field indicates that a match was made. + logFields["destination_server_id"] = clientOffer.DestinationServerID + } + if timedOut { + logFields["timed_out"] = true + } + if retErr != nil { + logFields["error"] = retErr.Error() + } else if limitedErr != nil { + logFields["error"] = limitedErr.Error() + } + logFields.Add(transportLogFields) + logFields.Add(matchMetrics.GetMetrics()) + b.config.Logger.LogMetric(brokerMetricName, logFields) + }() + + announceRequest, err := UnmarshalProxyAnnounceRequest(requestPayload) + if err != nil { + return nil, errors.Trace(err) + } + + var apiParams common.APIParameters + apiParams, logFields, err = announceRequest.ValidateAndGetParametersAndLogFields( + int(atomic.LoadInt64(&b.maxCompartmentIDs)), + b.config.APIParameterValidator, + b.config.APIParameterLogFieldFormatter, + geoIPData) + if err != nil { + return nil, errors.Trace(err) + } + + // Fetch new tactics for the proxy, if required, using the tactics tag + // that should be included with the API parameters. A tacticsPayload may + // be returned when there are no new tactics, and this is relayed back to + // the proxy, after matching, so that it can extend the TTL for its + // existing, cached tactics. In the case where tactics have changed, + // don't enqueue the proxy announcement and return no-match so that the + // proxy can store and apply the new tactics before announcing again. + + var tacticsPayload []byte + tacticsPayload, newTacticsTag, err = b.config.GetTactics(geoIPData, apiParams) + if err != nil { + return nil, errors.Trace(err) + } + + if tacticsPayload != nil && newTacticsTag != "" { + responsePayload, err := MarshalProxyAnnounceResponse( + &ProxyAnnounceResponse{ + TacticsPayload: tacticsPayload, + NoMatch: true, + }) + if err != nil { + return nil, errors.Trace(err) + } + + return responsePayload, nil + } + + // AllowProxy may be used to disallow proxies from certain geolocations, + // such as censored locations, from announcing. Proxies with personal + // compartment IDs are always allowed, as they will be used only by + // clients specifically configured to use them. 
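+	// For illustration only (not part of this change): AllowProxy and
+	// AllowClient are host-supplied policy callbacks. A minimal wiring,
+	// with hypothetical country sets, might look like:
+	//
+	//	config.AllowProxy = func(geoIP common.GeoIPData) bool {
+	//		// Don't let proxies in censored regions serve common-compartment clients.
+	//		return !censoredCountries[geoIP.Country]
+	//	}
+	//	config.AllowClient = func(geoIP common.GeoIPData) bool {
+	//		// Only clients in targeted regions may match common-compartment proxies.
+	//		return targetedCountries[geoIP.Country]
+	//	}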
+ + if len(announceRequest.PersonalCompartmentIDs) == 0 && + !b.config.AllowProxy(geoIPData) { + + return nil, errors.TraceNew("proxy disallowed") + } + + // Assign this proxy to a common compartment ID, unless it has specified a + // dedicated, personal compartment ID. Assignment uses consistent hashing + // keyed with the proxy ID, in an effort to keep proxies consistently + // assigned to the same compartment. + + var commonCompartmentIDs []ID + if len(announceRequest.PersonalCompartmentIDs) == 0 { + compartmentID, err := b.selectCommonCompartmentID(proxyID) + if err != nil { + return nil, errors.Trace(err) + } + commonCompartmentIDs = []ID{compartmentID} + } + + // Await client offer. + + timeout := common.ValueOrDefault( + time.Duration(atomic.LoadInt64(&b.proxyAnnounceTimeout)), + brokerProxyAnnounceTimeout) + announceCtx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + extendTransportTimeout(timeout) + + clientOffer, matchMetrics, err = b.matcher.Announce( + announceCtx, + proxyIP, + &MatchAnnouncement{ + Properties: MatchProperties{ + CommonCompartmentIDs: commonCompartmentIDs, + PersonalCompartmentIDs: announceRequest.PersonalCompartmentIDs, + GeoIPData: geoIPData, + NetworkType: GetNetworkType(announceRequest.Metrics.BaseAPIParameters), + NATType: announceRequest.Metrics.NATType, + PortMappingTypes: announceRequest.Metrics.PortMappingTypes, + }, + ProxyID: initiatorID, + ConnectionID: connectionID, + ProxyProtocolVersion: announceRequest.Metrics.ProxyProtocolVersion, + }) + if err != nil { + + var limitError *MatcherLimitError + limited := std_errors.As(err, &limitError) + + timeout := announceCtx.Err() == context.DeadlineExceeded + + if !limited && !timeout { + return nil, errors.Trace(err) + } + + // A no-match response is sent in the case of a timeout awaiting a + // match. The faster-failing rate or entry limiting case also results + // in a response, rather than an error return from handleProxyAnnounce, + // so that the proxy doesn't receive a 404 and flag its BrokerClient as + // having failed. + // + // When the timeout and limit case coincide, limit takes precedence in + // the response. + + if timeout && !limited { + + // Note: the respective proxy and broker timeouts, + // InproxyBrokerProxyAnnounceTimeout and + // InproxyProxyAnnounceRequestTimeout in tactics, should be + // configured so that the broker will timeout first and have an + // opportunity to send this response before the proxy times out. + + timedOut = true + + } else { + + // Record the specific limit error in the proxy-announce broker event. + + limitedErr = err + } + + responsePayload, err := MarshalProxyAnnounceResponse( + &ProxyAnnounceResponse{ + TacticsPayload: tacticsPayload, + Limited: limited, + NoMatch: timeout && !limited, + }) + if err != nil { + return nil, errors.Trace(err) + } + + return responsePayload, nil + } + + // Respond with the client offer. The proxy will follow up with an answer + // request, which is relayed to the client, and then the WebRTC dial begins. + + // Limitation: as part of the client's tunnel establishment horse race, a + // client may abort an in-proxy dial at any point. If the overall dial is + // past the SDP exchange and aborted during the WebRTC connection + // establishment, the client may leave the proxy's Proxy.proxyOneClient + // dangling until timeout. Consider adding a signal from the client to + // the proxy, relayed by the broker, that a dial is aborted. 
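+	// For illustration only (not part of this change): on the proxy side,
+	// the long-polling announcements serviced by this handler are driven by
+	// a loop roughly like the following; handleTacticsPayload and
+	// handleMatch are hypothetical placeholders for proxy-side logic.
+	//
+	//	for ctx.Err() == nil {
+	//		response, err := brokerClient.ProxyAnnounce(ctx, 0, announceRequest)
+	//		if err != nil {
+	//			// Back off and retry; the round tripper may be replaced on failure.
+	//			continue
+	//		}
+	//		// Store new tactics, or extend the TTL of cached tactics.
+	//		handleTacticsPayload(response.TacticsPayload)
+	//		if response.Limited || response.NoMatch {
+	//			// No match was made; announce again, applying any new tactics first.
+	//			continue
+	//		}
+	//		// Matched: complete the WebRTC dial using response.ClientOfferSDP and
+	//		// response.ConnectionID, then send a ProxyAnswer request.
+	//		handleMatch(ctx, response)
+	//	}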
+ + responsePayload, err := MarshalProxyAnnounceResponse( + &ProxyAnnounceResponse{ + TacticsPayload: tacticsPayload, + ConnectionID: connectionID, + ClientProxyProtocolVersion: clientOffer.ClientProxyProtocolVersion, + ClientOfferSDP: clientOffer.ClientOfferSDP, + ClientRootObfuscationSecret: clientOffer.ClientRootObfuscationSecret, + DoDTLSRandomization: clientOffer.DoDTLSRandomization, + TrafficShapingParameters: clientOffer.TrafficShapingParameters, + NetworkProtocol: clientOffer.NetworkProtocol, + DestinationAddress: clientOffer.DestinationAddress, + }) + if err != nil { + return nil, errors.Trace(err) + } + + return responsePayload, nil +} + +// handleClientOffer receives a client offer, awaits a matching client, and +// returns the proxy answer. handleClientOffer has a shorter timeout than +// handleProxyAnnounce since the client has supplied an SDP with STUN hole +// punches which will expire; and, in general, the client is trying to +// connect immediately and is also trying other candidates. +func (b *Broker) handleClientOffer( + ctx context.Context, + extendTransportTimeout ExtendTransportTimeout, + transportLogFields common.LogFields, + clientIP string, + geoIPData common.GeoIPData, + initiatorID ID, + requestPayload []byte) (retResponse []byte, retErr error) { + + // As a future enhancement, consider having proxies send offer SDPs with + // announcements and clients long poll to await a match and then provide + // an answer. This order of operations would make sense if client demand + // is high and proxy supply is lower. + // + // Also see comment in Proxy.proxyOneClient for other alternative + // approaches. + + // The client's session public key is ephemeral and is not logged. + + startTime := time.Now() + + var logFields common.LogFields + var serverParams *serverParams + var clientMatchOffer *MatchOffer + var proxyMatchAnnouncement *MatchAnnouncement + var proxyAnswer *MatchAnswer + var matchMetrics *MatchMetrics + var timedOut bool + var limitedErr error + + // Always log the outcome. + defer func() { + if logFields == nil { + logFields = b.config.APIParameterLogFieldFormatter(geoIPData, nil) + } + logFields["broker_event"] = "client-offer" + if serverParams != nil { + logFields["destination_server_id"] = serverParams.serverID + } + logFields["elapsed_time"] = time.Since(startTime) / time.Millisecond + if proxyAnswer != nil { + + // The presence of these fields indicate that a match was made, + // the proxy delivered an answer, and the client was still + // waiting for it. + + logFields["connection_id"] = proxyAnswer.ConnectionID + logFields["client_nat_type"] = clientMatchOffer.Properties.NATType + logFields["client_port_mapping_types"] = clientMatchOffer.Properties.PortMappingTypes + logFields["proxy_nat_type"] = proxyMatchAnnouncement.Properties.NATType + logFields["proxy_port_mapping_types"] = proxyMatchAnnouncement.Properties.PortMappingTypes + logFields["preferred_nat_match"] = + clientMatchOffer.Properties.IsPreferredNATMatch(&proxyMatchAnnouncement.Properties) + + // TODO: also log proxy ice_candidate_types and has_IPv6; for the + // client, these values are added by ValidateAndGetLogFields. 
+ } + if timedOut { + logFields["timed_out"] = true + } + if retErr != nil { + logFields["error"] = retErr.Error() + } else if limitedErr != nil { + logFields["error"] = limitedErr.Error() + } + logFields.Add(transportLogFields) + logFields.Add(matchMetrics.GetMetrics()) + b.config.Logger.LogMetric(brokerMetricName, logFields) + }() + + offerRequest, err := UnmarshalClientOfferRequest(requestPayload) + if err != nil { + return nil, errors.Trace(err) + } + + // The filtered SDP is the request SDP with any invalid (bogon, unexpected + // GeoIP) ICE candidates filtered out. In some cases, clients cannot + // avoid submitting invalid candidates (see comment in + // processSDPAddresses), so all invalid candidates are removed and the + // remaining SDP is used. Filtered candidate information is logged in + // logFields. + + var filteredSDP []byte + filteredSDP, logFields, err = offerRequest.ValidateAndGetLogFields( + int(atomic.LoadInt64(&b.maxCompartmentIDs)), + b.config.LookupGeoIP, + b.config.APIParameterValidator, + b.config.APIParameterLogFieldFormatter, + geoIPData) + if err != nil { + return nil, errors.Trace(err) + } + + offerSDP := offerRequest.ClientOfferSDP + offerSDP.SDP = string(filteredSDP) + + // AllowClient may be used to disallow clients from certain geolocations + // from offering. Clients are always allowed to match proxies with shared + // personal compartment IDs. + + commonCompartmentIDs := offerRequest.CommonCompartmentIDs + + if !b.config.AllowClient(geoIPData) { + + if len(offerRequest.PersonalCompartmentIDs) == 0 { + return nil, errors.TraceNew("client disallowed") + } + + // Only match personal compartment IDs. + commonCompartmentIDs = nil + } + + // Validate that the proxy destination specified by the client is a valid + // dial address for a signed Psiphon server entry. This ensures a client + // can't misuse a proxy to connect to arbitrary destinations. + + serverParams, err = b.validateDestination( + geoIPData, + offerRequest.PackedDestinationServerEntry, + offerRequest.NetworkProtocol, + offerRequest.DestinationAddress) + if err != nil { + return nil, errors.Trace(err) + } + + // Enqueue the client offer and await a proxy matching and subsequent + // proxy answer. 
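+	// For illustration only (not part of this change): the client-side
+	// counterpart to this handler assembles the offer roughly as follows;
+	// the metrics, SDP, compartment ID, and server entry values are
+	// hypothetical placeholders produced elsewhere in the client dial.
+	//
+	//	offerResponse, err := brokerClient.ClientOffer(ctx, &inproxy.ClientOfferRequest{
+	//		Metrics:                      clientMetrics,
+	//		CommonCompartmentIDs:         commonCompartmentIDs,
+	//		PersonalCompartmentIDs:       personalCompartmentIDs,
+	//		ClientOfferSDP:               webRTCOfferSDP,
+	//		PackedDestinationServerEntry: signedServerEntry,
+	//		NetworkProtocol:              dialNetworkProtocol,
+	//		DestinationAddress:           destinationAddress,
+	//	})
+	//	if err != nil || offerResponse.NoMatch || offerResponse.Limited {
+	//		// Abandon or retry this in-proxy dial candidate.
+	//	}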
+ + timeout := common.ValueOrDefault( + time.Duration(atomic.LoadInt64(&b.clientOfferTimeout)), + brokerClientOfferTimeout) + offerCtx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + extendTransportTimeout(timeout) + + clientMatchOffer = &MatchOffer{ + Properties: MatchProperties{ + CommonCompartmentIDs: commonCompartmentIDs, + PersonalCompartmentIDs: offerRequest.PersonalCompartmentIDs, + GeoIPData: geoIPData, + NetworkType: GetNetworkType(offerRequest.Metrics.BaseAPIParameters), + NATType: offerRequest.Metrics.NATType, + PortMappingTypes: offerRequest.Metrics.PortMappingTypes, + }, + ClientProxyProtocolVersion: offerRequest.Metrics.ProxyProtocolVersion, + ClientOfferSDP: offerSDP, + ClientRootObfuscationSecret: offerRequest.ClientRootObfuscationSecret, + DoDTLSRandomization: offerRequest.DoDTLSRandomization, + TrafficShapingParameters: offerRequest.TrafficShapingParameters, + NetworkProtocol: offerRequest.NetworkProtocol, + DestinationAddress: offerRequest.DestinationAddress, + DestinationServerID: serverParams.serverID, + } + + proxyAnswer, proxyMatchAnnouncement, matchMetrics, err = b.matcher.Offer( + offerCtx, + clientIP, + clientMatchOffer) + if err != nil { + + var limitError *MatcherLimitError + limited := std_errors.As(err, &limitError) + + timeout := offerCtx.Err() == context.DeadlineExceeded + + if !limited && !timeout { + return nil, errors.Trace(err) + } + + // A no-match response is sent in the case of a timeout awaiting a + // match. The faster-failing rate or entry limiting case also results + // in a response, rather than an error return from handleClientOffer, + // so that the client doesn't receive a 404 and flag its BrokerClient + // as having failed. + // + // When the timeout and limit case coincide, limit takes precedence in + // the response. + + if timeout && !limited { + + // Note: the respective client and broker timeouts, + // InproxyBrokerClientOfferTimeout and + // InproxyClientOfferRequestTimeout in tactics, should be configured + // so that the broker will timeout first and have an opportunity to + // send this response before the client times out. + + timedOut = true + + } else { + + // Record the specific limit error in the client-offer broker event. + + limitedErr = err + } + + responsePayload, err := MarshalClientOfferResponse( + &ClientOfferResponse{ + Limited: limited, + NoMatch: timeout && !limited, + }) + if err != nil { + return nil, errors.Trace(err) + } + + return responsePayload, nil + } + + // Log the type of compartment matching that occurred. As + // PersonalCompartmentIDs are user-generated and shared, actual matching + // values are not logged as they may link users. + + // TODO: log matching common compartment IDs? + + matchedCommonCompartments := HaveCommonIDs( + proxyMatchAnnouncement.Properties.CommonCompartmentIDs, + clientMatchOffer.Properties.CommonCompartmentIDs) + + matchedPersonalCompartments := HaveCommonIDs( + proxyMatchAnnouncement.Properties.PersonalCompartmentIDs, + clientMatchOffer.Properties.PersonalCompartmentIDs) + + // Initiate a BrokerServerReport, which sends important information + // about the connection, including the original client IP, plus other + // values to be logged with server_tunne, to the server. The report is + // sent through a secure session established between the broker and the + // server, relayed by the client. + // + + // The first relay message will be embedded in the Psiphon handshake. The + // broker may already have an established session with the server. 
In + // this case, only only that initial message is required. The + // BrokerServerReport is a one-way message, which avoids extra untunneled + // client/broker traffic. + // + // Limitations, due to the one-way message: + // - the broker can't actively clean up pendingServerReports as + // tunnels are established and must rely on cache expiry. + // - the broker doesn't learn that the server accepted the report, and + // so cannot log a final connection status or signal the proxy to + // disconnect the client in any misuse cases. + // + // As a future enhancement, consider adding a _tunneled_ client relay + // of a server response acknowledging the broker report. + + relayPacket, err := b.initiateRelayedServerReport( + serverParams, + proxyAnswer.ConnectionID, + &BrokerServerReport{ + ProxyID: proxyAnswer.ProxyID, + ConnectionID: proxyAnswer.ConnectionID, + MatchedCommonCompartments: matchedCommonCompartments, + MatchedPersonalCompartments: matchedPersonalCompartments, + ProxyNATType: proxyMatchAnnouncement.Properties.NATType, + ProxyPortMappingTypes: proxyMatchAnnouncement.Properties.PortMappingTypes, + ClientNATType: clientMatchOffer.Properties.NATType, + ClientPortMappingTypes: clientMatchOffer.Properties.PortMappingTypes, + ClientIP: clientIP, + ProxyIP: proxyAnswer.ProxyIP, + }) + if err != nil { + return nil, errors.Trace(err) + } + + // Respond with the proxy answer and initial broker/server session packet. + + responsePayload, err := MarshalClientOfferResponse( + &ClientOfferResponse{ + ConnectionID: proxyAnswer.ConnectionID, + SelectedProxyProtocolVersion: proxyAnswer.SelectedProxyProtocolVersion, + ProxyAnswerSDP: proxyAnswer.ProxyAnswerSDP, + RelayPacketToServer: relayPacket, + }) + if err != nil { + return nil, errors.Trace(err) + } + + return responsePayload, nil +} + +// handleProxyAnswer receives a proxy answer and delivers it to the waiting +// client. +func (b *Broker) handleProxyAnswer( + ctx context.Context, + extendTransportTimeout ExtendTransportTimeout, + transportLogFields common.LogFields, + proxyIP string, + geoIPData common.GeoIPData, + initiatorID ID, + requestPayload []byte) (retResponse []byte, retErr error) { + + startTime := time.Now() + + var logFields common.LogFields + var proxyAnswer *MatchAnswer + var answerError string + + // The proxy ID is an implicit parameter: it's the proxy's session public + // key. + proxyID := initiatorID + + // Always log the outcome. + defer func() { + if logFields == nil { + logFields = b.config.APIParameterLogFieldFormatter(geoIPData, nil) + } + logFields["broker_event"] = "proxy-answer" + logFields["proxy_id"] = proxyID + logFields["elapsed_time"] = time.Since(startTime) / time.Millisecond + if proxyAnswer != nil { + logFields["connection_id"] = proxyAnswer.ConnectionID + } + if answerError != "" { + // This is a proxy-reported error that occurred while creating the answer. + logFields["answer_error"] = answerError + } + if retErr != nil { + logFields["error"] = retErr.Error() + } + logFields.Add(transportLogFields) + b.config.Logger.LogMetric(brokerMetricName, logFields) + }() + + answerRequest, err := UnmarshalProxyAnswerRequest(requestPayload) + if err != nil { + return nil, errors.Trace(err) + } + + // The filtered SDP is the request SDP with any invalid (bogon, unexpected + // GeoIP) ICE candidates filtered out. In some cases, proxies cannot + // avoid submitting invalid candidates (see comment in + // processSDPAddresses), so all invalid candidates are removed and the + // remaining SDP is used. 
Filtered candidate information is logged in + // logFields. + + var filteredSDP []byte + filteredSDP, logFields, err = answerRequest.ValidateAndGetLogFields( + b.config.LookupGeoIP, + b.config.APIParameterValidator, + b.config.APIParameterLogFieldFormatter, + geoIPData) + if err != nil { + return nil, errors.Trace(err) + } + + answerSDP := answerRequest.ProxyAnswerSDP + answerSDP.SDP = string(filteredSDP) + + if answerRequest.AnswerError != "" { + + // The proxy failed to create an answer. + + answerError = answerRequest.AnswerError + + b.matcher.AnswerError(initiatorID, answerRequest.ConnectionID) + + } else { + + // Deliver the answer to the client. + + // Note that neither ProxyID nor ProxyIP is returned to the client. + // These fields are used internally in the matcher. + + proxyAnswer = &MatchAnswer{ + ProxyIP: proxyIP, + ProxyID: initiatorID, + ConnectionID: answerRequest.ConnectionID, + SelectedProxyProtocolVersion: answerRequest.SelectedProxyProtocolVersion, + ProxyAnswerSDP: answerSDP, + } + + err = b.matcher.Answer(proxyAnswer) + if err != nil { + return nil, errors.Trace(err) + } + } + + // There is no data in this response, it's simply an acknowledgement that + // the answer was received. Upon receiving the response, the proxy should + // begin the WebRTC dial operation. + + responsePayload, err := MarshalProxyAnswerResponse( + &ProxyAnswerResponse{}) + if err != nil { + return nil, errors.Trace(err) + } + + return responsePayload, nil +} + +// handleClientRelayedPacket facilitates broker/server sessions. The initial +// packet from the broker is sent to the client in the ClientOfferResponse. +// The client sends that to the server in the Psiphon handshake. If the +// session was already established, the relay ends there. Otherwise, the +// client receives any packet sent back by the server and that server packet +// is then delivered to the broker in a ClientRelayedPacketRequest. If the +// session needs to be [re-]negotiated, there are additional +// ClientRelayedPacket round trips until the session is established and the +// BrokerServerReport is securely exchanged between the broker and server. +func (b *Broker) handleClientRelayedPacket( + ctx context.Context, + extendTransportTimeout ExtendTransportTimeout, + transportLogFields common.LogFields, + geoIPData common.GeoIPData, + initiatorID ID, + requestPayload []byte) (retResponse []byte, retErr error) { + + startTime := time.Now() + + var logFields common.LogFields + var relayedPacketRequest *ClientRelayedPacketRequest + var serverID string + + // Always log the outcome. 
+ defer func() { + if logFields == nil { + logFields = b.config.APIParameterLogFieldFormatter(geoIPData, nil) + } + logFields["broker_event"] = "client-relayed-packet" + logFields["elapsed_time"] = time.Since(startTime) / time.Millisecond + if relayedPacketRequest != nil { + logFields["connection_id"] = relayedPacketRequest.ConnectionID + } + if serverID != "" { + logFields["destination_server_id"] = serverID + } + if retErr != nil { + logFields["error"] = retErr.Error() + } + logFields.Add(transportLogFields) + b.config.Logger.LogMetric(brokerMetricName, logFields) + }() + + relayedPacketRequest, err := UnmarshalClientRelayedPacketRequest(requestPayload) + if err != nil { + return nil, errors.Trace(err) + } + + logFields, err = relayedPacketRequest.ValidateAndGetLogFields( + b.config.APIParameterValidator, + b.config.APIParameterLogFieldFormatter, + geoIPData) + if err != nil { + return nil, errors.Trace(err) + } + + // The relay state is associated with the connection ID. + + strConnectionID := string(relayedPacketRequest.ConnectionID[:]) + + entry, ok := b.pendingServerReports.Get(strConnectionID) + if !ok { + // The relay state is not found; it may have been evicted from the + // cache. The client will receive a generic error in this case and + // should stop relaying. Assuming the server is configured to require + // a BrokerServerReport, the tunnel will be terminated, so the + // client should also abandon the dial. + return nil, errors.TraceNew("no pending report") + } + pendingServerReport := entry.(*pendingServerReport) + + serverID = pendingServerReport.serverID + + // When the broker tried to use an existing session that was expired on the + // server, the server will respond here with a signed session reset token. The + // broker resets the session and starts to establish a new session. + // + // The non-waiting session establishment mode is used for broker/server + // sessions: if multiple clients concurrently try to relay new sessions, + // all establishments will happen in parallel without forcing any clients + // to wait for one client to lead the establishment. The last established + // session will be retained for reuse. + // + // If there is an error, the relayed packet is invalid. Drop the packet + // and return an error to be logged. Do _not_ reset the session, + // otherwise a malicious client could interrupt a valid broker/server + // session with a malformed packet. + + // Next is given a nil ctx since we're not waiting for any other client to + // establish the session. + out, _, err := pendingServerReport.roundTrip.Next( + nil, relayedPacketRequest.PacketFromServer) + if err != nil { + return nil, errors.Trace(err) + } + + if out == nil { + + // The BrokerServerReport is a one-way message, As a result, the relay + // never ends with broker receiving a response; it's either + // (re)handshaking or sending the one-way report. + + return nil, errors.TraceNew("unexpected nil packet") + } + + // Return the next broker packet for the client to relay to the server. + // When it receives a nil PacketToServer, the client will stop relaying. 
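+	// For illustration only (not part of this change): on the client, the
+	// relay this handler participates in looks roughly like the following;
+	// sendPacketToServer stands in for delivering the packet via the Psiphon
+	// handshake/API and is a hypothetical placeholder.
+	//
+	//	packetToServer := offerResponse.RelayPacketToServer
+	//	for packetToServer != nil {
+	//		packetFromServer := sendPacketToServer(packetToServer)
+	//		if packetFromServer == nil {
+	//			// The broker/server session was already established; the relay ends.
+	//			break
+	//		}
+	//		relayResponse, err := brokerClient.ClientRelayedPacket(ctx,
+	//			&inproxy.ClientRelayedPacketRequest{
+	//				ConnectionID:     connectionID,
+	//				PacketFromServer: packetFromServer,
+	//			})
+	//		if err != nil {
+	//			break
+	//		}
+	//		packetToServer = relayResponse.PacketToServer
+	//	}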
+ + responsePayload, err := MarshalClientRelayedPacketResponse( + &ClientRelayedPacketResponse{ + PacketToServer: out, + }) + if err != nil { + return nil, errors.Trace(err) + } + + return responsePayload, nil +} + +type pendingServerReport struct { + serverID string + serverReport *BrokerServerReport + roundTrip *InitiatorRoundTrip +} + +func (b *Broker) initiateRelayedServerReport( + serverParams *serverParams, + connectionID ID, + serverReport *BrokerServerReport) ([]byte, error) { + + reportPayload, err := MarshalBrokerServerReport(serverReport) + if err != nil { + return nil, errors.Trace(err) + } + + // Force a new, concurrent session establishment with the server even if + // another handshake is already in progess, relayed by some other client. + // This ensures clients don't block waiting for other client relays + // through other tunnels. The last established session will be retained + // for reuse. + + waitToShareSession := false + + roundTrip, err := b.initiatorSessions.NewRoundTrip( + serverParams.sessionPublicKey, + serverParams.sessionRootObfuscationSecret, + waitToShareSession, + reportPayload) + if err != nil { + return nil, errors.Trace(err) + } + + relayPacket, _, err := roundTrip.Next(nil, nil) + if err != nil { + return nil, errors.Trace(err) + } + + strConnectionID := string(connectionID[:]) + + b.pendingServerReports.Set( + strConnectionID, + &pendingServerReport{ + serverID: serverParams.serverID, + serverReport: serverReport, + roundTrip: roundTrip, + }, + time.Duration(atomic.LoadInt64(&b.pendingServerReportsTTL))) + + return relayPacket, nil +} + +type serverParams struct { + serverID string + sessionPublicKey SessionPublicKey + sessionRootObfuscationSecret ObfuscationSecret +} + +// validateDestination checks that the client's specified proxy dial +// destination is valid destination address for a tunnel protocol in the +// specified signed and valid Psiphon server entry. +func (b *Broker) validateDestination( + geoIPData common.GeoIPData, + packedDestinationServerEntry []byte, + networkProtocol NetworkProtocol, + destinationAddress string) (*serverParams, error) { + + var packedServerEntry protocol.PackedServerEntryFields + err := cbor.Unmarshal(packedDestinationServerEntry, &packedServerEntry) + if err != nil { + return nil, errors.Trace(err) + } + + serverEntryFields, err := protocol.DecodePackedServerEntryFields(packedServerEntry) + if err != nil { + return nil, errors.Trace(err) + } + + // Strip any unsigned fields, which could be forged by the client. In + // particular, this includes the server entry tag, which, in some cases, + // is locally populated by a client for its own reference. + + serverEntryFields.RemoveUnsignedFields() + + // Check that the server entry is signed by Psiphon. Otherwise a client + // could manufacture a server entry corresponding to an arbitrary dial + // destination. + + err = serverEntryFields.VerifySignature( + b.config.ServerEntrySignaturePublicKey) + if err != nil { + return nil, errors.Trace(err) + } + + // The server entry tag must be set and signed by Psiphon, as local, + // client derived tags are unsigned and untrusted. + + serverEntryTag := serverEntryFields.GetTag() + + if serverEntryTag == "" { + return nil, errors.TraceNew("missing server entry tag") + } + + // Check that the server entry tag is on a list of active and valid + // Psiphon server entry tags. This ensures that an obsolete entry for a + // pruned server cannot by misused by a client to proxy to what's no + // longer a Psiphon server. 
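+	// For illustration only (not part of this change): a host might back
+	// IsValidServerEntryTag with a periodically refreshed set of active
+	// server entry tags; the set and its lock here are hypothetical.
+	//
+	//	config.IsValidServerEntryTag = func(serverEntryTag string) bool {
+	//		activeTagsMutex.RLock()
+	//		defer activeTagsMutex.RUnlock()
+	//		return activeServerEntryTags[serverEntryTag]
+	//	}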
+ + if !b.config.IsValidServerEntryTag(serverEntryTag) { + return nil, errors.TraceNew("invalid server entry tag") + } + + serverID := serverEntryFields.GetDiagnosticID() + + serverEntry, err := serverEntryFields.GetServerEntry() + if err != nil { + return nil, errors.Trace(err) + } + + // Validate the dial host (IP or domain) and port matches a tunnel + // protocol offered by the server entry. + + destHost, destPort, err := net.SplitHostPort(destinationAddress) + if err != nil { + return nil, errors.Trace(err) + } + + destPortNum, err := strconv.Atoi(destPort) + if err != nil { + return nil, errors.Trace(err) + } + + // For domain fronted cases, since we can't verify the Host header, access + // is strictly to limited to targeted clients. Clients should use tactics + // to avoid disallowed domain dial address cases, but here the broker + // enforces it. + // + // TODO: this issue could be further mitigated with a server + // acknowledgement of the broker's report, with no acknowledgement + // followed by signaling the proxy to terminate client connection. + + // This assumes that any domain dial is for domain fronting. + isDomain := net.ParseIP(destHost) == nil + if isDomain && !b.config.AllowDomainFrontedDestinations(geoIPData) { + return nil, errors.TraceNew("domain fronted destinations disallowed") + } + + // The server entry must include an in-proxy tunnel protocol capability + // and corresponding dial port number. In-proxy capacity may be set for + // only a subset of all Psiphon servers, to limited the number of servers + // a proxy can observe and enumerate. Well-behaved clients will not send + // any server entries lacking this capability, but here the broker + // enforces it. + + if !serverEntry.IsValidInproxyDialAddress(networkProtocol.String(), destHost, destPortNum) { + return nil, errors.TraceNew("invalid destination address") + } + + // Extract and return the key material to be used for the secure session + // and BrokerServer exchange between the broker and the Psiphon server + // corresponding to this server entry. + + params := &serverParams{ + serverID: serverID, + } + + params.sessionPublicKey, err = SessionPublicKeyFromString( + serverEntry.InproxySessionPublicKey) + if err != nil { + return nil, errors.Trace(err) + } + + params.sessionRootObfuscationSecret, err = ObfuscationSecretFromString( + serverEntry.InproxySessionRootObfuscationSecret) + if err != nil { + return nil, errors.Trace(err) + } + + return params, nil +} + +func (b *Broker) isCommonCompartmentIDHashingInitialized() bool { + b.commonCompartmentsMutex.Lock() + defer b.commonCompartmentsMutex.Unlock() + return b.commonCompartments != nil +} + +func (b *Broker) initializeCommonCompartmentIDHashing( + commonCompartmentIDs []ID) error { + + b.commonCompartmentsMutex.Lock() + defer b.commonCompartmentsMutex.Unlock() + + // At least one common compartment ID is required. At a minimum, one ID + // will be used and distributed to clients via tactics, limiting matching + // to those clients targeted to receive that tactic parameters. + if len(commonCompartmentIDs) == 0 { + return errors.TraceNew("missing common compartment IDs") + } + + // The consistent package doesn't allow duplicate members. 
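+	// For illustration only (not part of this change): the effect of the
+	// consistent hashing configured below is a stable assignment, for
+	// example:
+	//
+	//	compartmentID1, _ := b.selectCommonCompartmentID(proxyID)
+	//	compartmentID2, _ := b.selectCommonCompartmentID(proxyID)
+	//	// compartmentID1 == compartmentID2, and most assignments survive
+	//	// changes to the configured CommonCompartmentIDs list.
+	//
+	// The duplicate check below enforces the package's requirement that
+	// members be unique.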
+ checkDup := make(map[ID]bool, len(commonCompartmentIDs)) + for _, compartmentID := range commonCompartmentIDs { + if checkDup[compartmentID] { + return errors.TraceNew("duplicate common compartment IDs") + } + checkDup[compartmentID] = true + } + + // Proxies without personal compartment IDs are randomly assigned to the + // set of common, Psiphon-specified, compartment IDs. These common + // compartment IDs are then distributed to targeted clients through + // tactics or embedded in OSLs, to limit access to proxies. + // + // Use consistent hashing in an effort to keep a consistent assignment of + // proxies (as specified by proxy ID, which covers all announcements for + // a single proxy). This is more of a concern for long-lived, permanent + // proxies that are not behind any NAT. + // + // Even with consistent hashing, a subset of proxies will still change + // assignment when CommonCompartmentIDs changes. + + consistentMembers := make([]consistent.Member, len(commonCompartmentIDs)) + for i, compartmentID := range commonCompartmentIDs { + consistentMembers[i] = consistentMember(compartmentID.String()) + } + + b.commonCompartments = consistent.New( + consistentMembers, + consistent.Config{ + PartitionCount: len(consistentMembers), + ReplicationFactor: 1, + Load: 1, + Hasher: xxhasher{}, + }) + + return nil +} + +// xxhasher wraps github.com/cespare/xxhash.Sum64 in the interface expected by +// github.com/buraksezer/consistent. xxhash is a high quality hash function +// used in github.com/buraksezer/consistent examples. +type xxhasher struct{} + +func (h xxhasher) Sum64(data []byte) uint64 { + return xxhash.Sum64(data) +} + +// consistentMember wraps the string type with the interface expected by +// github.com/buraksezer/consistent. +type consistentMember string + +func (m consistentMember) String() string { + return string(m) +} + +func (b *Broker) selectCommonCompartmentID(proxyID ID) (ID, error) { + + b.commonCompartmentsMutex.Lock() + defer b.commonCompartmentsMutex.Unlock() + + compartmentID, err := IDFromString( + b.commonCompartments.LocateKey(proxyID[:]).String()) + if err != nil { + return compartmentID, errors.Trace(err) + } + + return compartmentID, nil +} diff --git a/psiphon/common/inproxy/brokerClient.go b/psiphon/common/inproxy/brokerClient.go new file mode 100644 index 000000000..32cc406e6 --- /dev/null +++ b/psiphon/common/inproxy/brokerClient.go @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "context" + std_errors "errors" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" +) + +// Timeouts should be aligned with Broker timeouts. 
+ +const ( + sessionHandshakeRoundTripTimeout = 10 * time.Second + proxyAnnounceRequestTimeout = 2 * time.Minute + proxyAnswerRequestTimeout = 10 * time.Second + clientOfferRequestTimeout = 10 * time.Second + clientRelayedPacketRequestTimeout = 10 * time.Second +) + +// BrokerClient is used to make requests to a broker. +// +// Each BrokerClient maintains a secure broker session. A BrokerClient and its +// session may be used for multiple concurrent requests. Session key material +// is provided by BrokerDialCoordinator and must remain static for the +// lifetime of the BrokerClient. +// +// Round trips between the BrokerClient and broker are provided by +// BrokerClientRoundTripper from BrokerDialCoordinator. The RoundTripper must +// maintain the association between a request payload and the corresponding +// response payload. The canonical RoundTripper is an HTTP client, with +// HTTP/2 or HTTP/3 used to multiplex concurrent requests. +// +// When the BrokerDialCoordinator BrokerClientRoundTripperSucceeded call back +// is invoked, the RoundTripper provider may mark the RoundTripper dial +// properties for replay. +// +// When the BrokerDialCoordinator BrokerClientRoundTripperFailed call back is +// invoked, the RoundTripper provider should clear any replay state and also +// create a new RoundTripper to be returned from BrokerClientRoundTripper. +// +// BrokerClient does not have a Close operation. The user should close the +// provided RoundTripper as appropriate. +// +// The secure session layer includes obfuscation that provides random padding +// and uniformly random payload content. The RoundTripper is expected to add +// its own obfuscation layer; for example, domain fronting. +type BrokerClient struct { + coordinator BrokerDialCoordinator + sessions *InitiatorSessions +} + +// NewBrokerClient initializes a new BrokerClient with the provided +// BrokerDialCoordinator. +func NewBrokerClient(coordinator BrokerDialCoordinator) (*BrokerClient, error) { + + // A client is expected to use an ephemeral key, and can return a + // zero-value private key. Each proxy should use a peristent key, as the + // corresponding public key is the proxy ID, which is used to credit the + // proxy for its service. + + privateKey := coordinator.BrokerClientPrivateKey() + if privateKey.IsZero() { + var err error + privateKey, err = GenerateSessionPrivateKey() + if err != nil { + return nil, errors.Trace(err) + } + } + + return &BrokerClient{ + coordinator: coordinator, + sessions: NewInitiatorSessions(privateKey), + }, nil +} + +// GetBrokerDialCoordinator returns the BrokerDialCoordinator associated with +// the BrokerClient. +func (b *BrokerClient) GetBrokerDialCoordinator() BrokerDialCoordinator { + return b.coordinator +} + +// ProxyAnnounce sends a ProxyAnnounce request and returns the response. 
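+//
+// For illustration only (not part of this change): a typical caller
+// constructs one shared BrokerClient per broker and reuses it across
+// requests; the coordinator value is a host-supplied BrokerDialCoordinator
+// implementation, not shown here.
+//
+//	brokerClient, err := inproxy.NewBrokerClient(coordinator)
+//	if err != nil {
+//		return errors.Trace(err)
+//	}
+//	response, err := brokerClient.ProxyAnnounce(ctx, 0, announceRequest)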
+func (b *BrokerClient) ProxyAnnounce( + ctx context.Context, + requestDelay time.Duration, + request *ProxyAnnounceRequest) (*ProxyAnnounceResponse, error) { + + requestPayload, err := MarshalProxyAnnounceRequest(request) + if err != nil { + return nil, errors.Trace(err) + } + + requestTimeout := common.ValueOrDefault( + b.coordinator.AnnounceRequestTimeout(), + proxyAnnounceRequestTimeout) + + responsePayload, err := b.roundTrip( + ctx, requestDelay, requestTimeout, requestPayload) + if err != nil { + return nil, errors.Trace(err) + } + + response, err := UnmarshalProxyAnnounceResponse(responsePayload) + if err != nil { + return nil, errors.Trace(err) + } + + return response, nil +} + +// ClientOffer sends a ClientOffer request and returns the response. +func (b *BrokerClient) ClientOffer( + ctx context.Context, + request *ClientOfferRequest) (*ClientOfferResponse, error) { + + requestPayload, err := MarshalClientOfferRequest(request) + if err != nil { + return nil, errors.Trace(err) + } + + requestTimeout := common.ValueOrDefault( + b.coordinator.OfferRequestTimeout(), + clientOfferRequestTimeout) + + responsePayload, err := b.roundTrip( + ctx, 0, requestTimeout, requestPayload) + if err != nil { + return nil, errors.Trace(err) + } + + response, err := UnmarshalClientOfferResponse(responsePayload) + if err != nil { + return nil, errors.Trace(err) + } + + return response, nil +} + +// ProxyAnswer sends a ProxyAnswer request and returns the response. +func (b *BrokerClient) ProxyAnswer( + ctx context.Context, + request *ProxyAnswerRequest) (*ProxyAnswerResponse, error) { + + requestPayload, err := MarshalProxyAnswerRequest(request) + if err != nil { + return nil, errors.Trace(err) + } + + requestTimeout := common.ValueOrDefault( + b.coordinator.AnswerRequestTimeout(), + proxyAnswerRequestTimeout) + + responsePayload, err := b.roundTrip( + ctx, 0, requestTimeout, requestPayload) + if err != nil { + return nil, errors.Trace(err) + } + + response, err := UnmarshalProxyAnswerResponse(responsePayload) + if err != nil { + return nil, errors.Trace(err) + } + + return response, nil +} + +// ClientRelayedPacket sends a ClientRelayedPacket request and returns the +// response. +func (b *BrokerClient) ClientRelayedPacket( + ctx context.Context, + request *ClientRelayedPacketRequest) (*ClientRelayedPacketResponse, error) { + + requestPayload, err := MarshalClientRelayedPacketRequest(request) + if err != nil { + return nil, errors.Trace(err) + } + + requestTimeout := common.ValueOrDefault( + b.coordinator.RelayedPacketRequestTimeout(), + clientRelayedPacketRequestTimeout) + + responsePayload, err := b.roundTrip( + ctx, 0, requestTimeout, requestPayload) + if err != nil { + return nil, errors.Trace(err) + } + + response, err := UnmarshalClientRelayedPacketResponse(responsePayload) + if err != nil { + return nil, errors.Trace(err) + } + + return response, nil +} + +func (b *BrokerClient) roundTrip( + ctx context.Context, + requestDelay time.Duration, + requestTimeout time.Duration, + request []byte) ([]byte, error) { + + // The round tripper may need to establish a transport-level connection; + // or this may already be established. + + roundTripper, err := b.coordinator.BrokerClientRoundTripper() + if err != nil { + return nil, errors.Trace(err) + } + + // InitiatorSessions.RoundTrip may make serveral round trips with + // roundTripper in order to complete a session establishment handshake. 
+ // + // When there's an active session, only a single round trip is required, + // to exchange the application-level request and response. + // + // When a concurrent BrokerClient request is currently performing a + // session handshake, InitiatorSessions.RoundTrip will await completion + // of that handshake before sending the application-layer request. + // + // Note the waitToShareSession limitation, documented in + // InitiatorSessions.RoundTrip: a new session must complete a full, + // application-level round trip (e.g., ProxyAnnounce/ClientOffer), not + // just the session handshake, before a session becomes ready to share. + // + // Retries are built in to InitiatorSessions.RoundTrip: if there's an + // existing session and it's expired, there will be additional round + // trips to establish a fresh session. + // + // While the round tripper is responsible for maintaining the + // request/response association, the application-level request and + // response are tagged with a RoundTripID which is checked to ensure the + // association is maintained. + // + // InitiatorSessions.RoundTrip will apply sessionHandshakeTimeout to any + // round trips required for Noise session handshakes; apply requestDelay + // before the application-level request round trip; and apply + // requestTimeout to the network round trip following the delay, if any. + // Any time spent blocking on waitToShareSession is not included in + // requestDelay or requestTimeout. + + waitToShareSession := true + + sessionHandshakeTimeout := common.ValueOrDefault( + b.coordinator.SessionHandshakeRoundTripTimeout(), + sessionHandshakeRoundTripTimeout) + + response, err := b.sessions.RoundTrip( + ctx, + roundTripper, + b.coordinator.BrokerPublicKey(), + b.coordinator.BrokerRootObfuscationSecret(), + waitToShareSession, + sessionHandshakeTimeout, + requestDelay, + requestTimeout, + request) + if err != nil { + + var failedError *RoundTripperFailedError + failed := std_errors.As(err, &failedError) + + if failed { + // The BrokerDialCoordinator provider should close the existing + // BrokerClientRoundTripper and create a new RoundTripper to return + // in the next BrokerClientRoundTripper call. + // + // The session will be closed, if necessary, by InitiatorSessions. + // It's possible that the session remains valid and only the + // RoundTripper transport layer needs to be reset. + b.coordinator.BrokerClientRoundTripperFailed(roundTripper) + } + + return nil, errors.Trace(err) + } + + b.coordinator.BrokerClientRoundTripperSucceeded(roundTripper) + + return response, nil +} diff --git a/psiphon/common/inproxy/client.go b/psiphon/common/inproxy/client.go new file mode 100644 index 000000000..7be44dcd7 --- /dev/null +++ b/psiphon/common/inproxy/client.go @@ -0,0 +1,491 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package inproxy + +import ( + "context" + "net" + "net/netip" + "sync" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" +) + +const ( + clientOfferRetryDelay = 1 * time.Second + clientOfferRetryJitter = 0.3 +) + +// ClientConn is a network connection to an in-proxy, which is relayed to a +// Psiphon server destination. Psiphon clients use a ClientConn in place of a +// physical TCP or UDP socket connection, passing the ClientConn into tunnel +// protocol dials. ClientConn implements both net.Conn and net.PacketConn, +// with net.PacketConn's ReadFrom/WriteTo behaving as if connected to the +// initial dial address. +type ClientConn struct { + config *ClientConfig + brokerClient *BrokerClient + webRTCConn *webRTCConn + connectionID ID + remoteAddr net.Addr + + relayMutex sync.Mutex + initialRelayPacket []byte +} + +// ClientConfig specifies the configuration for a ClientConn dial. +type ClientConfig struct { + + // Logger is used to log events. + Logger common.Logger + + // EnableWebRTCDebugLogging indicates whether to emit WebRTC debug logs. + EnableWebRTCDebugLogging bool + + // BaseAPIParameters should be populated with Psiphon handshake metrics + // parameters. These will be sent to and logger by the broker. + BaseAPIParameters common.APIParameters + + // BrokerClient is the BrokerClient to use for broker API calls. The + // BrokerClient may be shared with other client dials, allowing for + // connection and session reuse. + BrokerClient *BrokerClient + + // WebRTCDialCoordinator specifies specific WebRTC dial strategies and + // settings; WebRTCDialCoordinator also facilities dial replay by + // receiving callbacks when individual dial steps succeed or fail. + WebRTCDialCoordinator WebRTCDialCoordinator + + // ReliableTransport specifies whether to use reliable delivery with the + // underlying WebRTC DataChannel that relays the ClientConn traffic. When + // using a ClientConn to proxy traffic that expects reliable delivery, as + // if the physical network protocol were TCP, specify true. When using a + // ClientConn to proxy traffic that expects unreliable delivery, such as + // QUIC protocols expecting the physical network protocol UDP, specify + // false. + ReliableTransport bool + + // DialNetworkProtocol specifies whether the in-proxy will relay TCP or UDP + // traffic. + DialNetworkProtocol NetworkProtocol + + // DialAddress is the host:port destination network address the in-proxy + // will relay traffic to. + DialAddress string + + // RemoteAddrOverride, when specified, is the address to be returned by + // ClientConn.RemoteAddr. When not specified, ClientConn.RemoteAddr + // returns a zero-value address. + RemoteAddrOverride string + + // PackedDestinationServerEntry is a signed Psiphon server entry + // corresponding to the destination dial address. This signed server + // entry is sent to the broker, which will use it to validate that the + // server is a valid in-proxy destination. + // + // The expected format is CBOR-encoded protoco.PackedServerEntryFields, + // with the caller invoking ServerEntryFields.RemoveUnsignedFields to + // prune local, unnsigned fields before sending. + PackedDestinationServerEntry []byte +} + +// DialClient establishes an in-proxy connection for relaying traffic to the +// specified destination. 
DialClient first contacts the broker and initiates +// an in-proxy pairing. config.BrokerClient may be shared by multiple dials, +// and may have a preexisting connection and session with the broker. +func DialClient( + ctx context.Context, + config *ClientConfig) (retConn *ClientConn, retErr error) { + + // Configure the value returned by ClientConn.RemoteAddr. If no + // config.RemoteAddrOverride is specified, RemoteAddr will return a + // zero-value, non-nil net.Addr. The underlying webRTCConn.RemoteAddr + // returns only nil. + + var remoteAddr net.Addr + var addrPort netip.AddrPort + if config.RemoteAddrOverride != "" { + + // ParseAddrPort does not perform any domain resolution. The addr + // portion must be an IP address. + var err error + addrPort, err = netip.ParseAddrPort(config.RemoteAddrOverride) + if err != nil { + return nil, errors.Trace(err) + } + } + + switch config.DialNetworkProtocol { + case NetworkProtocolTCP: + remoteAddr = net.TCPAddrFromAddrPort(addrPort) + case NetworkProtocolUDP: + remoteAddr = net.UDPAddrFromAddrPort(addrPort) + default: + return nil, errors.TraceNew("unexpected DialNetworkProtocol") + } + + // Reset and configure port mapper component, as required. See + // initPortMapper comment. + initPortMapper(config.WebRTCDialCoordinator) + + // Future improvements: + // + // - The broker connection and session, when not already established, + // could be established concurrent with the WebRTC offer setup + // (STUN/ICE gathering). + // + // - The STUN state used for NAT discovery could be reused for the WebRTC + // dial. + // + // - A subsequent WebRTC offer setup could be run concurrent with the + // client offer request, in case that request or WebRTC connections + // fails, so that the offer is immediately ready for a retry. + + if config.WebRTCDialCoordinator.DiscoverNAT() { + + // NAT discovery, using the RFC5780 algorithms is optional and + // conditional on the DiscoverNAT flag. Discovery is performed + // synchronously, so that NAT topology metrics can be reported to the + // broker in the ClientOffer request. For clients, NAT discovery is + // intended to be performed at a low sampling rate, since the RFC5780 + // traffic may be unusual(differs from standard STUN requests for + // ICE) and since this step delays the dial. Clients should to cache + // their NAT discovery outcomes, associated with the current network + // by network ID, so metrics can be reported even without a discovery + // step; this is facilitated by WebRTCDialCoordinator. + // + // NAT topology metrics are used by the broker to optimize client and + // in-proxy matching. + // + // For client NAT discovery, port mapping type discovery is skipped + // since port mappings are attempted when preparing the WebRTC offer, + // which also happens before the ClientOffer request. + + NATDiscover( + ctx, + &NATDiscoverConfig{ + Logger: config.Logger, + WebRTCDialCoordinator: config.WebRTCDialCoordinator, + SkipPortMapping: true, + }) + } + + var result *clientWebRTCDialResult + for { + + // Repeatedly try to establish in-proxy/WebRTC connection until the + // dial context is canceled or times out. + // + // If a broker request fails, the WebRTCDialCoordinator + // BrokerClientRoundTripperFailed callback will be invoked, so the + // Psiphon client will have an opportunity to select new broker + // connection parameters before a retry. 
Similarly, when STUN servers + // fail, WebRTCDialCoordinator STUNServerAddressFailed will be + // invoked, giving the Psiphon client an opportunity to select new + // STUN server parameter -- although, in this failure case, the + // WebRTC connection attempt can succeed with other ICE candidates or + // no ICE candidates. + + err := ctx.Err() + if err != nil { + return nil, errors.Trace(err) + } + + var retry bool + result, retry, err = dialClientWebRTCConn(ctx, config) + if err == nil { + break + } + + if retry { + config.Logger.WithTraceFields(common.LogFields{"error": err}).Warning("dial failed") + + // This delay is intended avoid overloading the broker with + // repeated requests. A jitter is applied to mitigate a traffic + // fingerprint. + + brokerCoordinator := config.BrokerClient.GetBrokerDialCoordinator() + common.SleepWithJitter( + ctx, + common.ValueOrDefault(brokerCoordinator.OfferRetryDelay(), clientOfferRetryDelay), + common.ValueOrDefault(brokerCoordinator.OfferRetryJitter(), clientOfferRetryJitter)) + + continue + } + + return nil, errors.Trace(err) + } + + return &ClientConn{ + config: config, + webRTCConn: result.conn, + connectionID: result.connectionID, + initialRelayPacket: result.relayPacket, + remoteAddr: remoteAddr, + }, nil +} + +// GetConnectionID returns the in-proxy connection ID, which the client should +// include with its Psiphon handshake parameters. +func (conn *ClientConn) GetConnectionID() ID { + return conn.connectionID +} + +// InitialRelayPacket returns the initial packet in the broker->server +// messaging session. The client must relay these packets to facilitate this +// message exchange. Session security ensures clients cannot decrypt, modify, +// or replay these session packets. The Psiphon client will sent the initial +// packet as a parameter in the Psiphon server handshake request. +func (conn *ClientConn) InitialRelayPacket() []byte { + conn.relayMutex.Lock() + defer conn.relayMutex.Unlock() + + relayPacket := conn.initialRelayPacket + conn.initialRelayPacket = nil + return relayPacket +} + +// RelayPacket takes any server->broker messaging session packets the client +// receives and relays them back to the broker. RelayPacket returns the next +// broker->server packet, if any, or nil when the message exchange is +// complete. Psiphon clients receive a server->broker packet in the Psiphon +// server handshake response and exchange additional packets in a +// post-handshake Psiphon server request. +// +// If RelayPacket fails, the client should close the ClientConn and redial. +func (conn *ClientConn) RelayPacket( + ctx context.Context, in []byte) ([]byte, error) { + + // Future improvement: the client relaying these packets back to the + // broker is potentially an inter-flow fingerprint, alternating between + // the WebRTC flow and the client's broker connection. It may be possible + // to avoid this by having the client connect to the broker via the + // tunnel, resuming its broker session and relaying any further packets. + + // Limitation: here, this mutex only ensures that this ClientConn doesn't + // make concurrent ClientRelayedPacket requests. The client must still + // ensure that the packets are delivered in the correct relay sequence. + conn.relayMutex.Lock() + defer conn.relayMutex.Unlock() + + // ClientRelayedPacket applies + // BrokerDialCoordinator.RelayedPacketRequestTimeout as the request + // timeout. 
+ relayResponse, err := conn.config.BrokerClient.ClientRelayedPacket( + ctx, + &ClientRelayedPacketRequest{ + ConnectionID: conn.connectionID, + PacketFromServer: in, + }) + if err != nil { + return nil, errors.Trace(err) + } + + return relayResponse.PacketToServer, nil +} + +type clientWebRTCDialResult struct { + conn *webRTCConn + connectionID ID + relayPacket []byte +} + +func dialClientWebRTCConn( + ctx context.Context, + config *ClientConfig) (retResult *clientWebRTCDialResult, retRetry bool, retErr error) { + + // Initialize the WebRTC offer + + doTLSRandomization := config.WebRTCDialCoordinator.DoDTLSRandomization() + trafficShapingParameters := config.WebRTCDialCoordinator.DataChannelTrafficShapingParameters() + clientRootObfuscationSecret := config.WebRTCDialCoordinator.ClientRootObfuscationSecret() + + webRTCConn, SDP, SDPMetrics, err := newWebRTCConnWithOffer( + ctx, &webRTCConfig{ + Logger: config.Logger, + EnableDebugLogging: config.EnableWebRTCDebugLogging, + WebRTCDialCoordinator: config.WebRTCDialCoordinator, + ClientRootObfuscationSecret: clientRootObfuscationSecret, + DoDTLSRandomization: doTLSRandomization, + TrafficShapingParameters: trafficShapingParameters, + ReliableTransport: config.ReliableTransport, + }) + if err != nil { + return nil, true, errors.Trace(err) + } + defer func() { + // Cleanup on early return + if retErr != nil { + webRTCConn.Close() + } + }() + + // Send the ClientOffer request to the broker + + brokerCoordinator := config.BrokerClient.GetBrokerDialCoordinator() + + packedBaseParams, err := protocol.EncodePackedAPIParameters(config.BaseAPIParameters) + if err != nil { + return nil, false, errors.Trace(err) + } + + // Here, WebRTCDialCoordinator.NATType may be populated from discovery, or + // replayed from a previous run on the same network ID. + // WebRTCDialCoordinator.PortMappingTypes may be populated via + // newWebRTCConnWithOffer. + + // ClientOffer applies BrokerDialCoordinator.OfferRequestTimeout as the + // request timeout. + offerResponse, err := config.BrokerClient.ClientOffer( + ctx, + &ClientOfferRequest{ + Metrics: &ClientMetrics{ + BaseAPIParameters: packedBaseParams, + ProxyProtocolVersion: ProxyProtocolVersion1, + NATType: config.WebRTCDialCoordinator.NATType(), + PortMappingTypes: config.WebRTCDialCoordinator.PortMappingTypes(), + }, + CommonCompartmentIDs: brokerCoordinator.CommonCompartmentIDs(), + PersonalCompartmentIDs: brokerCoordinator.PersonalCompartmentIDs(), + ClientOfferSDP: SDP, + ICECandidateTypes: SDPMetrics.iceCandidateTypes, + ClientRootObfuscationSecret: clientRootObfuscationSecret, + DoDTLSRandomization: doTLSRandomization, + TrafficShapingParameters: trafficShapingParameters, + PackedDestinationServerEntry: config.PackedDestinationServerEntry, + NetworkProtocol: config.DialNetworkProtocol, + DestinationAddress: config.DialAddress, + }) + if err != nil { + return nil, false, errors.Trace(err) + } + + // No retry when rate/entry limited; do retry on no-match, as a match may + // soon appear. + + if offerResponse.Limited { + return nil, false, errors.TraceNew("limited") + + } else if offerResponse.NoMatch { + + return nil, true, errors.TraceNew("no proxy match") + + } + + if offerResponse.SelectedProxyProtocolVersion != ProxyProtocolVersion1 { + // This case is currently unexpected, as all clients and proxies use + // ProxyProtocolVersion1. 
+ return nil, false, errors.Tracef( + "Unsupported proxy protocol version: %d", + offerResponse.SelectedProxyProtocolVersion) + } + + // Establish the WebRTC DataChannel connection + + err = webRTCConn.SetRemoteSDP(offerResponse.ProxyAnswerSDP) + if err != nil { + return nil, true, errors.Trace(err) + } + + awaitDataChannelCtx, awaitDataChannelCancelFunc := context.WithTimeout( + ctx, + common.ValueOrDefault( + config.WebRTCDialCoordinator.WebRTCAwaitDataChannelTimeout(), dataChannelAwaitTimeout)) + defer awaitDataChannelCancelFunc() + + err = webRTCConn.AwaitInitialDataChannel(awaitDataChannelCtx) + if err != nil { + return nil, true, errors.Trace(err) + } + + return &clientWebRTCDialResult{ + conn: webRTCConn, + connectionID: offerResponse.ConnectionID, + relayPacket: offerResponse.RelayPacketToServer, + }, false, nil +} + +// GetMetrics implements the common.MetricsSource interface. +func (conn *ClientConn) GetMetrics() common.LogFields { + return conn.webRTCConn.GetMetrics() +} + +func (conn *ClientConn) Close() error { + return errors.Trace(conn.webRTCConn.Close()) +} + +func (conn *ClientConn) IsClosed() bool { + return conn.webRTCConn.IsClosed() +} + +func (conn *ClientConn) Read(p []byte) (int, error) { + n, err := conn.webRTCConn.Read(p) + return n, errors.Trace(err) +} + +// Write relays p through the in-proxy connection. len(p) should be under +// 32K. +func (conn *ClientConn) Write(p []byte) (int, error) { + n, err := conn.webRTCConn.Write(p) + return n, errors.Trace(err) +} + +func (conn *ClientConn) LocalAddr() net.Addr { + return conn.webRTCConn.LocalAddr() +} + +func (conn *ClientConn) RemoteAddr() net.Addr { + // Do not return conn.webRTCConn.RemoteAddr(), which is always nil. + return conn.remoteAddr +} + +func (conn *ClientConn) SetDeadline(t time.Time) error { + return conn.webRTCConn.SetDeadline(t) +} + +func (conn *ClientConn) SetReadDeadline(t time.Time) error { + return conn.webRTCConn.SetReadDeadline(t) +} + +func (conn *ClientConn) SetWriteDeadline(t time.Time) error { + + // Limitation: this is a workaround; webRTCConn doesn't support + // SetWriteDeadline, but common/quic calls SetWriteDeadline on + // net.PacketConns to avoid hanging on EAGAIN when the conn is an actual + // UDP socket. See the comment in common/quic.writeTimeoutUDPConn. In + // this case, the conn is not a UDP socket and that particular + // SetWriteDeadline use case doesn't apply. Silently ignore the deadline + // and report no error. + + return nil +} + +func (conn *ClientConn) ReadFrom(b []byte) (int, net.Addr, error) { + n, err := conn.webRTCConn.Read(b) + return n, conn.webRTCConn.RemoteAddr(), err +} + +func (conn *ClientConn) WriteTo(b []byte, _ net.Addr) (int, error) { + n, err := conn.webRTCConn.Write(b) + return n, err +} diff --git a/psiphon/common/inproxy/coordinator.go b/psiphon/common/inproxy/coordinator.go new file mode 100644 index 000000000..f7c258e58 --- /dev/null +++ b/psiphon/common/inproxy/coordinator.go @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "context" + "net" + "time" +) + +// RoundTripper provides a request/response round trip network transport with +// blocking circumvention capabilities. A typical implementation is domain +// fronted HTTPS. RoundTripper is used by clients and proxies to make +// requests to brokers. +// +// The round trip implementation must apply any specified delay before the +// network round trip begins; and apply the specified timeout to the network +// round trip, excluding any delay. +type RoundTripper interface { + RoundTrip( + ctx context.Context, + roundTripDelay time.Duration, + roundTripTimeout time.Duration, + requestPayload []byte) (responsePayload []byte, err error) +} + +// RoundTripperFailedError is an error type that should be returned from +// RoundTripper.RoundTrip when the round trip transport has permanently +// failed. When RoundTrip returns an error of type RoundTripperFailedError to +// a broker client, the broker client will invoke +// BrokerClientRoundTripperFailed. +type RoundTripperFailedError struct { + err error +} + +func NewRoundTripperFailedError(err error) *RoundTripperFailedError { + return &RoundTripperFailedError{err: err} +} + +func (e RoundTripperFailedError) Error() string { + return e.err.Error() +} + +// BrokerDialCoordinator provides in-proxy dial parameters and configuration, +// used by both clients and proxies, and an interface for signaling when +// parameters are successful or not, to facilitate replay of successful +// parameters. +// +// Each BrokerDialCoordinator should provide values selected in the context of +// a single network, as identified by a network ID. A distinct +// BrokerDialCoordinator should be created for each in-proxy broker dial, +// with new or replayed parameters selected as appropriate. Multiple in-proxy +// client dials and/or proxy runs may share a single BrokerDialCoordinator, +// reducing round trips required to make broker requests. A +// BrokerDialCoordinator implementation must be safe for concurrent calls. +// +// The Psiphon client is expected to create a new BrokerDialCoordinator for +// use by in-proxy clients when the underlying network changes and tunnels +// are redialed. Similarly, in-proxy proxies should be restarted with a new +// BrokerDialCoordinator when the underlying network changes. +type BrokerDialCoordinator interface { + + // Returns the network ID for the network this BrokerDialCoordinator is + // associated with. For a single BrokerDialCoordinator, the NetworkID value + // should not change. Replay-facilitating calls, Succeeded/Failed, all + // assume the network and network ID remain static. The network ID value + // is used by in-proxy dials to track internal state that depends on the + // current network; this includes the port mapping types supported by the + // network. + NetworkID() string + + // Returns the network type for the current network, or NetworkTypeUnknown + // if unknown. + NetworkType() NetworkType + + // CommonCompartmentIDs is the list of common, Psiphon-managed, in-proxy + // compartment IDs known to a client. These IDs are delivered through + // tactics, or embedded in OSLs. 
+    //
+    // At most MaxCompartmentIDs may be sent to a broker; if necessary, the
+    // provider may return a subset of known compartment IDs and replay when
+    // the overall dial is a success; and/or retain only the most recently
+    // discovered compartment IDs.
+    //
+    // CommonCompartmentIDs is not called for proxies.
+    CommonCompartmentIDs() []ID
+
+    // PersonalCompartmentIDs are compartment IDs distributed from proxy
+    // operators to client users out-of-band and provide optional access
+    // control. For example, a proxy operator may want to provide access only
+    // to certain users, and/or users want to use only a proxy run by a
+    // certain operator.
+    //
+    // At most MaxCompartmentIDs may be sent to a broker; for typical use
+    // cases, both clients and proxies will specify a single personal
+    // compartment ID.
+    PersonalCompartmentIDs() []ID
+
+    // BrokerClientPrivateKey is the client or proxy's private key to be used
+    // in the secure session established with a broker. Clients should
+    // generate ephemeral keys; this is done automatically when a zero-value
+    // SessionPrivateKey is returned. Proxies may generate, persist, and reuse
+    // long-lived keys to enable traffic attribution to a proxy, identified
+    // by a proxy ID, the corresponding public key.
+    BrokerClientPrivateKey() SessionPrivateKey
+
+    // BrokerPublicKey is the public key for the broker selected by the
+    // provider and reachable via BrokerClientRoundTripper. The broker is
+    // authenticated in the secure session.
+    BrokerPublicKey() SessionPublicKey
+
+    // BrokerRootObfuscationSecret is the root obfuscation secret for the
+    // broker and used in the secure session.
+    BrokerRootObfuscationSecret() ObfuscationSecret
+
+    // BrokerClientRoundTripper returns a RoundTripper to use for broker
+    // requests. The provider handles selecting a broker and broker
+    // addressing, as well as providing a round trip network transport with
+    // blocking circumvention capabilities. A typical implementation is
+    // domain fronted HTTPS. The RoundTripper should offer persistent network
+    // connections and request multiplexing, for example with HTTP/2, so that
+    // a single connection can be used for many concurrent requests.
+    //
+    // Clients and proxies make round trips to establish a secure session with
+    // the broker, on top of the provided transport, and to exchange API
+    // requests with the broker.
+    //
+    // The implementation must return a RoundTripper connecting to the same
+    // broker for every call, as multiple-request sequences such as
+    // ProxyAnnounce and ProxyAnswer depend on broker state.
+    BrokerClientRoundTripper() (RoundTripper, error)
+
+    // BrokerClientRoundTripperSucceeded is called after a successful round
+    // trip using the specified RoundTripper. This signal is used to set
+    // replay for the round tripper's successful dial parameters.
+    // BrokerClientRoundTripperSucceeded is called once per successful round
+    // trip; the provider can choose to set replay only once.
+    BrokerClientRoundTripperSucceeded(roundTripper RoundTripper)
+
+    // BrokerClientRoundTripperFailed is called after a failed round trip
+    // using the specified RoundTripper. This signal is used to clear replay
+    // for the round tripper's unsuccessful dial parameters. The provider
+    // will arrange for a new RoundTripper to be returned from the next
+    // BrokerClientRoundTripper call, discarding the current RoundTripper
+    // after closing its network resources.
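[Editor's aside, not part of the patch] To illustrate the RoundTripper contract referenced above (apply the delay before the network round trip, apply the timeout only to the round trip itself, and signal permanent transport failure with RoundTripperFailedError), here is a sketch over plain HTTPS; the URL, HTTP client wiring, and error policy are illustrative assumptions, and a production provider would add domain fronting and replay handling:

    // Assumes imports: bytes, context, io, net/http, time, and this inproxy
    // package. Sketch only; not the provider used by the Psiphon client.
    type httpRoundTripper struct {
        client *http.Client // e.g. backed by an HTTP/2-capable transport
        url    string
    }

    func (r *httpRoundTripper) RoundTrip(
        ctx context.Context,
        roundTripDelay time.Duration,
        roundTripTimeout time.Duration,
        requestPayload []byte) ([]byte, error) {

        // Apply any delay before the network round trip begins.
        if roundTripDelay > 0 {
            timer := time.NewTimer(roundTripDelay)
            defer timer.Stop()
            select {
            case <-timer.C:
            case <-ctx.Done():
                return nil, ctx.Err()
            }
        }

        // Apply the timeout to the network round trip only, excluding the delay.
        requestCtx, cancel := context.WithTimeout(ctx, roundTripTimeout)
        defer cancel()

        req, err := http.NewRequestWithContext(
            requestCtx, "POST", r.url, bytes.NewReader(requestPayload))
        if err != nil {
            return nil, err
        }
        resp, err := r.client.Do(req)
        if err != nil {
            // Simplification: treat the failure as permanent so the broker
            // client invokes BrokerClientRoundTripperFailed; a real provider
            // would distinguish transient errors.
            return nil, inproxy.NewRoundTripperFailedError(err)
        }
        defer resp.Body.Close()
        return io.ReadAll(resp.Body)
    }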
+ BrokerClientRoundTripperFailed(roundTripper RoundTripper) + + SessionHandshakeRoundTripTimeout() time.Duration + AnnounceRequestTimeout() time.Duration + AnnounceDelay() time.Duration + AnnounceDelayJitter() float64 + AnswerRequestTimeout() time.Duration + OfferRequestTimeout() time.Duration + OfferRetryDelay() time.Duration + OfferRetryJitter() float64 + RelayedPacketRequestTimeout() time.Duration +} + +// WebRTCDialCoordinator provides in-proxy dial parameters and configuration, +// used by both clients and proxies, and an interface for signaling when +// parameters are successful or not, to facilitate replay of successful +// parameters. +// +// Each WebRTCDialCoordinator should provide values selected in the context of +// a single network, as identified by a network ID. A distinct +// WebRTCDialCoordinator should be created for each client in-proxy dial, with +// new or replayed parameters selected as appropriate. One proxy run uses a +// single WebRTCDialCoordinator for all proxied connections. The proxy should +// be restarted with a new WebRTCDialCoordinator when the underlying network +// changes. +// +// A WebRTCDialCoordinator implementation must be safe for concurrent calls. +type WebRTCDialCoordinator interface { + + // Returns the network ID for the network this WebRTCDialCoordinator is + // associated with. For a single WebRTCDialCoordinator, the NetworkID + // value should not change. Replay-facilitating calls, Succeeded/Failed, + // all assume the network and network ID remain static. The network ID + // value is used by in-proxy dials to track internal state that depends + // on the current network; this includes the port mapping types supported + // by the network. + NetworkID() string + + // Returns the network type for the current network, or NetworkTypeUnknown + // if unknown. + NetworkType() NetworkType + + // ClientRootObfuscationSecret is the root obfuscation secret generated by + // or replayed by the client, which will be used to drive and replay + // obfuscation operations for the WebRTC dial, including any DTLS + // randomization. The proxy receives the same root obfuscation secret, + // relayed by the broker, and so the client's selection drives + // obfuscation/replay on both sides. + ClientRootObfuscationSecret() ObfuscationSecret + + // DoDTLSRandomization indicates whether to perform DTLS + // Client/ServerHello randomization. DoDTLSRandomization is specified by + // clients, which may use a weighted coin flip or a replay to determine + // the value. + DoDTLSRandomization() bool + + // DataChannelTrafficShapingParameters returns parameters specifying how + // to perform data channel traffic shapping -- random padding and decoy + // message. Returns nil when no traffic shaping is to be performed. + DataChannelTrafficShapingParameters() *DataChannelTrafficShapingParameters + + // STUNServerAddress selects a STUN server to use for this dial. When + // RFC5780 is true, the STUN server must support RFC5780 NAT discovery; + // otherwise, only basic STUN bind operation support is required. Clients + // and proxies will receive a list of STUN server candidates via tactics, + // and select a candidate at random or replay for each dial. If + // STUNServerAddress returns "", STUN operations are skipped but the dial + // may still succeed if a port mapping can be established. + STUNServerAddress(RFC5780 bool) string + + // STUNServerAddressSucceeded is called after a successful STUN operation + // with the STUN server specified by the address. 
This signal is used to + // set replay for successful STUN servers. STUNServerAddressSucceeded + // will be called when the STUN opertion succeeds, regardless of the + // outcome of the rest of the dial. RFC5780 is true when the STUN server + // was used for NAT discovery. + STUNServerAddressSucceeded(RFC5780 bool, address string) + + // STUNServerAddressFailed is called after a failed STUN operation and is + // used to clear replay for the specified STUN server. + STUNServerAddressFailed(RFC5780 bool, address string) + + // DiscoverNAT indicates whether a client dial should start with NAT + // discovery. Discovering and reporting the client NAT type will assist + // in broker matching. However, RFC5780 NAT discovery can slow down a + // dial and potentially looks like atypical network traffic. Client NAT + // discovery is controlled by tactics and may be disabled or set to run + // with a small probability. Discovered NAT types and portmapping types + // may be cached and used with future dials via SetNATType/NATType and + // SetPortMappingTypes/PortMappingTypes. + // + // Proxies always perform NAT discovery on start up, since that doesn't + // delay a client dial. + DiscoverNAT() bool + + // DisableSTUN indicates whether to skip STUN operations. + DisableSTUN() bool + + // DisablePortMapping indicates whether to skip port mapping operations. + DisablePortMapping() bool + + // DisableInboundForMobileNetworks indicates that all attempts to set up + // inbound operations -- including STUN and port mapping -- should be + // skipped when the network type is NetworkTypeMobile. This skips + // operations that can slow down dials and and unlikely to succeed on + // most mobile networks with CGNAT. + DisableInboundForMobileNetworks() bool + + // DisableIPv6ICECandidates omits all IPv6 ICE candidates. + DisableIPv6ICECandidates() bool + + // NATType returns any persisted NAT type for the current network, as set + // by SetNATType. When NATTypeUnknown is returned, NAT discovery may be + // run. + NATType() NATType + + // SetNATType is called when the NAT type for the current network has been + // discovered. The provider should persist this value, associated with + // the current network ID and with a reasonable TTL, so the value can be + // reused in subsequent dials without having to re-run NAT discovery. + SetNATType(t NATType) + + // PortMappingTypes returns any persisted, supported port mapping types + // for the current network, as set by SetPortMappingTypes. When an empty + // list is returned port mapping discovery may be run. A list containing + // only PortMappingTypeNone indicates that no supported port mapping + // types were discovered. + PortMappingTypes() PortMappingTypes + + // SetPortMappingTypes is called with the supported port mapping types + // discovered for the current network. The provider should persist this + // value, associated with the current network ID and with a reasonable + // TTL, so the value can be reused in subsequent dials without having to + // re-run port mapping discovery. + SetPortMappingTypes(t PortMappingTypes) + + // ResolveAddress resolves a domain and returns its IP address. Clients + // and proxies may use this to hook into the Psiphon custom resolver. The + // provider adds the custom resolver tactics and network ID parameters + // required by psiphon/common.Resolver. + ResolveAddress(ctx context.Context, network, address string) (string, error) + + // UDPListen creates a local UDP socket. 
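[Editor's aside, not part of the patch] A sketch of the per-network caching contract described for NATType/SetNATType above; the field names and the 24 hour TTL are illustrative assumptions, not values from this change:

    // Illustrative provider fragment; a real provider keys the cache by the
    // current network ID and persists it.
    type coordinator struct {
        mutex         sync.Mutex
        natType       inproxy.NATType
        natTypeExpiry time.Time
    }

    func (c *coordinator) SetNATType(t inproxy.NATType) {
        c.mutex.Lock()
        defer c.mutex.Unlock()
        // Cache the discovered value with a reasonable TTL so later dials on
        // the same network can skip NAT discovery.
        c.natType = t
        c.natTypeExpiry = time.Now().Add(24 * time.Hour)
    }

    func (c *coordinator) NATType() inproxy.NATType {
        c.mutex.Lock()
        defer c.mutex.Unlock()
        if time.Now().After(c.natTypeExpiry) {
            // Expired: report unknown so NAT discovery may run again.
            return inproxy.NATTypeUnknown
        }
        return c.natType
    }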
The socket should be bound to a + // specific interface as required for VPN modes, and set a write timeout + // to mitigate the issue documented in psiphon/common.WriteTimeoutUDPConn. + UDPListen(ctx context.Context) (net.PacketConn, error) + + // UDPConn creates a local UDP socket "connected" to the specified remote + // address. The socket should be excluded from VPN routing. This socket + // is used to determine the local address of the active interface the OS + // will select for the specified network ("udp4" for IPv4 or "udp6" for + // IPv6) and remote destination. For this use case, the socket will not + // be used to send network traffic. + UDPConn(ctx context.Context, network, remoteAddress string) (net.PacketConn, error) + + // BindToDevice binds a socket, specified by the file descriptor, to an + // interface that isn't routed through a VPN when Psiphon is running in + // VPN mode. BindToDevice is used in cases where a custom dialer cannot + // be used, and UDPListen cannot be called. If no file descriptor + // operation is required, BindToDevice should take no action and return + // nil. + BindToDevice(fileDescriptor int) error + + // ProxyUpstreamDial is used by the proxy when dialing a TCP or UDP + // upstream connection to a destination Psiphon server. This dial + // callback allows for TCP/UDP-level dial tactics parameters to be + // applied, as appropriate, to the upstream dial from the proxy vantage + // point; and possible replay of those parameters. In addition, + // underlying sockets should be bound to a specific interface as required + // when the proxy app is also running a VPN. + ProxyUpstreamDial(ctx context.Context, network, address string) (net.Conn, error) + + DiscoverNATTimeout() time.Duration + WebRTCAnswerTimeout() time.Duration + WebRTCAwaitDataChannelTimeout() time.Duration + ProxyDestinationDialTimeout() time.Duration +} diff --git a/psiphon/common/inproxy/coordinator_test.go b/psiphon/common/inproxy/coordinator_test.go new file mode 100644 index 000000000..0e22147a2 --- /dev/null +++ b/psiphon/common/inproxy/coordinator_test.go @@ -0,0 +1,480 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package inproxy + +import ( + "context" + "encoding/json" + "fmt" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/stacktrace" +) + +type testBrokerDialCoordinator struct { + mutex sync.Mutex + networkID string + networkType NetworkType + commonCompartmentIDs []ID + personalCompartmentIDs []ID + brokerClientPrivateKey SessionPrivateKey + brokerPublicKey SessionPublicKey + brokerRootObfuscationSecret ObfuscationSecret + brokerClientRoundTripper RoundTripper + brokerClientRoundTripperSucceeded func(RoundTripper) + brokerClientRoundTripperFailed func(RoundTripper) + sessionHandshakeRoundTripTimeout time.Duration + announceRequestTimeout time.Duration + announceDelay time.Duration + announceDelayJitter float64 + answerRequestTimeout time.Duration + offerRequestTimeout time.Duration + offerRetryDelay time.Duration + offerRetryJitter float64 + relayedPacketRequestTimeout time.Duration +} + +func (t *testBrokerDialCoordinator) NetworkID() string { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.networkID +} + +func (t *testBrokerDialCoordinator) NetworkType() NetworkType { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.networkType +} + +func (t *testBrokerDialCoordinator) CommonCompartmentIDs() []ID { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.commonCompartmentIDs +} + +func (t *testBrokerDialCoordinator) PersonalCompartmentIDs() []ID { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.personalCompartmentIDs +} + +func (t *testBrokerDialCoordinator) BrokerClientPrivateKey() SessionPrivateKey { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.brokerClientPrivateKey +} + +func (t *testBrokerDialCoordinator) BrokerPublicKey() SessionPublicKey { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.brokerPublicKey +} + +func (t *testBrokerDialCoordinator) BrokerRootObfuscationSecret() ObfuscationSecret { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.brokerRootObfuscationSecret +} + +func (t *testBrokerDialCoordinator) BrokerClientRoundTripper() (RoundTripper, error) { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.brokerClientRoundTripper, nil +} + +func (t *testBrokerDialCoordinator) BrokerClientRoundTripperSucceeded(roundTripper RoundTripper) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.brokerClientRoundTripperSucceeded(roundTripper) +} + +func (t *testBrokerDialCoordinator) BrokerClientRoundTripperFailed(roundTripper RoundTripper) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.brokerClientRoundTripperFailed(roundTripper) +} + +func (t *testBrokerDialCoordinator) SessionHandshakeRoundTripTimeout() time.Duration { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.sessionHandshakeRoundTripTimeout +} + +func (t *testBrokerDialCoordinator) AnnounceRequestTimeout() time.Duration { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.announceRequestTimeout +} + +func (t *testBrokerDialCoordinator) AnnounceDelay() time.Duration { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.announceDelay +} + +func (t *testBrokerDialCoordinator) AnnounceDelayJitter() float64 { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.announceDelayJitter +} + +func (t *testBrokerDialCoordinator) AnswerRequestTimeout() time.Duration { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.answerRequestTimeout +} + +func (t *testBrokerDialCoordinator) 
OfferRequestTimeout() time.Duration { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.offerRequestTimeout +} + +func (t *testBrokerDialCoordinator) OfferRetryDelay() time.Duration { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.offerRetryDelay +} + +func (t *testBrokerDialCoordinator) OfferRetryJitter() float64 { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.offerRetryJitter +} + +func (t *testBrokerDialCoordinator) RelayedPacketRequestTimeout() time.Duration { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.relayedPacketRequestTimeout +} + +type testWebRTCDialCoordinator struct { + mutex sync.Mutex + networkID string + networkType NetworkType + clientRootObfuscationSecret ObfuscationSecret + doDTLSRandomization bool + trafficShapingParameters *DataChannelTrafficShapingParameters + stunServerAddress string + stunServerAddressRFC5780 string + stunServerAddressSucceeded func(RFC5780 bool, address string) + stunServerAddressFailed func(RFC5780 bool, address string) + discoverNAT bool + disableSTUN bool + disablePortMapping bool + disableInboundForMobileNetworks bool + disableIPv6ICECandidates bool + natType NATType + setNATType func(NATType) + portMappingTypes PortMappingTypes + setPortMappingTypes func(PortMappingTypes) + bindToDevice func(int) error + discoverNATTimeout time.Duration + webRTCAnswerTimeout time.Duration + webRTCAwaitDataChannelTimeout time.Duration + proxyDestinationDialTimeout time.Duration +} + +func (t *testWebRTCDialCoordinator) NetworkID() string { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.networkID +} + +func (t *testWebRTCDialCoordinator) NetworkType() NetworkType { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.networkType +} + +func (t *testWebRTCDialCoordinator) ClientRootObfuscationSecret() ObfuscationSecret { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.clientRootObfuscationSecret +} + +func (t *testWebRTCDialCoordinator) DoDTLSRandomization() bool { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.doDTLSRandomization +} + +func (t *testWebRTCDialCoordinator) DataChannelTrafficShapingParameters() *DataChannelTrafficShapingParameters { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.trafficShapingParameters +} + +func (t *testWebRTCDialCoordinator) STUNServerAddress(RFC5780 bool) string { + t.mutex.Lock() + defer t.mutex.Unlock() + if RFC5780 { + return t.stunServerAddressRFC5780 + } + return t.stunServerAddress +} + +func (t *testWebRTCDialCoordinator) STUNServerAddressSucceeded(RFC5780 bool, address string) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.stunServerAddressSucceeded(RFC5780, address) +} + +func (t *testWebRTCDialCoordinator) STUNServerAddressFailed(RFC5780 bool, address string) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.stunServerAddressFailed(RFC5780, address) +} + +func (t *testWebRTCDialCoordinator) DiscoverNAT() bool { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.discoverNAT +} + +func (t *testWebRTCDialCoordinator) DisableSTUN() bool { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.disableSTUN +} + +func (t *testWebRTCDialCoordinator) DisablePortMapping() bool { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.disablePortMapping +} + +func (t *testWebRTCDialCoordinator) DisableInboundForMobileNetworks() bool { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.disableInboundForMobileNetworks +} + +func (t *testWebRTCDialCoordinator) DisableIPv6ICECandidates() bool { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.disableIPv6ICECandidates +} + +func (t 
*testWebRTCDialCoordinator) NATType() NATType { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.natType +} + +func (t *testWebRTCDialCoordinator) SetNATType(natType NATType) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.natType = natType + t.setNATType(natType) +} + +func (t *testWebRTCDialCoordinator) PortMappingTypes() PortMappingTypes { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.portMappingTypes +} + +func (t *testWebRTCDialCoordinator) SetPortMappingTypes(portMappingTypes PortMappingTypes) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.portMappingTypes = append(PortMappingTypes{}, portMappingTypes...) + t.setPortMappingTypes(portMappingTypes) +} + +func (t *testWebRTCDialCoordinator) ResolveAddress(ctx context.Context, network, address string) (string, error) { + + // Note: can't use common/resolver due to import cycle + + hostname, port, err := net.SplitHostPort(address) + if err != nil { + return "", errors.Trace(err) + } + + r := &net.Resolver{} + IPs, err := r.LookupIP(ctx, network, hostname) + if err != nil { + return "", errors.Trace(err) + } + + return net.JoinHostPort(IPs[0].String(), port), nil +} + +func (t *testWebRTCDialCoordinator) UDPListen(_ context.Context) (net.PacketConn, error) { + t.mutex.Lock() + defer t.mutex.Unlock() + conn, err := net.ListenUDP("udp", nil) + if err != nil { + return nil, errors.Trace(err) + } + return conn, nil +} + +func (t *testWebRTCDialCoordinator) UDPConn(_ context.Context, network, remoteAddress string) (net.PacketConn, error) { + t.mutex.Lock() + defer t.mutex.Unlock() + switch network { + case "udp", "udp4", "udp6": + default: + return nil, errors.TraceNew("invalid network") + } + conn, err := net.Dial(network, remoteAddress) + if err != nil { + return nil, errors.Trace(err) + } + return conn.(*net.UDPConn), nil +} + +func (t *testWebRTCDialCoordinator) BindToDevice(fileDescriptor int) error { + t.mutex.Lock() + defer t.mutex.Unlock() + return errors.Trace(t.bindToDevice(fileDescriptor)) +} + +func (t *testWebRTCDialCoordinator) ProxyUpstreamDial(ctx context.Context, network, address string) (net.Conn, error) { + var dialer net.Dialer + conn, err := dialer.DialContext(ctx, network, address) + if err != nil { + return nil, errors.Trace(err) + } + return conn, nil +} + +func (t *testWebRTCDialCoordinator) DiscoverNATTimeout() time.Duration { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.discoverNATTimeout +} + +func (t *testWebRTCDialCoordinator) WebRTCAnswerTimeout() time.Duration { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.webRTCAnswerTimeout +} + +func (t *testWebRTCDialCoordinator) WebRTCAwaitDataChannelTimeout() time.Duration { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.webRTCAwaitDataChannelTimeout +} + +func (t *testWebRTCDialCoordinator) ProxyDestinationDialTimeout() time.Duration { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.proxyDestinationDialTimeout +} + +type testLogger struct { + logLevelDebug int32 +} + +func newTestLogger() *testLogger { + return &testLogger{logLevelDebug: 1} +} + +func (logger *testLogger) WithTrace() common.LogTrace { + return &testLoggerTrace{ + logger: logger, + trace: stacktrace.GetParentFunctionName(), + } +} + +func (logger *testLogger) WithTraceFields(fields common.LogFields) common.LogTrace { + return &testLoggerTrace{ + logger: logger, + trace: stacktrace.GetParentFunctionName(), + fields: fields, + } +} + +func (logger *testLogger) LogMetric(metric string, fields common.LogFields) { + jsonFields, _ := json.Marshal(fields) + fmt.Printf( + 
"[%s] METRIC: %s: %s\n", + time.Now().UTC().Format(time.RFC3339), + metric, + string(jsonFields)) +} + +func (logger *testLogger) IsLogLevelDebug() bool { + return atomic.LoadInt32(&logger.logLevelDebug) == 1 +} + +func (logger *testLogger) SetLogLevelDebug(logLevelDebug bool) { + value := int32(0) + if logLevelDebug { + value = 1 + } + atomic.StoreInt32(&logger.logLevelDebug, value) +} + +type testLoggerTrace struct { + logger *testLogger + trace string + fields common.LogFields +} + +func (logger *testLoggerTrace) log(priority, message string) { + now := time.Now().UTC().Format(time.RFC3339) + if len(logger.fields) == 0 { + fmt.Printf( + "[%s] %s: %s: %s\n", + now, priority, logger.trace, message) + } else { + fields := common.LogFields{} + for k, v := range logger.fields { + switch v := v.(type) { + case error: + // Workaround for Go issue 5161: error types marshal to "{}" + fields[k] = v.Error() + default: + fields[k] = v + } + } + jsonFields, _ := json.Marshal(fields) + fmt.Printf( + "[%s] %s: %s: %s %s\n", + now, priority, logger.trace, message, string(jsonFields)) + } +} + +func (logger *testLoggerTrace) Debug(args ...interface{}) { + if !logger.logger.IsLogLevelDebug() { + return + } + logger.log("DEBUG", fmt.Sprint(args...)) +} + +func (logger *testLoggerTrace) Info(args ...interface{}) { + logger.log("INFO", fmt.Sprint(args...)) +} + +func (logger *testLoggerTrace) Warning(args ...interface{}) { + logger.log("WARNING", fmt.Sprint(args...)) +} + +func (logger *testLoggerTrace) Error(args ...interface{}) { + logger.log("ERROR", fmt.Sprint(args...)) +} diff --git a/psiphon/common/inproxy/debug.go b/psiphon/common/inproxy/debug.go new file mode 100644 index 000000000..64cfa0a77 --- /dev/null +++ b/psiphon/common/inproxy/debug.go @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "sync/atomic" +) + +var allowCommonASNMatching int32 + +func GetAllowCommonASNMatching() bool { + return atomic.LoadInt32(&allowCommonASNMatching) == 1 +} + +// SetAllowCommonASNMatching configures whether to allow matching proxies and +// clients with the same GeoIP country and ASN. This matching is always +// permitted for matching personal compartment IDs, but for common +// compartment IDs, these matches are not allowed as they are not expected to +// be useful. SetAllowCommonASNMatching is for end-to-end testing on a single +// host, and should be used only for testing purposes. +func SetAllowCommonASNMatching(allow bool) { + value := int32(0) + if allow { + value = 1 + } + atomic.StoreInt32(&allowCommonASNMatching, value) +} + +var allowBogonWebRTCConnections int32 + +func GetAllowBogonWebRTCConnections() bool { + return atomic.LoadInt32(&allowBogonWebRTCConnections) == 1 +} + +// SetAllowBogonWebRTCConnections configures whether to allow bogon ICE +// candidates in WebRTC session descriptions. 
This included loopback and +// private network candidates. By default, bogon addresses are exclude as +// they are not expected to be useful and may expose private network +// information. SetAllowBogonWebRTCConnections is for end-to-end testing on a +// single host, and should be used only for testing purposes. +func SetAllowBogonWebRTCConnections(allow bool) { + value := int32(0) + if allow { + value = 1 + } + atomic.StoreInt32(&allowBogonWebRTCConnections, value) +} diff --git a/psiphon/common/inproxy/discovery.go b/psiphon/common/inproxy/discovery.go new file mode 100644 index 000000000..fb638ac9a --- /dev/null +++ b/psiphon/common/inproxy/discovery.go @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "context" + "sync" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" +) + +const ( + discoverNATTimeout = 10 * time.Second + discoverNATRoundTripTimeout = 2 * time.Second +) + +// NATDiscoverConfig specifies the configuration for a NATDiscover run. +type NATDiscoverConfig struct { + + // Logger is used to log events. + Logger common.Logger + + // WebRTCDialCoordinator specifies specific STUN and discovery and + // settings, and receives discovery results. + WebRTCDialCoordinator WebRTCDialCoordinator + + // SkipPortMapping indicates whether to skip port mapping type discovery, + // as clients do since they will gather the same stats during the WebRTC + // offer preparation. + SkipPortMapping bool +} + +// NATDiscover runs NAT type and port mapping type discovery operations. +// +// Successfuly results are delivered to NATDiscoverConfig.WebRTCDialCoordinator +// callbacks, SetNATType and SetPortMappingTypes, which should cache results +// associated with the current network, by network ID. +// +// NAT discovery will invoke WebRTCDialCoordinator callbacks +// STUNServerAddressSucceeded and STUNServerAddressFailed, which may be used +// to mark or unmark STUN servers for replay. +func NATDiscover( + ctx context.Context, + config *NATDiscoverConfig) { + + // Run discovery until the specified timeout, or ctx is done. NAT and port + // mapping discovery are run concurrently. + + discoverCtx, cancelFunc := context.WithTimeout( + ctx, common.ValueOrDefault(config.WebRTCDialCoordinator.DiscoverNATTimeout(), discoverNATTimeout)) + defer cancelFunc() + + discoveryWaitGroup := new(sync.WaitGroup) + + if config.WebRTCDialCoordinator.NATType().NeedsDiscovery() && + !config.WebRTCDialCoordinator.DisableSTUN() { + + discoveryWaitGroup.Add(1) + go func() { + defer discoveryWaitGroup.Done() + + natType, err := discoverNATType(discoverCtx, config) + + if err == nil { + // Deliver the result. The WebRTCDialCoordinator provider may cache + // this result, associated wih the current networkID. 
+ config.WebRTCDialCoordinator.SetNATType(natType) + } + + config.Logger.WithTraceFields(common.LogFields{ + "nat_type": natType, + "error": err, + }).Info("NAT type discovery") + + }() + } + + if !config.SkipPortMapping && + config.WebRTCDialCoordinator.PortMappingTypes().NeedsDiscovery() && + !config.WebRTCDialCoordinator.DisablePortMapping() { + + discoveryWaitGroup.Add(1) + go func() { + defer discoveryWaitGroup.Done() + + portMappingTypes, err := discoverPortMappingTypes( + discoverCtx, config.Logger) + + if err == nil { + // Deliver the result. The WebRTCDialCoordinator provider may cache + // this result, associated wih the current networkID. + config.WebRTCDialCoordinator.SetPortMappingTypes(portMappingTypes) + } + + config.Logger.WithTraceFields(common.LogFields{ + "port_mapping_types": portMappingTypes, + "error": err, + }).Info("Port mapping type discovery") + + }() + } + + discoveryWaitGroup.Wait() +} + +func discoverNATType( + ctx context.Context, + config *NATDiscoverConfig) (NATType, error) { + + RFC5780 := true + stunServerAddress := config.WebRTCDialCoordinator.STUNServerAddress(RFC5780) + + if stunServerAddress == "" { + return NATTypeUnknown, errors.TraceNew("no RFC5780 STUN server") + } + + serverAddress, err := config.WebRTCDialCoordinator.ResolveAddress( + ctx, "ip", stunServerAddress) + if err != nil { + return NATTypeUnknown, errors.Trace(err) + } + + // The STUN server will observe proxy IP addresses. Enumeration is + // mitigated by using various public STUN servers, including Psiphon STUN + // servers for proxies in non-censored regions. Proxies are also more + // ephemeral than Psiphon servers. + + // Limitation: RFC5780, "4.1. Source Port Selection" recommends using the + // same source port for NAT discovery _and_ subsequent NAT traveral + // applications, such as WebRTC ICE. It's stated that the discovered NAT + // type may only be valid for the particular tested port. + // + // We don't do this at this time, as we don't want to incur the full + // RFC5780 discovery overhead for every WebRTC dial, and expect that, in + // most typical cases, the network NAT type applies to all ports. + // Furthermore, the UDP conn that owns the tested port may need to be + // closed to interrupt discovery. + + // We run the filtering test before the mapping test, and each test uses a + // distinct source port; using the same source port may result in NAT + // state from one test confusing the other test. See also, + // https://github.com/jselbie/stunserver/issues/18: + // + // > running both the behavior test and the filtering test at the + // > same time can cause an incorrect filtering type to be detected. + // > If the filtering is actually "address dependent", the scan will + // > report it as "endpoint independent". + // > + // > The cause appears to be the order in which the tests are being + // > performed, currently "behavior" tests followed by "filtering" + // > tests. The network traffic from the behavior tests having been run + // > causes the router to allow filtering test responses back through + // > that would not have otherwise been allowed... The behavior tests + // > send traffic to the secondary IP of the STUN server, so the + // > filtering tests are allowed to get responses back from that + // > secondary IP. + // > + // > The fix is likely some combination of ...re-order the tests... + // > or use the a different port for the filtering test. 
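[Editor's aside, not part of the patch] For orientation, a caller-side sketch of running NATDiscover as described above; logger and coordinator are placeholder assumptions:

    // Run NAT and port mapping discovery, bounded by the coordinator's
    // DiscoverNATTimeout; results are delivered via SetNATType and
    // SetPortMappingTypes and cached against the current network ID.
    inproxy.NATDiscover(
        ctx,
        &inproxy.NATDiscoverConfig{
            Logger:                logger,
            WebRTCDialCoordinator: coordinator,
            SkipPortMapping:       false,
        })

    // The discovered values can now be reported to the broker.
    natType := coordinator.NATType()
    portMappingTypes := coordinator.PortMappingTypes()
    _, _ = natType, portMappingTypes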
+ // + // TODO: RFC5780, "4.5 Combining and Ordering Tests", suggests that the + // individual test steps within filtering and mapping could be combined, + // and certain tests may be run concurrently, with the goal of reducing + // the total elapsed test time. However, "care must be taken when + // combining and parallelizing tests, due to the sensitivity of certain + // tests to prior state on the NAT and because some NAT devices have an + // upper limit on how quickly bindings will be allocated." + // + // For now, we stick with a conservative arrangement of tests. Note that, + // in practice, discoverNATMapping completes much faster than + // discoverNATFiltering, and so there's a limited gain from running these + // two top-level tests concurrently. + + mappingConn, err := config.WebRTCDialCoordinator.UDPListen(ctx) + if err != nil { + return NATTypeUnknown, errors.Trace(err) + } + defer mappingConn.Close() + + filteringConn, err := config.WebRTCDialCoordinator.UDPListen(ctx) + if err != nil { + return NATTypeUnknown, errors.Trace(err) + } + defer filteringConn.Close() + + type result struct { + NATType NATType + err error + } + resultChannel := make(chan result, 1) + + go func() { + + filtering, err := discoverNATFiltering(ctx, filteringConn, serverAddress) + if err != nil { + resultChannel <- result{err: errors.Trace(err)} + return + } + + mapping, err := discoverNATMapping(ctx, mappingConn, serverAddress) + if err != nil { + resultChannel <- result{err: errors.Trace(err)} + return + } + + resultChannel <- result{NATType: MakeNATType(mapping, filtering)} + return + }() + + var r result + select { + case r = <-resultChannel: + case <-ctx.Done(): + + // Interrupt and await the goroutine + mappingConn.Close() + filteringConn.Close() + <-resultChannel + + // Don't call STUNServerAddressFailed, since ctx.Done may be due to an + // early dial cancel. + return NATTypeUnknown, errors.Trace(ctx.Err()) + } + + if r.err != nil { + + config.WebRTCDialCoordinator.STUNServerAddressFailed(RFC5780, stunServerAddress) + + return NATTypeUnknown, errors.Trace(r.err) + } + + config.WebRTCDialCoordinator.STUNServerAddressSucceeded(RFC5780, stunServerAddress) + + return r.NATType, nil +} + +func discoverPortMappingTypes( + ctx context.Context, + logger common.Logger) (PortMappingTypes, error) { + + portMappingTypes, err := probePortMapping(ctx, logger) + if err != nil { + return nil, errors.Trace(err) + } + + return portMappingTypes, nil +} diff --git a/psiphon/common/inproxy/discoverySTUN.go b/psiphon/common/inproxy/discoverySTUN.go new file mode 100644 index 000000000..89cff8ad2 --- /dev/null +++ b/psiphon/common/inproxy/discoverySTUN.go @@ -0,0 +1,245 @@ +//go:build PSIPHON_ENABLE_INPROXY + +/* + * Copyright (c) 2024, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see .
+ * + */ + +package inproxy + +import ( + "context" + "net" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/pion/stun" +) + +// discoverNATMapping and discoverNATFiltering are modifications of: +// https://github.com/pion/stun/blob/b321a45be43b07685c639943aaa28e6841517799/cmd/stun-nat-behaviour/main.go + +// https://github.com/pion/stun/blob/b321a45be43b07685c639943aaa28e6841517799/LICENSE.md: +/* +Copyright 2018 Pion LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// RFC5780: 4.3. Determining NAT Mapping Behavior +func discoverNATMapping( + ctx context.Context, + conn net.PacketConn, + serverAddress string) (NATMapping, error) { + + // Test I: Regular binding request + + request := stun.MustBuild(stun.TransactionID, stun.BindingRequest) + + response, _, err := doSTUNRoundTrip(request, conn, serverAddress) + if err != nil { + return NATMappingUnknown, errors.Trace(err) + } + responseFields := parseSTUNMessage(response) + if responseFields.xorAddr == nil || responseFields.otherAddr == nil { + return NATMappingUnknown, errors.TraceNew("NAT discovery not supported") + } + if responseFields.xorAddr.String() == conn.LocalAddr().String() { + return NATMappingEndpointIndependent, nil + } + + otherAddress := responseFields.otherAddr + + // Verify that otherAddress, specified by STUN server, is a valid public + // IP before sending a packet to it. This prevents the STUN server + // (or injected response) from redirecting the flow to an internal network. 
+ + if common.IsBogon(otherAddress.IP) { + return NATMappingUnknown, errors.TraceNew("OTHER-ADDRESS is bogon") + } + + // Test II: Send binding request to the other address but primary port + + _, serverPort, err := net.SplitHostPort(serverAddress) + if err != nil { + return NATMappingUnknown, errors.Trace(err) + } + + address := net.JoinHostPort(otherAddress.IP.String(), serverPort) + response2, _, err := doSTUNRoundTrip(request, conn, address) + if err != nil { + return NATMappingUnknown, errors.Trace(err) + } + response2Fields := parseSTUNMessage(response2) + if response2Fields.xorAddr.String() == responseFields.xorAddr.String() { + return NATMappingEndpointIndependent, nil + } + + // Test III: Send binding request to the other address and port + + response3, _, err := doSTUNRoundTrip(request, conn, otherAddress.String()) + if err != nil { + return NATMappingUnknown, errors.Trace(err) + } + response3Fields := parseSTUNMessage(response3) + if response3Fields.xorAddr.String() == response2Fields.xorAddr.String() { + return NATMappingAddressDependent, nil + } else { + return NATMappingAddressPortDependent, nil + } + + return NATMappingUnknown, nil +} + +// RFC5780: 4.4. Determining NAT Filtering Behavior +func discoverNATFiltering( + ctx context.Context, + conn net.PacketConn, + serverAddress string) (NATFiltering, error) { + + // Test I: Regular binding request + + request := stun.MustBuild(stun.TransactionID, stun.BindingRequest) + response, _, err := doSTUNRoundTrip(request, conn, serverAddress) + if err != nil { + return NATFilteringUnknown, errors.Trace(err) + } + responseFields := parseSTUNMessage(response) + if responseFields.xorAddr == nil || responseFields.otherAddr == nil { + return NATFilteringUnknown, errors.TraceNew("NAT discovery not supported") + } + + // Test II: Request to change both IP and port + + request = stun.MustBuild(stun.TransactionID, stun.BindingRequest) + request.Add(stun.AttrChangeRequest, []byte{0x00, 0x00, 0x00, 0x06}) + + response, responseTimeout, err := doSTUNRoundTrip(request, conn, serverAddress) + if err == nil { + return NATFilteringEndpointIndependent, nil + } else if !responseTimeout { + return NATFilteringUnknown, errors.Trace(err) + } + + // Test III: Request to change port only + + request = stun.MustBuild(stun.TransactionID, stun.BindingRequest) + request.Add(stun.AttrChangeRequest, []byte{0x00, 0x00, 0x00, 0x02}) + + response, responseTimeout, err = doSTUNRoundTrip(request, conn, serverAddress) + if err == nil { + return NATFilteringAddressDependent, nil + } else if !responseTimeout { + return NATFilteringUnknown, errors.Trace(err) + } + + return NATFilteringAddressPortDependent, nil +} + +func parseSTUNMessage(message *stun.Message) (ret struct { + xorAddr *stun.XORMappedAddress + otherAddr *stun.OtherAddress + respOrigin *stun.ResponseOrigin + mappedAddr *stun.MappedAddress + software *stun.Software +}, +) { + ret.mappedAddr = &stun.MappedAddress{} + ret.xorAddr = &stun.XORMappedAddress{} + ret.respOrigin = &stun.ResponseOrigin{} + ret.otherAddr = &stun.OtherAddress{} + ret.software = &stun.Software{} + if ret.xorAddr.GetFrom(message) != nil { + ret.xorAddr = nil + } + if ret.otherAddr.GetFrom(message) != nil { + ret.otherAddr = nil + } + if ret.respOrigin.GetFrom(message) != nil { + ret.respOrigin = nil + } + if ret.mappedAddr.GetFrom(message) != nil { + ret.mappedAddr = nil + } + if ret.software.GetFrom(message) != nil { + ret.software = nil + } + return ret +} + +// doSTUNRoundTrip returns nil, true, nil on timeout reading a response. 
+func doSTUNRoundTrip( + request *stun.Message, + conn net.PacketConn, + remoteAddress string) (*stun.Message, bool, error) { + + remoteAddr, err := net.ResolveUDPAddr("udp", remoteAddress) + if err != nil { + return nil, false, errors.Trace(err) + } + + _ = request.NewTransactionID() + _, err = conn.WriteTo(request.Raw, remoteAddr) + if err != nil { + return nil, false, errors.Trace(err) + } + + conn.SetReadDeadline(time.Now().Add(discoverNATRoundTripTimeout)) + + var buffer [1500]byte + n, _, err := conn.ReadFrom(buffer[:]) + if err != nil { + if e, ok := err.(net.Error); ok && e.Timeout() { + return nil, true, errors.Trace(err) + } + return nil, false, errors.Trace(err) + } + + response := new(stun.Message) + response.Raw = buffer[:n] + err = response.Decode() + if err != nil { + return nil, false, errors.Trace(err) + } + + // Verify that the response packet has the expected transaction ID, to + // partially mitigate against phony injected responses. + + if response.TransactionID != request.TransactionID { + return nil, false, errors.TraceNew( + "unexpected response transaction ID") + } + + return response, false, nil +} diff --git a/psiphon/common/inproxy/discovery_test.go b/psiphon/common/inproxy/discovery_test.go new file mode 100644 index 000000000..fde78b23c --- /dev/null +++ b/psiphon/common/inproxy/discovery_test.go @@ -0,0 +1,132 @@ +//go:build PSIPHON_ENABLE_INPROXY + +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package inproxy + +import ( + "context" + "sync/atomic" + "testing" +) + +func TestNATDiscovery(t *testing.T) { + + // TODO: run local STUN and port mapping servers to test against, along + // with iptables rules to simulate NAT conditions + + stunServerAddress := "stun.nextcloud.com:443" + + var setNATTypeCallCount, + setPortMappingTypesCallCount, + stunServerAddressSucceededCallCount, + stunServerAddressFailedCallCount int32 + + coordinator := &testWebRTCDialCoordinator{ + stunServerAddress: stunServerAddress, + stunServerAddressRFC5780: stunServerAddress, + + setNATType: func(NATType) { + atomic.AddInt32(&setNATTypeCallCount, 1) + }, + + setPortMappingTypes: func(PortMappingTypes) { + atomic.AddInt32(&setPortMappingTypesCallCount, 1) + }, + + stunServerAddressSucceeded: func(RFC5780 bool, address string) { + atomic.AddInt32(&stunServerAddressSucceededCallCount, 1) + if address != stunServerAddress { + t.Errorf("unexpected STUN server address") + } + }, + + stunServerAddressFailed: func(RFC5780 bool, address string) { + atomic.AddInt32(&stunServerAddressFailedCallCount, 1) + if address != stunServerAddress { + t.Errorf("unexpected STUN server address") + } + }, + } + + checkCallCounts := func(a, b, c, d int32) { + callCount := atomic.LoadInt32(&setNATTypeCallCount) + if callCount != a { + t.Errorf( + "unexpected setNATType call count: %d", + callCount) + } + + callCount = atomic.LoadInt32(&setPortMappingTypesCallCount) + if callCount != b { + t.Errorf( + "unexpected setPortMappingTypes call count: %d", + callCount) + } + + callCount = atomic.LoadInt32(&stunServerAddressSucceededCallCount) + if callCount != c { + t.Errorf( + "unexpected stunServerAddressSucceeded call count: %d", + callCount) + } + + callCount = atomic.LoadInt32(&stunServerAddressFailedCallCount) + if callCount != d { + t.Errorf( + "unexpected stunServerAddressFailedCallCount call count: %d", + callCount) + } + } + + config := &NATDiscoverConfig{ + Logger: newTestLogger(), + WebRTCDialCoordinator: coordinator, + } + + // Should do STUN only + + coordinator.disablePortMapping = true + + NATDiscover(context.Background(), config) + + checkCallCounts(1, 0, 1, 0) + + // Should do port mapping only + + coordinator.disableSTUN = true + coordinator.disablePortMapping = false + + NATDiscover(context.Background(), config) + + checkCallCounts(1, 1, 1, 0) + + // Should skip both and use values cached in WebRTCDialCoordinator + + coordinator.disableSTUN = false + coordinator.disablePortMapping = false + + NATDiscover(context.Background(), config) + + checkCallCounts(1, 1, 1, 0) + + t.Logf("NAT Type: %s", coordinator.NATType()) + t.Logf("Port Mapping Types: %s", coordinator.PortMappingTypes()) +} diff --git a/psiphon/common/inproxy/doc.go b/psiphon/common/inproxy/doc.go new file mode 100644 index 000000000..54bbe9449 --- /dev/null +++ b/psiphon/common/inproxy/doc.go @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +/* +Package inproxy enables 3rd party, ephemeral proxies to help Psiphon clients +connect to the Psiphon network. + +The in-proxy architecture is inspired by and similar to Tor's snowflake +pluggable transport, https://snowflake.torproject.org/. + +With in-proxy, Psiphon clients are matched with proxies by brokers run by the +Psiphon network. + +In addition to proxies in unblocked regions, proxies in blocked regions are +supported, to facilitate the use cases such as a local region hop from a +mobile ISP, where international traffic may be expensive and throttled, to a +home ISP, which may be less restricted. + +The proxy/server hop uses the full range of Psiphon tunnel protocols, +providing blocking circumvention on the 2nd hop. + +Proxies don't create Psiphon tunnels, they just relay either TCP or UDP flows +from the client to the server, where those flows are Psiphon tunnel +protocols. Proxies don't need to be upgraded in order to relay newer Psiphon +tunnel protocols or protocol variants. + +Proxies cannot see the client traffic within the relayed Psiphon tunnel. +Brokers verify that client destinations are valid Psiphon servers only, so +proxies cannot be misused for non-Psiphon relaying. + +To limit the set of Psiphon servers that proxies can observe and enumerate, +client destinations are limited to the set of servers specifically designated +with in-proxy capabilities. This is enforced by the broker. + +Proxies are compartmentalized in two ways; (1) personal proxies will use a +personal compartment ID to limit access to clients run by users with whom the +proxy operator has shared, out-of-band, a personal compartment ID, or access +token; (2) common proxies will be assigned a common compartment ID by the +Psiphon network to limit access to clients that have obtained the common +compartment ID, or access token, from Psiphon through channels such as +targeted tactics or embedded in OSLs. + +Proxies are expected to be run for longer periods, on desktop computers. The +in-proxy design does not currently support browser extension or website +widget proxies. + +The client/proxy hop uses WebRTC, with the broker playing the role of a WebRTC +signaling server in addition to matching clients and proxies. Clients and +proxies gather ICE candidates, including any host candidates, IPv4 or IPv6, +as well as STUN server reflexive candidates. In addition, any available port +mapping protocols -- UPnP-IGD, NAT-PMP, PCP -- are used to gather port +mapping candidates, which are injected into ICE SDPs as host candidates. TURN +candidates are not used. + +NAT topology discovery is performed and metrics sent to broker to optimize +utility and matching of proxies to clients. Mobile networks may be assumed to +be CGNAT in case NAT discovery fails or is skipped. And, for mobile networks, +there is an option to skip discovery and STUN for a faster dial. + +The client-proxy is a WebRTC data channel; on the wire, it is DTLS, preceded +by an ICE STUN packet. By default, WebRTC DTLS is configured to look like +common browsers. In addition, the DTLS ClientHello can be randomized. Proxy +endpoints are ephemeral, but if they were to be scanned or probed, the +response should look like common WebRTC stacks that receive packets from +invalid peers. + +Clients and proxies connect to brokers via a domain fronting transport; the +transport is abstracted and other channels may be provided. 
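To make the transport abstraction concrete: the package's RoundTripper, which is referenced but not defined in this excerpt, is exercised later in this diff by the test's newHTTPRoundTripper, which performs the broker round trip over HTTPS. Inferring from that test, such a transport presumably has roughly the following shape; this is a sketch, not the authoritative definition:

    // A broker round-trip transport, as implemented by httpRoundTripper in
    // inproxy_test.go below; the real interface may differ in detail.
    type brokerRoundTripper interface {

        // RoundTrip sends one request payload to the broker, after an
        // optional delay and subject to a timeout, and returns the
        // response payload.
        RoundTrip(
            ctx context.Context,
            roundTripDelay time.Duration,
            roundTripTimeout time.Duration,
            requestPayload []byte) ([]byte, error)

        // Close releases any resources held by the transport.
        Close() error
    }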
Within that +transport, a Noise protocol framework session is established between +clients/proxies and a broker, to ensure privacy, authentication, and replay +defense between the end points; not even a domain fronting CDN can observe +the transactions within a session. The session has an additional obfuscation +layer that renders the messages as fully random, which may be suitable for +encapsulating in plaintext transports; adds random padding; and detects +replay of any message. + +For clients and proxies, all broker and WebRTC dial parameters, including +domain fronting, STUN server selection, NAT discovery behavior, timeouts, and +so on are remotely configurable via Psiphon tactics. Callbacks facilitate +replay of successful dial parameters for individual stages of a dial, +including a successful broker connection, or a working STUN server. + +For each proxied client tunnel, brokers use secure sessions to send the +destination Psiphon server a message indicating the proxy ID that's relaying +the client's traffic, the original client IP, and additional metrics to be +logged with the server_tunnel log for the tunnel. Neither a client nor a +proxy is trusted to report the original client IP or the proxy ID. + +Instead of having the broker connect out to Psiphon servers, and trying to +synchronize reliable arrival of these messages, the broker uses the client to +relay secure session packets -- the message, preceded by a session handshake +if required -- inline, in the client/broker and client/server tunnel +connections. These session packets piggyback on top of client/broker and +client/server round trips that happen anyway, including the Psiphon API +handshake. + +Psiphon servers with in-proxy capabilities should be configured, on in-proxy +listeners, to require receipt of this broker message before finalizing +traffic rules, issuing tactics, issuing OSL progress, or allowing traffic +tunneling. The original client IP reported by the broker should be used for +all client GeoIP policy decisions and logging. + +The proxy ID corresponds to the proxy's secure session public key; the proxy +proves possession of the corresponding private key in the session handshake. +Proxy IDs are not revealed to clients; only to brokers and Psiphon servers. A +proxy may maintain a long-term key pair and corresponding proxy ID, and that +may be used by Psiphon to assign reputation to well-performing proxies or to +issue rewards for proxies. + +Each secure session public key is an Ed25519 public key. This public key is +used for signatures, including the session reset token in the session +protocol. This signing key may also be used, externally, in a +challenge/response registration process where a proxy operator can +demonstrate ownership of a proxy public key and its corresponding proxy ID. +For use in ECDH in the Noise protocol, the Ed25519 public key is converted to +the corresponding, unique Curve25519 public key. + +Logged proxy ID values will be the Curve25519 representation of the public +key. Since Curve25519 public keys don't uniquely map back to Ed25519 public +keys, any external proxy registration system should store the Ed25519 public +key and derive the corresponding Curve25519 when mapping server tunnel proxy +IDs back to the Ed25519 proxy public key. + +The proxy is designed to be bundled with the tunnel-core client, run +optionally, and integrated with its tactics, data store, and logging. 
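Returning briefly to the key conversion described above: deriving the Curve25519 representation of an Ed25519 public key can be done with filippo.io/edwards25519, as in this standalone sketch. It illustrates the mapping only and is not necessarily how this package implements it.

    package main

    import (
        "crypto/ed25519"
        "fmt"

        "filippo.io/edwards25519"
    )

    // ed25519PublicKeyToCurve25519 returns the Curve25519 (Montgomery)
    // representation of an Ed25519 public key, suitable for X25519 ECDH.
    func ed25519PublicKeyToCurve25519(publicKey ed25519.PublicKey) ([]byte, error) {
        p, err := new(edwards25519.Point).SetBytes(publicKey)
        if err != nil {
            return nil, err
        }
        return p.BytesMontgomery(), nil
    }

    func main() {
        publicKey, _, err := ed25519.GenerateKey(nil)
        if err != nil {
            panic(err)
        }
        curveKey, err := ed25519PublicKeyToCurve25519(publicKey)
        if err != nil {
            panic(err)
        }
        fmt.Printf("proxy ID (Curve25519 representation): %x\n", curveKey)
    }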
The +broker is designed to be bundled with the Psiphon server, psiphond, and, like +tactics requests, run under MeekServer; and use the tactics, psinet database, +GeoIP services, and logging services provided by psiphond. + +The build tag PSIPHON_ENABLE_INPROXY must be specified in order to enable +in-proxy components. Without this build tag, the components are disabled and +larger dependencies are not referenced and excluded from builds. +*/ +package inproxy diff --git a/psiphon/common/inproxy/dtls/dtls.go b/psiphon/common/inproxy/dtls/dtls.go new file mode 100644 index 000000000..9d87a6164 --- /dev/null +++ b/psiphon/common/inproxy/dtls/dtls.go @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package dtls + +import ( + "context" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" +) + +type dtlsSeedValue string + +const valueKey = dtlsSeedValue("DTLS-seed") + +// SetDTLSSeed establishes a cached common/prng seed to be used when +// randomizing DTLS Hellos. +// +// The seed is attached as a value to the input dial context, yielding the +// output context. This allows a fork of pion/dtls to fetch the seed, from a +// context, and apply randomization without having to fork many pion layers +// to pass in seeds. +// +// Both sides of a WebRTC connection may randomize their Hellos. isOffer +// allows the same seed to be used, but produce two distinct random streams. +// The client generates or replays an obfuscation secret used to derive the +// seed, and the obfuscation secret is relayed to the proxy by the Broker. +func SetDTLSSeed( + ctx context.Context, baseSeed *prng.Seed, isOffer bool) (context.Context, error) { + + salt := "inproxy-client-DTLS-seed" + if !isOffer { + salt = "inproxy-proxy-DTLS-seed" + } + + seed, err := prng.NewSaltedSeed(baseSeed, salt) + if err != nil { + return nil, errors.Trace(err) + } + + seedCtx := context.WithValue(ctx, valueKey, seed) + + return seedCtx, nil +} + +// SetNoDTLSSeed indicates to skip DTLS randomization for the given dial +// context. +func SetNoDTLSSeed(ctx context.Context) context.Context { + var nilSeed *prng.Seed + return context.WithValue(ctx, valueKey, nilSeed) +} + +// GetDTLSSeed fetches a seed established by SetDTLSSeed, or nil for no seed +// as set by SetNoDTLSSeed, or returns an error if no seed is configured +// specified dial context. 
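A brief usage sketch for the seed helpers above, under the assumption that the caller generates a base seed with prng.NewSeed from the common/prng package imported above; the actual tunnel-core wiring may differ, and the function name here is hypothetical:

    // dialWithDTLSRandomization illustrates an offer-side caller: it
    // attaches a DTLS randomization seed to the dial context, which the
    // forked pion/dtls layer later retrieves via dtls.GetDTLSSeed.
    func dialWithDTLSRandomization(ctx context.Context) error {

        baseSeed, err := prng.NewSeed()
        if err != nil {
            return errors.Trace(err)
        }

        offerCtx, err := dtls.SetDTLSSeed(ctx, baseSeed, true)
        if err != nil {
            return errors.Trace(err)
        }

        // ... pass offerCtx down into the WebRTC dial; the forked DTLS
        // layer calls dtls.GetDTLSSeed(offerCtx) to randomize its Hello ...
        _ = offerCtx

        return nil
    }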
+func GetDTLSSeed(ctx context.Context) (*prng.Seed, error) { + value := ctx.Value(valueKey) + if value == nil { + return nil, errors.TraceNew("missing seed") + } + return value.(*prng.Seed), nil +} diff --git a/psiphon/common/inproxy/inproxy_disabled.go b/psiphon/common/inproxy/inproxy_disabled.go new file mode 100644 index 000000000..71f62f3cf --- /dev/null +++ b/psiphon/common/inproxy/inproxy_disabled.go @@ -0,0 +1,175 @@ +//go:build !PSIPHON_ENABLE_INPROXY + +/* + * Copyright (c) 2024, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "context" + std_errors "errors" + "net" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" +) + +// The inproxy package has a broad API that referenced throughout the psiphon +// and psiphon/server packages. +// +// When PSIPHON_ENABLE_INPROXY is not specified, inproxy components are +// disabled and large dependencies, including pion and tailscale, are not +// referenced and excluded from builds. The stub types and functions here are +// sufficient to omit all pion and tailscale references. The remaining, broad +// inproxy API surface is not stubbed out. +// +// Client/proxy and server/broker integrations in psiphon and psiphon/server +// should all check inproxy.Enabled and, when false, skip or fail early +// before trying to use inproxy components. + +// Enabled indicates if in-proxy functionality is enabled. +func Enabled() bool { + return false +} + +var errNotEnabled = std_errors.New("operation not enabled") + +const ( + dataChannelAwaitTimeout = time.Duration(0) +) + +type webRTCConn struct { +} + +type webRTCConfig struct { + Logger common.Logger + EnableDebugLogging bool + WebRTCDialCoordinator WebRTCDialCoordinator + ClientRootObfuscationSecret ObfuscationSecret + DoDTLSRandomization bool + TrafficShapingParameters *DataChannelTrafficShapingParameters + ReliableTransport bool +} + +func (conn *webRTCConn) SetRemoteSDP(peerSDP WebRTCSessionDescription) error { + return errors.Trace(errNotEnabled) +} + +// AwaitInitialDataChannel returns when the data channel is established, or +// when an error has occured. 
+func (conn *webRTCConn) AwaitInitialDataChannel(ctx context.Context) error { + return errors.Trace(errNotEnabled) +} + +func (conn *webRTCConn) Close() error { + return errors.Trace(errNotEnabled) +} + +func (conn *webRTCConn) IsClosed() bool { + return false +} + +func (conn *webRTCConn) Read(p []byte) (int, error) { + return 0, errors.Trace(errNotEnabled) +} + +func (conn *webRTCConn) Write(p []byte) (int, error) { + return 0, errors.Trace(errNotEnabled) +} + +func (conn *webRTCConn) LocalAddr() net.Addr { + return nil +} + +func (conn *webRTCConn) RemoteAddr() net.Addr { + return nil +} + +func (conn *webRTCConn) SetDeadline(t time.Time) error { + return errors.Trace(errNotEnabled) +} + +func (conn *webRTCConn) SetReadDeadline(t time.Time) error { + return errors.Trace(errNotEnabled) +} + +func (conn *webRTCConn) SetWriteDeadline(t time.Time) error { + return errors.Trace(errNotEnabled) +} + +func (conn *webRTCConn) GetMetrics() common.LogFields { + return nil +} + +type webRTCSDPMetrics struct { + iceCandidateTypes []ICECandidateType + hasIPv6 bool + filteredICECandidates []string +} + +func newWebRTCConnWithOffer( + ctx context.Context, + config *webRTCConfig) ( + *webRTCConn, WebRTCSessionDescription, *webRTCSDPMetrics, error) { + return nil, WebRTCSessionDescription{}, nil, errors.Trace(errNotEnabled) +} + +func newWebRTCConnWithAnswer( + ctx context.Context, + config *webRTCConfig, + peerSDP WebRTCSessionDescription) ( + *webRTCConn, WebRTCSessionDescription, *webRTCSDPMetrics, error) { + + return nil, WebRTCSessionDescription{}, nil, errors.Trace(errNotEnabled) +} + +func filterSDPAddresses( + encodedSDP []byte, + errorOnNoCandidates bool, + lookupGeoIP LookupGeoIP, + expectedGeoIPData common.GeoIPData) ([]byte, *webRTCSDPMetrics, error) { + return nil, nil, errors.Trace(errNotEnabled) +} + +func initPortMapper(coordinator WebRTCDialCoordinator) { +} + +func probePortMapping( + ctx context.Context, + logger common.Logger) (PortMappingTypes, error) { + + return nil, errors.Trace(errNotEnabled) +} + +func discoverNATMapping( + ctx context.Context, + conn net.PacketConn, + serverAddress string) (NATMapping, error) { + + return NATMappingUnknown, errors.Trace(errNotEnabled) +} + +func discoverNATFiltering( + ctx context.Context, + conn net.PacketConn, + serverAddress string) (NATFiltering, error) { + + return NATFilteringUnknown, errors.Trace(errNotEnabled) +} diff --git a/psiphon/common/inproxy/inproxy_enabled.go b/psiphon/common/inproxy/inproxy_enabled.go new file mode 100644 index 000000000..588fc3d0f --- /dev/null +++ b/psiphon/common/inproxy/inproxy_enabled.go @@ -0,0 +1,27 @@ +//go:build PSIPHON_ENABLE_INPROXY + +/* + * Copyright (c) 2024, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +// Enabled indicates if in-proxy functionality is enabled. 
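The integration guidance in inproxy_disabled.go above amounts to a simple gate on Enabled. An illustrative caller-side check, with a hypothetical surrounding function, looks like:

    // dialInproxy illustrates the "check inproxy.Enabled and fail early"
    // pattern described above; it is not an actual tunnel-core function.
    func dialInproxy(ctx context.Context) (net.Conn, error) {
        if !inproxy.Enabled() {
            // Built without PSIPHON_ENABLE_INPROXY: skip in-proxy dials
            // rather than exercising the stubbed components.
            return nil, errors.TraceNew("inproxy is not enabled in this build")
        }
        // ... proceed with the BrokerClient and WebRTC dial ...
        return nil, nil
    }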
+func Enabled() bool { + return true +} diff --git a/psiphon/common/inproxy/inproxy_test.go b/psiphon/common/inproxy/inproxy_test.go new file mode 100644 index 000000000..c5ec42fe2 --- /dev/null +++ b/psiphon/common/inproxy/inproxy_test.go @@ -0,0 +1,992 @@ +//go:build PSIPHON_ENABLE_INPROXY + +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + _ "net/http/pprof" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/quic" + "golang.org/x/sync/errgroup" +) + +func TestInproxy(t *testing.T) { + err := runTestInproxy() + if err != nil { + t.Errorf(errors.Trace(err).Error()) + } +} + +func runTestInproxy() error { + + // Note: use the environment variable PION_LOG_TRACE=all to emit WebRTC logging. 
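Since this test file carries the PSIPHON_ENABLE_INPROXY build constraint, the suite only runs when that tag is passed to go test; for example, with pion WebRTC tracing enabled as noted above:

    PION_LOG_TRACE=all go test -v -race -tags "PSIPHON_ENABLE_INPROXY" ./psiphon/common/inproxy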
+ + numProxies := 5 + proxyMaxClients := 3 + numClients := 10 + + bytesToSend := 1 << 20 + targetElapsedSeconds := 2 + + baseAPIParameters := common.APIParameters{ + "sponsor_id": strings.ToUpper(prng.HexString(8)), + "client_platform": "test-client-platform", + } + + testCompartmentID, _ := MakeID() + testCommonCompartmentIDs := []ID{testCompartmentID} + + testNetworkID := "NETWORK-ID-1" + testNetworkType := NetworkTypeUnknown + testNATType := NATTypeUnknown + testSTUNServerAddress := "stun.nextcloud.com:443" + testDisableSTUN := false + + testNewTacticsPayload := []byte(prng.HexString(100)) + testNewTacticsTag := "new-tactics-tag" + testUnchangedTacticsPayload := []byte(prng.HexString(100)) + + // TODO: test port mapping + + stunServerAddressSucceededCount := int32(0) + stunServerAddressSucceeded := func(bool, string) { atomic.AddInt32(&stunServerAddressSucceededCount, 1) } + stunServerAddressFailedCount := int32(0) + stunServerAddressFailed := func(bool, string) { atomic.AddInt32(&stunServerAddressFailedCount, 1) } + + roundTripperSucceededCount := int32(0) + roundTripperSucceded := func(RoundTripper) { atomic.AddInt32(&roundTripperSucceededCount, 1) } + roundTripperFailedCount := int32(0) + roundTripperFailed := func(RoundTripper) { atomic.AddInt32(&roundTripperFailedCount, 1) } + + testCtx, stopTest := context.WithCancel(context.Background()) + defer stopTest() + + testGroup := new(errgroup.Group) + + // Enable test to run without requiring host firewall exceptions + SetAllowBogonWebRTCConnections(true) + defer SetAllowBogonWebRTCConnections(false) + + // Init logging and profiling + + logger := newTestLogger() + + pprofListener, err := net.Listen("tcp", "127.0.0.1:0") + go http.Serve(pprofListener, nil) + defer pprofListener.Close() + logger.WithTrace().Info(fmt.Sprintf("PPROF: http://%s/debug/pprof", pprofListener.Addr())) + + // Start echo servers + + tcpEchoListener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return errors.Trace(err) + } + defer tcpEchoListener.Close() + go runTCPEchoServer(tcpEchoListener) + + // QUIC tests UDP proxying, and provides reliable delivery of echoed data + quicEchoServer, err := newQuicEchoServer() + if err != nil { + return errors.Trace(err) + } + defer quicEchoServer.Close() + go quicEchoServer.Run() + + // Create signed server entry with capability + + serverPrivateKey, err := GenerateSessionPrivateKey() + if err != nil { + return errors.Trace(err) + } + serverPublicKey, err := serverPrivateKey.GetPublicKey() + if err != nil { + return errors.Trace(err) + } + serverRootObfuscationSecret, err := GenerateRootObfuscationSecret() + if err != nil { + return errors.Trace(err) + } + + serverEntry := make(protocol.ServerEntryFields) + serverEntry["ipAddress"] = "127.0.0.1" + _, tcpPort, _ := net.SplitHostPort(tcpEchoListener.Addr().String()) + _, udpPort, _ := net.SplitHostPort(quicEchoServer.Addr().String()) + serverEntry["inproxyOSSHPort"], _ = strconv.Atoi(tcpPort) + serverEntry["inproxyQUICPort"], _ = strconv.Atoi(udpPort) + serverEntry["capabilities"] = []string{"INPROXY-WEBRTC-OSSH", "INPROXY-WEBRTC-QUIC-OSSH"} + serverEntry["inproxySessionPublicKey"] = base64.RawStdEncoding.EncodeToString(serverPublicKey[:]) + serverEntry["inproxySessionRootObfuscationSecret"] = base64.RawStdEncoding.EncodeToString(serverRootObfuscationSecret[:]) + testServerEntryTag := prng.HexString(16) + serverEntry["tag"] = testServerEntryTag + + serverEntrySignaturePublicKey, serverEntrySignaturePrivateKey, err := + protocol.NewServerEntrySignatureKeyPair() + 
if err != nil { + return errors.Trace(err) + } + err = serverEntry.AddSignature(serverEntrySignaturePublicKey, serverEntrySignaturePrivateKey) + if err != nil { + return errors.Trace(err) + } + + packedServerEntryFields, err := protocol.EncodePackedServerEntryFields(serverEntry) + if err != nil { + return errors.Trace(err) + } + packedDestinationServerEntry, err := protocol.CBOREncoding.Marshal(packedServerEntryFields) + if err != nil { + return errors.Trace(err) + } + + // Start broker + + logger.WithTrace().Info("START BROKER") + + brokerPrivateKey, err := GenerateSessionPrivateKey() + if err != nil { + return errors.Trace(err) + } + brokerPublicKey, err := brokerPrivateKey.GetPublicKey() + if err != nil { + return errors.Trace(err) + } + brokerRootObfuscationSecret, err := GenerateRootObfuscationSecret() + if err != nil { + return errors.Trace(err) + } + + brokerListener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return errors.Trace(err) + } + defer brokerListener.Close() + + brokerConfig := &BrokerConfig{ + + Logger: logger, + + CommonCompartmentIDs: testCommonCompartmentIDs, + + APIParameterValidator: func(params common.APIParameters) error { + if len(params) != len(baseAPIParameters) { + return errors.TraceNew("unexpected base API parameter count") + } + for name, value := range params { + if value.(string) != baseAPIParameters[name].(string) { + return errors.Tracef( + "unexpected base API parameter: %v: %v != %v", + name, + value.(string), + baseAPIParameters[name].(string)) + } + } + return nil + }, + + APIParameterLogFieldFormatter: func( + geoIPData common.GeoIPData, params common.APIParameters) common.LogFields { + return common.LogFields(params) + }, + + GetTactics: func(_ common.GeoIPData, _ common.APIParameters) ([]byte, string, error) { + // Exercise both new and unchanged tactics + if prng.FlipCoin() { + return testNewTacticsPayload, testNewTacticsTag, nil + } + return testUnchangedTacticsPayload, "", nil + }, + + IsValidServerEntryTag: func(serverEntryTag string) bool { return serverEntryTag == testServerEntryTag }, + + PrivateKey: brokerPrivateKey, + + ObfuscationRootSecret: brokerRootObfuscationSecret, + + ServerEntrySignaturePublicKey: serverEntrySignaturePublicKey, + + AllowProxy: func(common.GeoIPData) bool { return true }, + AllowClient: func(common.GeoIPData) bool { return true }, + AllowDomainFrontedDestinations: func(common.GeoIPData) bool { return true }, + } + + broker, err := NewBroker(brokerConfig) + if err != nil { + return errors.Trace(err) + } + + err = broker.Start() + if err != nil { + return errors.Trace(err) + } + defer broker.Stop() + + testGroup.Go(func() error { + err := runHTTPServer(brokerListener, broker) + if testCtx.Err() != nil { + return nil + } + return errors.Trace(err) + }) + + // Stub server broker request handler (in Psiphon, this will be the + // destination Psiphon server; here, it's not necessary to build this + // handler into the destination echo server) + + serverSessions, err := NewServerBrokerSessions( + serverPrivateKey, serverRootObfuscationSecret, []SessionPublicKey{brokerPublicKey}) + if err != nil { + return errors.Trace(err) + } + + var pendingBrokerServerReportsMutex sync.Mutex + pendingBrokerServerReports := make(map[ID]bool) + + addPendingBrokerServerReport := func(connectionID ID) { + pendingBrokerServerReportsMutex.Lock() + defer pendingBrokerServerReportsMutex.Unlock() + pendingBrokerServerReports[connectionID] = true + } + + hasPendingBrokerServerReports := func() bool { + 
pendingBrokerServerReportsMutex.Lock() + defer pendingBrokerServerReportsMutex.Unlock() + return len(pendingBrokerServerReports) > 0 + } + + handleBrokerServerReports := func(in []byte, clientConnectionID ID) ([]byte, error) { + + handler := func(brokerVerifiedOriginalClientIP string, logFields common.LogFields) { + pendingBrokerServerReportsMutex.Lock() + defer pendingBrokerServerReportsMutex.Unlock() + + // Mark the report as no longer outstanding + delete(pendingBrokerServerReports, clientConnectionID) + } + + out, err := serverSessions.HandlePacket(logger, in, clientConnectionID, handler) + return out, errors.Trace(err) + } + + // Check that the tactics round trip succeeds + + var pendingProxyTacticsCallbacksMutex sync.Mutex + pendingProxyTacticsCallbacks := make(map[SessionPrivateKey]bool) + + addPendingProxyTacticsCallback := func(proxyPrivateKey SessionPrivateKey) { + pendingProxyTacticsCallbacksMutex.Lock() + defer pendingProxyTacticsCallbacksMutex.Unlock() + pendingProxyTacticsCallbacks[proxyPrivateKey] = true + } + + hasPendingProxyTacticsCallbacks := func() bool { + pendingProxyTacticsCallbacksMutex.Lock() + defer pendingProxyTacticsCallbacksMutex.Unlock() + return len(pendingProxyTacticsCallbacks) > 0 + } + + makeHandleTacticsPayload := func( + proxyPrivateKey SessionPrivateKey, + tacticsNetworkID string) func(_ string, _ []byte) bool { + + return func(networkID string, tacticsPayload []byte) bool { + pendingProxyTacticsCallbacksMutex.Lock() + defer pendingProxyTacticsCallbacksMutex.Unlock() + + // Check that the correct networkID is passed around; if not, + // skip the delete, which will fail the test + if networkID == tacticsNetworkID { + + // Certain state is reset when new tactics are applied -- the + // return true case; exercise both cases + if bytes.Equal(tacticsPayload, testNewTacticsPayload) { + delete(pendingProxyTacticsCallbacks, proxyPrivateKey) + return true + } + if bytes.Equal(tacticsPayload, testUnchangedTacticsPayload) { + delete(pendingProxyTacticsCallbacks, proxyPrivateKey) + return false + } + } + panic("unexpected tactics payload") + } + } + + // Start proxies + + logger.WithTrace().Info("START PROXIES") + + for i := 0; i < numProxies; i++ { + + proxyPrivateKey, err := GenerateSessionPrivateKey() + if err != nil { + return errors.Trace(err) + } + + brokerCoordinator := &testBrokerDialCoordinator{ + networkID: testNetworkID, + networkType: testNetworkType, + brokerClientPrivateKey: proxyPrivateKey, + brokerPublicKey: brokerPublicKey, + brokerRootObfuscationSecret: brokerRootObfuscationSecret, + brokerClientRoundTripper: newHTTPRoundTripper( + brokerListener.Addr().String(), "proxy"), + brokerClientRoundTripperSucceeded: roundTripperSucceded, + brokerClientRoundTripperFailed: roundTripperFailed, + } + + webRTCCoordinator := &testWebRTCDialCoordinator{ + networkID: testNetworkID, + networkType: testNetworkType, + natType: testNATType, + disableSTUN: testDisableSTUN, + stunServerAddress: testSTUNServerAddress, + stunServerAddressRFC5780: testSTUNServerAddress, + stunServerAddressSucceeded: stunServerAddressSucceeded, + stunServerAddressFailed: stunServerAddressFailed, + setNATType: func(NATType) {}, + setPortMappingTypes: func(PortMappingTypes) {}, + bindToDevice: func(int) error { return nil }, + } + + // Each proxy has its own broker client + brokerClient, err := NewBrokerClient(brokerCoordinator) + if err != nil { + return errors.Trace(err) + } + + tacticsNetworkID := prng.HexString(32) + + proxy, err := NewProxy(&ProxyConfig{ + + Logger: logger, + + 
WaitForNetworkConnectivity: func() bool { + return true + }, + + GetBrokerClient: func() (*BrokerClient, error) { + return brokerClient, nil + }, + + GetBaseAPIParameters: func() (common.APIParameters, string, error) { + return baseAPIParameters, tacticsNetworkID, nil + }, + + MakeWebRTCDialCoordinator: func() (WebRTCDialCoordinator, error) { + return webRTCCoordinator, nil + }, + + HandleTacticsPayload: makeHandleTacticsPayload(proxyPrivateKey, tacticsNetworkID), + + MaxClients: proxyMaxClients, + LimitUpstreamBytesPerSecond: bytesToSend / targetElapsedSeconds, + LimitDownstreamBytesPerSecond: bytesToSend / targetElapsedSeconds, + + ActivityUpdater: func(connectingClients int32, connectedClients int32, + bytesUp int64, bytesDown int64, bytesDuration time.Duration) { + + fmt.Printf("[%s] ACTIVITY: %d connecting, %d connected, %d up, %d down\n", + time.Now().UTC().Format(time.RFC3339), + connectingClients, connectedClients, bytesUp, bytesDown) + }, + }) + if err != nil { + return errors.Trace(err) + } + + addPendingProxyTacticsCallback(proxyPrivateKey) + + testGroup.Go(func() error { + proxy.Run(testCtx) + return nil + }) + } + + // Await proxy announcements before starting clients + // + // - Announcements may delay due to proxyAnnounceRetryDelay in Proxy.Run, + // plus NAT discovery + // + // - Don't wait for > numProxies announcements due to + // InitiatorSessions.NewRoundTrip waitToShareSession limitation + + for { + time.Sleep(100 * time.Millisecond) + broker.matcher.announcementQueueMutex.Lock() + n := broker.matcher.announcementQueue.Len() + broker.matcher.announcementQueueMutex.Unlock() + if n >= numProxies { + break + } + } + + // Start clients + + logger.WithTrace().Info("START CLIENTS") + + clientsGroup := new(errgroup.Group) + + makeClientFunc := func( + isTCP bool, + isMobile bool, + brokerClient *BrokerClient, + webRTCCoordinator WebRTCDialCoordinator) func() error { + + var networkProtocol NetworkProtocol + var addr string + var wrapWithQUIC bool + if isTCP { + networkProtocol = NetworkProtocolTCP + addr = tcpEchoListener.Addr().String() + } else { + networkProtocol = NetworkProtocolUDP + addr = quicEchoServer.Addr().String() + wrapWithQUIC = true + } + + return func() error { + + dialCtx, cancelDial := context.WithTimeout(testCtx, 60*time.Second) + defer cancelDial() + + conn, err := DialClient( + dialCtx, + &ClientConfig{ + Logger: logger, + BaseAPIParameters: baseAPIParameters, + BrokerClient: brokerClient, + WebRTCDialCoordinator: webRTCCoordinator, + ReliableTransport: isTCP, + DialNetworkProtocol: networkProtocol, + DialAddress: addr, + PackedDestinationServerEntry: packedDestinationServerEntry, + }) + if err != nil { + return errors.Trace(err) + } + + var relayConn net.Conn + relayConn = conn + + if wrapWithQUIC { + quicConn, err := quic.Dial( + dialCtx, + conn, + &net.UDPAddr{Port: 1}, // This address is ignored, but the zero value is not allowed + "test", "QUICv1", nil, quicEchoServer.ObfuscationKey(), nil, nil, true) + if err != nil { + return errors.Trace(err) + } + relayConn = quicConn + } + + addPendingBrokerServerReport(conn.GetConnectionID()) + signalRelayComplete := make(chan struct{}) + + clientsGroup.Go(func() error { + defer close(signalRelayComplete) + + in := conn.InitialRelayPacket() + for in != nil { + out, err := handleBrokerServerReports(in, conn.GetConnectionID()) + if err != nil { + if out == nil { + return errors.Trace(err) + } else { + fmt.Printf("HandlePacket returned packet and error: %v\n", err) + // Proceed with reset session token packet + } 
+ } + + if out == nil { + // Relay is complete + break + } + + in, err = conn.RelayPacket(testCtx, out) + if err != nil { + return errors.Trace(err) + } + } + + return nil + }) + + sendBytes := prng.Bytes(bytesToSend) + + clientsGroup.Go(func() error { + for n := 0; n < bytesToSend; { + m := prng.Range(1024, 32768) + if bytesToSend-n < m { + m = bytesToSend - n + } + _, err := relayConn.Write(sendBytes[n : n+m]) + if err != nil { + return errors.Trace(err) + } + n += m + } + fmt.Printf("%d bytes sent\n", bytesToSend) + return nil + }) + + clientsGroup.Go(func() error { + buf := make([]byte, 32768) + n := 0 + for n < bytesToSend { + m, err := relayConn.Read(buf) + if err != nil { + return errors.Trace(err) + } + if !bytes.Equal(sendBytes[n:n+m], buf[:m]) { + return errors.Tracef( + "unexpected bytes: expected at index %d, received at index %d", + bytes.Index(sendBytes, buf[:m]), n) + } + n += m + } + fmt.Printf("%d bytes received\n", bytesToSend) + + select { + case <-signalRelayComplete: + case <-testCtx.Done(): + } + + relayConn.Close() + conn.Close() + + return nil + }) + + return nil + } + } + + newClientParams := func(isMobile bool) (*BrokerClient, *testWebRTCDialCoordinator, error) { + + clientPrivateKey, err := GenerateSessionPrivateKey() + if err != nil { + return nil, nil, errors.Trace(err) + } + + clientRootObfuscationSecret, err := GenerateRootObfuscationSecret() + if err != nil { + return nil, nil, errors.Trace(err) + } + + brokerCoordinator := &testBrokerDialCoordinator{ + networkID: testNetworkID, + networkType: testNetworkType, + + commonCompartmentIDs: testCommonCompartmentIDs, + + brokerClientPrivateKey: clientPrivateKey, + brokerPublicKey: brokerPublicKey, + brokerRootObfuscationSecret: brokerRootObfuscationSecret, + brokerClientRoundTripper: newHTTPRoundTripper( + brokerListener.Addr().String(), "client"), + brokerClientRoundTripperSucceeded: roundTripperSucceded, + brokerClientRoundTripperFailed: roundTripperFailed, + } + + webRTCCoordinator := &testWebRTCDialCoordinator{ + networkID: testNetworkID, + networkType: testNetworkType, + + natType: testNATType, + disableSTUN: testDisableSTUN, + stunServerAddress: testSTUNServerAddress, + stunServerAddressRFC5780: testSTUNServerAddress, + stunServerAddressSucceeded: stunServerAddressSucceeded, + stunServerAddressFailed: stunServerAddressFailed, + + clientRootObfuscationSecret: clientRootObfuscationSecret, + doDTLSRandomization: prng.FlipCoin(), + trafficShapingParameters: &DataChannelTrafficShapingParameters{ + MinPaddedMessages: 0, + MaxPaddedMessages: 10, + MinPaddingSize: 0, + MaxPaddingSize: 1500, + MinDecoyMessages: 0, + MaxDecoyMessages: 10, + MinDecoySize: 1, + MaxDecoySize: 1500, + DecoyMessageProbability: 0.5, + }, + + setNATType: func(NATType) {}, + setPortMappingTypes: func(PortMappingTypes) {}, + bindToDevice: func(int) error { return nil }, + + // With STUN enabled (testDisableSTUN = false), there are cases + // where the WebRTC Data Channel is not successfully established. + // With a short enough timeout here, clients will redial and + // eventually succceed. 
+ webRTCAwaitDataChannelTimeout: 5 * time.Second, + } + + if isMobile { + webRTCCoordinator.networkType = NetworkTypeMobile + webRTCCoordinator.disableInboundForMobileNetworks = true + } + + brokerClient, err := NewBrokerClient(brokerCoordinator) + if err != nil { + return nil, nil, errors.Trace(err) + } + + return brokerClient, webRTCCoordinator, nil + } + + clientBrokerClient, clientWebRTCCoordinator, err := newClientParams(false) + if err != nil { + return errors.Trace(err) + } + + clientMobileBrokerClient, clientMobileWebRTCCoordinator, err := newClientParams(true) + if err != nil { + return errors.Trace(err) + } + + for i := 0; i < numClients; i++ { + + // Test a mix of TCP and UDP proxying; also test the + // DisableInboundForMobileNetworks code path. + + isTCP := i%2 == 0 + isMobile := i%4 == 0 + + // Exercise BrokerClients shared by multiple clients, but also create + // several broker clients. + if i%8 == 0 { + clientBrokerClient, clientWebRTCCoordinator, err = newClientParams(false) + if err != nil { + return errors.Trace(err) + } + + clientMobileBrokerClient, clientMobileWebRTCCoordinator, err = newClientParams(true) + if err != nil { + return errors.Trace(err) + } + } + + brokerClient := clientBrokerClient + webRTCCoordinator := clientWebRTCCoordinator + if isMobile { + brokerClient = clientMobileBrokerClient + webRTCCoordinator = clientMobileWebRTCCoordinator + } + + clientsGroup.Go(makeClientFunc(isTCP, isMobile, brokerClient, webRTCCoordinator)) + } + + // Await client transfers complete + + logger.WithTrace().Info("AWAIT DATA TRANSFER") + + err = clientsGroup.Wait() + if err != nil { + return errors.Trace(err) + } + + logger.WithTrace().Info("DONE DATA TRANSFER") + + if hasPendingBrokerServerReports() { + return errors.TraceNew("unexpected pending broker server requests") + } + + if hasPendingProxyTacticsCallbacks() { + return errors.TraceNew("unexpected pending proxy tactics callback") + } + + // TODO: check that elapsed time is consistent with rate limit (+/-) + + // Check if STUN server replay callbacks were triggered + if !testDisableSTUN { + if atomic.LoadInt32(&stunServerAddressSucceededCount) < 1 { + return errors.TraceNew("unexpected STUN server succeeded count") + } + } + if atomic.LoadInt32(&stunServerAddressFailedCount) > 0 { + return errors.TraceNew("unexpected STUN server failed count") + } + + // Check if RoundTripper server replay callbacks were triggered + if atomic.LoadInt32(&roundTripperSucceededCount) < 1 { + return errors.TraceNew("unexpected round tripper succeeded count") + } + if atomic.LoadInt32(&roundTripperFailedCount) > 0 { + return errors.TraceNew("unexpected round tripper failed count") + } + + // Await shutdowns + + stopTest() + brokerListener.Close() + + err = testGroup.Wait() + if err != nil { + return errors.Trace(err) + } + + return nil +} + +func runHTTPServer(listener net.Listener, broker *Broker) error { + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + + // For this test, clients set the path to "/client" and proxies + // set the path to "/proxy" and we use that to create stub GeoIP + // data to pass the not-same-ASN condition. 
+ var geoIPData common.GeoIPData + geoIPData.ASN = r.URL.Path + + requestPayload, err := ioutil.ReadAll( + http.MaxBytesReader(w, r.Body, BrokerMaxRequestBodySize)) + if err != nil { + fmt.Printf("runHTTPServer ioutil.ReadAll failed: %v\n", err) + http.Error(w, "", http.StatusNotFound) + return + } + + clientIP, _, _ := net.SplitHostPort(r.RemoteAddr) + + extendTimeout := func(timeout time.Duration) { + // TODO: set insufficient initial timeout, so extension is + // required for success + http.NewResponseController(w).SetWriteDeadline(time.Now().Add(timeout)) + } + + responsePayload, err := broker.HandleSessionPacket( + r.Context(), + extendTimeout, + nil, + clientIP, + geoIPData, + requestPayload) + if err != nil { + fmt.Printf("runHTTPServer HandleSessionPacket failed: %v\n", err) + http.Error(w, "", http.StatusNotFound) + return + } + + w.WriteHeader(http.StatusOK) + w.Write(responsePayload) + }) + + // WriteTimeout will be extended via extendTimeout. + httpServer := &http.Server{ + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + IdleTimeout: 1 * time.Minute, + Handler: handler, + } + + certificate, privateKey, _, err := common.GenerateWebServerCertificate("www.example.com") + if err != nil { + return errors.Trace(err) + } + tlsCert, err := tls.X509KeyPair([]byte(certificate), []byte(privateKey)) + if err != nil { + return errors.Trace(err) + } + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{tlsCert}, + } + + err = httpServer.Serve(tls.NewListener(listener, tlsConfig)) + return errors.Trace(err) +} + +type httpRoundTripper struct { + httpClient *http.Client + endpointAddr string + path string +} + +func newHTTPRoundTripper(endpointAddr string, path string) *httpRoundTripper { + return &httpRoundTripper{ + httpClient: &http.Client{ + Transport: &http.Transport{ + ForceAttemptHTTP2: true, + MaxIdleConns: 2, + IdleConnTimeout: 1 * time.Minute, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + }, + endpointAddr: endpointAddr, + path: path, + } +} + +func (r *httpRoundTripper) RoundTrip( + ctx context.Context, + roundTripDelay time.Duration, + roundTripTimeout time.Duration, + requestPayload []byte) ([]byte, error) { + + if roundTripDelay > 0 { + common.SleepWithContext(ctx, roundTripDelay) + } + + requestCtx, requestCancelFunc := context.WithTimeout(ctx, roundTripTimeout) + defer requestCancelFunc() + + url := fmt.Sprintf("https://%s/%s", r.endpointAddr, r.path) + + request, err := http.NewRequestWithContext( + requestCtx, "POST", url, bytes.NewReader(requestPayload)) + if err != nil { + return nil, errors.Trace(err) + } + + response, err := r.httpClient.Do(request) + if err != nil { + return nil, errors.Trace(err) + } + defer response.Body.Close() + + if response.StatusCode != http.StatusOK { + return nil, errors.Tracef("unexpected response status code: %d", response.StatusCode) + } + + responsePayload, err := io.ReadAll(response.Body) + if err != nil { + return nil, errors.Trace(err) + } + + return responsePayload, nil +} + +func (r *httpRoundTripper) Close() error { + r.httpClient.CloseIdleConnections() + return nil +} + +func runTCPEchoServer(listener net.Listener) { + + for { + conn, err := listener.Accept() + if err != nil { + fmt.Printf("runTCPEchoServer failed: %v\n", errors.Trace(err)) + return + } + go func(conn net.Conn) { + buf := make([]byte, 32768) + for { + n, err := conn.Read(buf) + if n > 0 { + _, err = conn.Write(buf[:n]) + } + if err != nil { + fmt.Printf("runTCPEchoServer 
failed: %v\n", errors.Trace(err)) + return + } + } + }(conn) + } +} + +type quicEchoServer struct { + listener net.Listener + obfuscationKey string +} + +func newQuicEchoServer() (*quicEchoServer, error) { + + obfuscationKey := prng.HexString(32) + + listener, err := quic.Listen( + nil, + nil, + "127.0.0.1:0", + obfuscationKey, + false) + if err != nil { + return nil, errors.Trace(err) + } + + return &quicEchoServer{ + listener: listener, + obfuscationKey: obfuscationKey, + }, nil +} + +func (q *quicEchoServer) ObfuscationKey() string { + return q.obfuscationKey +} + +func (q *quicEchoServer) Close() error { + return q.listener.Close() +} + +func (q *quicEchoServer) Addr() net.Addr { + return q.listener.Addr() +} + +func (q *quicEchoServer) Run() { + + for { + conn, err := q.listener.Accept() + if err != nil { + fmt.Printf("quicEchoServer failed: %v\n", errors.Trace(err)) + return + } + go func(conn net.Conn) { + buf := make([]byte, 32768) + for { + n, err := conn.Read(buf) + if n > 0 { + _, err = conn.Write(buf[:n]) + } + if err != nil { + fmt.Printf("quicEchoServer failed: %v\n", errors.Trace(err)) + return + } + } + }(conn) + } +} diff --git a/psiphon/common/inproxy/matcher.go b/psiphon/common/inproxy/matcher.go new file mode 100644 index 000000000..c903c6762 --- /dev/null +++ b/psiphon/common/inproxy/matcher.go @@ -0,0 +1,1089 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ +package inproxy + +import ( + "context" + std_errors "errors" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + lrucache "github.com/cognusion/go-cache-lru" + "github.com/gammazero/deque" + "golang.org/x/time/rate" +) + +// TTLs should be aligned with STUN hole punch lifetimes. + +const ( + matcherAnnouncementQueueMaxSize = 5000000 + matcherOfferQueueMaxSize = 5000000 + matcherPendingAnswersTTL = 30 * time.Second + matcherPendingAnswersMaxSize = 100000 + + matcherRateLimiterReapHistoryFrequencySeconds = 300 + matcherRateLimiterMaxCacheEntries = 1000000 +) + +// Matcher matches proxy announcements with client offers. Matcher also +// coordinates pending proxy answers and routes answers to the awaiting +// client offer handler. +// +// Matching prioritizes selecting the oldest announcements and client offers, +// as they are closest to timing out. +// +// The client and proxy must supply matching personal or common compartment +// IDs. Personal compartment matching is preferred. Common compartments are +// managed by Psiphon and can be obtained via a tactics parameter or via an +// OSL embedding. +// +// A client may opt form personal-only matching by not supplying any common +// compartment IDs. +// +// Matching prefers to pair proxies and clients in a way that maximizes total +// possible matches. 
For a client or proxy with less-limited NAT traversal, a
+// pairing with more-limited NAT traversal is preferred; and vice versa.
+// Candidates with unknown NAT types and mobile network types are assumed to
+// have the most limited NAT traversal capability.
+//
+// Preferred matchings take priority over announcement age.
+//
+// The client and proxy will not match if they are in the same country and
+// ASN, as it's assumed that doesn't provide any blocking circumvention
+// benefit. Disallowing proxies in certain blocked countries is handled at a
+// higher level; any such proxies should not be enqueued for matching.
+type Matcher struct {
+	config *MatcherConfig
+
+	runMutex    sync.Mutex
+	runContext  context.Context
+	stopRunning context.CancelFunc
+	waitGroup   *sync.WaitGroup
+
+	// The announcement queue is implicitly sorted by announcement age. The
+	// count fields are used to skip searching deeper into the queue for
+	// preferred matches.
+
+	// TODO: replace queue and counts with an indexed, in-memory database?
+
+	announcementQueueMutex                      sync.Mutex
+	announcementQueue                           *deque.Deque[*announcementEntry]
+	announcementQueueEntryCountByIP             map[string]int
+	announcementQueueRateLimiters               *lrucache.Cache
+	announcementLimitEntryCount                 int
+	announcementRateLimitQuantity               int
+	announcementRateLimitInterval               time.Duration
+	announcementNonlimitedProxyIDs              map[ID]struct{}
+	announcementsPersonalCompartmentalizedCount int
+	announcementsUnlimitedNATCount              int
+	announcementsPartiallyLimitedNATCount       int
+	announcementsStrictlyLimitedNATCount        int
+
+	// The offer queue is also implicitly sorted by offer age. Both an offer
+	// and announcement queue are required since either announcements or
+	// offers can arrive while there are no available pairings.
+
+	offerQueueMutex          sync.Mutex
+	offerQueue               *deque.Deque[*offerEntry]
+	offerQueueEntryCountByIP map[string]int
+	offerQueueRateLimiters   *lrucache.Cache
+	offerLimitEntryCount     int
+	offerRateLimitQuantity   int
+	offerRateLimitInterval   time.Duration
+
+	matchSignal chan struct{}
+
+	pendingAnswers *lrucache.Cache
+}
+
+// MatchProperties specifies the compartment, GeoIP, and network topology
+// matching properties of clients and proxies.
+type MatchProperties struct {
+	CommonCompartmentIDs   []ID
+	PersonalCompartmentIDs []ID
+	GeoIPData              common.GeoIPData
+	NetworkType            NetworkType
+	NATType                NATType
+	PortMappingTypes       PortMappingTypes
+}
+
+// EffectiveNATType combines the set of network properties into an effective
+// NAT type. When a port mapping is offered, a NAT type with unlimited NAT
+// traversal is assumed. When NAT type is unknown and the network type is
+// mobile, CGNAT with limited NAT traversal is assumed.
+func (p *MatchProperties) EffectiveNATType() NATType {
+
+	if p.PortMappingTypes.Available() {
+		return NATTypePortMapping
+	}
+
+	// TODO: can a peer have limited NAT traversal for IPv4 and also have a
+	// publicly reachable IPv6 ICE host candidate? If so, change the
+	// effective NAT type? Depends on whether the matched peer can use IPv6.
+
+	if p.NATType == NATTypeUnknown && p.NetworkType == NetworkTypeMobile {
+		return NATTypeMobileNetwork
+	}
+
+	return p.NATType
+}
+
+// ExistsPreferredNATMatch indicates whether there exists a preferred NAT
+// matching given the types of pairing candidates available.
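+//
+// Illustrative example (added, based on the preference rules in
+// NATTraversal.IsPreferredMatch): an offer whose effective NAT traversal is
+// unlimited has a preferred match available only when at least one strictly
+// limited announcement is enqueued, i.e. when limitedNAT is true.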
+func (p *MatchProperties) ExistsPreferredNATMatch( + unlimitedNAT, partiallyLimitedNAT, limitedNAT bool) bool { + + return p.EffectiveNATType().ExistsPreferredMatch( + unlimitedNAT, partiallyLimitedNAT, limitedNAT) +} + +// IsPreferredNATMatch indicates whether the peer candidate is a preferred +// NAT matching. +func (p *MatchProperties) IsPreferredNATMatch( + peerMatchProperties *MatchProperties) bool { + + return p.EffectiveNATType().IsPreferredMatch( + peerMatchProperties.EffectiveNATType()) +} + +// IsPersonalCompartmentalized indicates whether the candidate has personal +// compartment IDs. +func (p *MatchProperties) IsPersonalCompartmentalized() bool { + return len(p.PersonalCompartmentIDs) > 0 +} + +// MatchAnnouncement is a proxy announcement to be queued for matching. +type MatchAnnouncement struct { + Properties MatchProperties + ProxyID ID + ConnectionID ID + ProxyProtocolVersion int32 +} + +// MatchOffer is a client offer to be queued for matching. +type MatchOffer struct { + Properties MatchProperties + ClientProxyProtocolVersion int32 + ClientOfferSDP WebRTCSessionDescription + ClientRootObfuscationSecret ObfuscationSecret + DoDTLSRandomization bool + TrafficShapingParameters *DataChannelTrafficShapingParameters + NetworkProtocol NetworkProtocol + DestinationAddress string + DestinationServerID string +} + +// MatchAnswer is a proxy answer, the proxy's follow up to a matched +// announcement, to be routed to the awaiting client offer. +type MatchAnswer struct { + ProxyIP string + ProxyID ID + ConnectionID ID + SelectedProxyProtocolVersion int32 + ProxyAnswerSDP WebRTCSessionDescription +} + +// MatchMetrics records statistics about the match queue state at the time a +// match is made. +type MatchMetrics struct { + OfferMatchIndex int + OfferQueueSize int + AnnouncementMatchIndex int + AnnouncementQueueSize int +} + +// GetMetrics converts MatchMetrics to loggable fields. +func (metrics *MatchMetrics) GetMetrics() common.LogFields { + if metrics == nil { + return nil + } + return common.LogFields{ + "offer_match_index": metrics.OfferMatchIndex, + "offer_queue_size": metrics.OfferQueueSize, + "announcement_match_index": metrics.AnnouncementMatchIndex, + "announcement_queue_size": metrics.AnnouncementQueueSize, + } +} + +// announcementEntry is an announcement queue entry, an announcement with its +// associated lifetime context and signaling channel. +type announcementEntry struct { + ctx context.Context + limitIP string + announcement *MatchAnnouncement + offerChan chan *MatchOffer + matchMetrics atomic.Value +} + +func (announcementEntry *announcementEntry) getMatchMetrics() *MatchMetrics { + matchMetrics, _ := announcementEntry.matchMetrics.Load().(*MatchMetrics) + return matchMetrics +} + +// offerEntry is an offer queue entry, an offer with its associated lifetime +// context and signaling channel. +type offerEntry struct { + ctx context.Context + limitIP string + offer *MatchOffer + answerChan chan *answerInfo + matchMetrics atomic.Value +} + +func (offerEntry *offerEntry) getMatchMetrics() *MatchMetrics { + matchMetrics, _ := offerEntry.matchMetrics.Load().(*MatchMetrics) + return matchMetrics +} + +// answerInfo is an answer and its associated announcement. +type answerInfo struct { + announcement *MatchAnnouncement + answer *MatchAnswer +} + +// pendingAnswer represents an answer that is expected to arrive from a +// proxy. 
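+// A pendingAnswer is created when an announcement is matched to an offer and
+// is resolved by the proxy's subsequent Answer or AnswerError call; its TTL
+// is bounded by the matched offer's context deadline, when one is set, so
+// stale entries are eventually reaped even if the proxy never follows up.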
+type pendingAnswer struct { + announcement *MatchAnnouncement + answerChan chan *answerInfo +} + +// MatcherConfig specifies the configuration for a matcher. +type MatcherConfig struct { + + // Logger is used to log events. + Logger common.Logger + + // Accouncement queue limits. + AnnouncementLimitEntryCount int + AnnouncementRateLimitQuantity int + AnnouncementRateLimitInterval time.Duration + AnnouncementNonlimitedProxyIDs []ID + + // Offer queue limits. + OfferLimitEntryCount int + OfferRateLimitQuantity int + OfferRateLimitInterval time.Duration +} + +// NewMatcher creates a new Matcher. +func NewMatcher(config *MatcherConfig) *Matcher { + + m := &Matcher{ + config: config, + + waitGroup: new(sync.WaitGroup), + + announcementQueue: deque.New[*announcementEntry](), + announcementQueueEntryCountByIP: make(map[string]int), + announcementQueueRateLimiters: lrucache.NewWithLRU( + 0, + time.Duration(matcherRateLimiterReapHistoryFrequencySeconds)*time.Second, + matcherRateLimiterMaxCacheEntries), + + offerQueue: deque.New[*offerEntry](), + offerQueueEntryCountByIP: make(map[string]int), + offerQueueRateLimiters: lrucache.NewWithLRU( + 0, + time.Duration(matcherRateLimiterReapHistoryFrequencySeconds)*time.Second, + matcherRateLimiterMaxCacheEntries), + + matchSignal: make(chan struct{}, 1), + + // matcherPendingAnswersTTL is not configurable; it supplies a default + // that is expected to be ignored when each entry's TTL is set to the + // Offer ctx timeout. + + pendingAnswers: lrucache.NewWithLRU( + matcherPendingAnswersTTL, + 1*time.Minute, + matcherPendingAnswersMaxSize), + } + + m.SetLimits( + config.AnnouncementLimitEntryCount, + config.AnnouncementRateLimitQuantity, + config.AnnouncementRateLimitInterval, + config.AnnouncementNonlimitedProxyIDs, + config.OfferLimitEntryCount, + config.OfferRateLimitQuantity, + config.OfferRateLimitInterval) + + return m +} + +// SetLimits sets new queue limits, replacing the previous configuration. +// Existing, cached rate limiters retain their existing rate limit state. New +// entries will use the new quantity/interval configuration. In addition, +// currently enqueued items may exceed any new, lower maximum entry count +// until naturally dequeued. +func (m *Matcher) SetLimits( + announcementLimitEntryCount int, + announcementRateLimitQuantity int, + announcementRateLimitInterval time.Duration, + announcementNonlimitedProxyIDs []ID, + offerLimitEntryCount int, + offerRateLimitQuantity int, + offerRateLimitInterval time.Duration) { + + nonlimitedProxyIDs := make(map[ID]struct{}) + for _, proxyID := range announcementNonlimitedProxyIDs { + nonlimitedProxyIDs[proxyID] = struct{}{} + } + + m.announcementQueueMutex.Lock() + m.announcementLimitEntryCount = announcementLimitEntryCount + m.announcementRateLimitQuantity = announcementRateLimitQuantity + m.announcementRateLimitInterval = announcementRateLimitInterval + m.announcementNonlimitedProxyIDs = nonlimitedProxyIDs + m.announcementQueueMutex.Unlock() + + m.offerQueueMutex.Lock() + m.offerLimitEntryCount = offerLimitEntryCount + m.offerRateLimitQuantity = offerRateLimitQuantity + m.offerRateLimitInterval = offerRateLimitInterval + m.offerQueueMutex.Unlock() +} + +// Start starts running the Matcher. The Matcher runs a goroutine which +// matches announcements and offers. 
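+//
+// Illustrative usage (a sketch added for clarity, not part of the original
+// change; the logger value is an assumption):
+//
+//	m := NewMatcher(&MatcherConfig{Logger: logger})
+//	if err := m.Start(); err != nil {
+//		return errors.Trace(err)
+//	}
+//	defer m.Stop()
+//
+// Announce, Offer, and Answer may then be invoked from concurrent broker
+// request handlers.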
+func (m *Matcher) Start() error { + + m.runMutex.Lock() + defer m.runMutex.Unlock() + + if m.runContext != nil { + return errors.TraceNew("already running") + } + + m.runContext, m.stopRunning = context.WithCancel(context.Background()) + + m.waitGroup.Add(1) + go func() { + defer m.waitGroup.Done() + m.matchWorker(m.runContext) + }() + + return nil +} + +// Stop stops running the Matcher and its worker goroutine. +// +// Limitation: Stop is not synchronized with Announce/Offer/Answer, so items +// can get enqueued during and after a Stop call. Stop is intended more for a +// full broker shutdown, where this won't be a concern. +func (m *Matcher) Stop() { + + m.runMutex.Lock() + defer m.runMutex.Unlock() + + m.stopRunning() + m.waitGroup.Wait() + m.runContext, m.stopRunning = nil, nil +} + +// Announce enqueues the proxy announcement and blocks until it is matched +// with a returned offer or ctx is done. The caller must not mutate the +// announcement or its properties after calling Announce. +// +// The offer is sent to the proxy by the broker, and then the proxy sends its +// answer back to the broker, which calls Answer with that value. +// +// The returned MatchMetrics is nil unless a match is made; and non-nil if a +// match is made, even if there is a later error. +func (m *Matcher) Announce( + ctx context.Context, + proxyIP string, + proxyAnnouncement *MatchAnnouncement) (*MatchOffer, *MatchMetrics, error) { + + announcementEntry := &announcementEntry{ + ctx: ctx, + limitIP: getRateLimitIP(proxyIP), + announcement: proxyAnnouncement, + offerChan: make(chan *MatchOffer, 1), + } + + err := m.addAnnouncementEntry(announcementEntry) + if err != nil { + return nil, nil, errors.Trace(err) + } + + // Await client offer. + + var clientOffer *MatchOffer + + select { + case <-ctx.Done(): + m.removeAnnouncementEntry(announcementEntry) + return nil, announcementEntry.getMatchMetrics(), errors.Trace(ctx.Err()) + + case clientOffer = <-announcementEntry.offerChan: + } + + return clientOffer, announcementEntry.getMatchMetrics(), nil +} + +// Offer enqueues the client offer and blocks until it is matched with a +// returned announcement or ctx is done. The caller must not mutate the offer +// or its properties after calling Announce. +// +// The answer is returned to the client by the broker, and the WebRTC +// connection is dialed. The original announcement is also returned, so its +// match properties can be logged. +// +// The returned MatchMetrics is nil unless a match is made; and non-nil if a +// match is made, even if there is a later error. +func (m *Matcher) Offer( + ctx context.Context, + clientIP string, + clientOffer *MatchOffer) (*MatchAnswer, *MatchAnnouncement, *MatchMetrics, error) { + + offerEntry := &offerEntry{ + ctx: ctx, + limitIP: getRateLimitIP(clientIP), + offer: clientOffer, + answerChan: make(chan *answerInfo, 1), + } + + err := m.addOfferEntry(offerEntry) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + // Await proxy answer. + + var proxyAnswerInfo *answerInfo + + select { + case <-ctx.Done(): + m.removeOfferEntry(offerEntry) + + // TODO: also remove any pendingAnswers entry? The entry TTL is set to + // the Offer ctx, the client request, timeout, so it will eventually + // get removed. But a client may abort its request earlier than the + // timeout. 
+ + return nil, nil, + offerEntry.getMatchMetrics(), errors.Trace(ctx.Err()) + + case proxyAnswerInfo = <-offerEntry.answerChan: + } + + if proxyAnswerInfo == nil { + + // nil will be delivered to the channel when either the proxy + // announcement request concurrently timed out, or the answer + // indicated a proxy error, or the answer did not arrive in time. + return nil, nil, + offerEntry.getMatchMetrics(), errors.TraceNew("no answer") + } + + // This is a sanity check and not expected to fail. + if !proxyAnswerInfo.answer.ConnectionID.Equal( + proxyAnswerInfo.announcement.ConnectionID) { + return nil, nil, + offerEntry.getMatchMetrics(), errors.TraceNew("unexpected connection ID") + } + + return proxyAnswerInfo.answer, + proxyAnswerInfo.announcement, + offerEntry.getMatchMetrics(), + nil +} + +// Answer delivers an answer from the proxy for a previously matched offer. +// The ProxyID and ConnectionID must correspond to the original announcement. +// The caller must not mutate the answer after calling Answer. Answer does +// not block. +// +// The answer is returned to the awaiting Offer call and sent to the matched +// client. +func (m *Matcher) Answer( + proxyAnswer *MatchAnswer) error { + + key := m.pendingAnswerKey(proxyAnswer.ProxyID, proxyAnswer.ConnectionID) + pendingAnswerValue, ok := m.pendingAnswers.Get(key) + if !ok { + // The client is no longer awaiting the response. + return errors.TraceNew("no client") + } + + m.pendingAnswers.Delete(key) + + pendingAnswer := pendingAnswerValue.(*pendingAnswer) + + pendingAnswer.answerChan <- &answerInfo{ + announcement: pendingAnswer.announcement, + answer: proxyAnswer, + } + + return nil +} + +// AnswerError delivers a failed answer indication from the proxy to an +// awaiting offer. The ProxyID and ConnectionID must correspond to the +// original announcement. +// +// The failure indication is returned to the awaiting Offer call and sent to +// the matched client. +func (m *Matcher) AnswerError(proxyID ID, connectionID ID) { + + key := m.pendingAnswerKey(proxyID, connectionID) + pendingAnswerValue, ok := m.pendingAnswers.Get(key) + if !ok { + // The client is no longer awaiting the response. + return + } + + m.pendingAnswers.Delete(key) + + // Closing the channel delivers nil, a failed indicator, to any receiver. + close(pendingAnswerValue.(*pendingAnswer).answerChan) +} + +// matchWorker is the matching worker goroutine. It idles until signaled that +// a queue item has been added, and then runs a full matching pass. +func (m *Matcher) matchWorker(ctx context.Context) { + for { + select { + case <-m.matchSignal: + m.matchAllOffers() + case <-ctx.Done(): + return + } + } +} + +// matchAllOffers iterates over the queues, making all possible matches. +func (m *Matcher) matchAllOffers() { + + m.announcementQueueMutex.Lock() + defer m.announcementQueueMutex.Unlock() + m.offerQueueMutex.Lock() + defer m.offerQueueMutex.Unlock() + + // Take each offer in turn, and select an announcement match. There is an + // implicit preference for older client offers, sooner to timeout, at the + // front of the queue. + + // TODO: consider matching one offer, then releasing the locks to allow + // more announcements to be enqueued, then continuing to match. + + i := 0 + end := m.offerQueue.Len() + + for i < end && m.announcementQueue.Len() > 0 { + + offerEntry := m.offerQueue.At(i) + + // Skip and remove this offer if its deadline has already passed. + // There is no signal to the awaiting Offer function, as it will exit + // based on the same ctx. 
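+		//
+		// Note (added for clarity): removing an entry from the deque shifts
+		// later entries down, so after a removal the same index i refers to
+		// the next oldest offer; the loop therefore decrements end rather
+		// than advancing i.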
+ + if offerEntry.ctx.Err() != nil { + m.removeOfferEntryByIndex(i) + end -= 1 + continue + } + + j, ok := m.matchOffer(offerEntry) + if !ok { + + // No match, so leave this offer in place in the queue and move to + // the next. + + i++ + continue + } + + // Get the matched announcement entry. + + announcementEntry := m.announcementQueue.At(j) + + // Record match metrics. + + matchMetrics := &MatchMetrics{ + OfferMatchIndex: i, + OfferQueueSize: m.offerQueue.Len(), + AnnouncementMatchIndex: j, + AnnouncementQueueSize: m.announcementQueue.Len(), + } + + offerEntry.matchMetrics.Store(matchMetrics) + announcementEntry.matchMetrics.Store(matchMetrics) + + // Remove the matched announcement from the queue. Send the offer to + // the announcement entry's offerChan, which will deliver it to the + // blocked Announce call. Add a pending answers entry to await the + // proxy's follow up Answer call. The TTL for the pending answer + // entry is set to the matched Offer call's ctx, as the answer is + // only useful as long as the client is still waiting. + + expiry := lrucache.DefaultExpiration + deadline, ok := offerEntry.ctx.Deadline() + if ok { + expiry = time.Until(deadline) + } + + key := m.pendingAnswerKey( + announcementEntry.announcement.ProxyID, + announcementEntry.announcement.ConnectionID) + + m.pendingAnswers.Set( + key, + &pendingAnswer{ + announcement: announcementEntry.announcement, + answerChan: offerEntry.answerChan, + }, + expiry) + + announcementEntry.offerChan <- offerEntry.offer + + m.removeAnnouncementEntryByIndex(j) + + // Remove the matched offer from the queue and match the next offer, + // now first in the queue. + + m.removeOfferEntryByIndex(i) + + end -= 1 + } +} + +func (m *Matcher) matchOffer(offerEntry *offerEntry) (int, bool) { + + // Assumes the caller has the queue mutexed locked. + + // Check each announcement in turn, and select a match. There is an + // implicit preference for older proxy announcements, sooner to timeout, + // at the front of the queue. + // + // Limitation: since this logic matches each enqueued client in turn, it will + // only make the optimal NAT match for the oldest enqueued client vs. all + // proxies, and not do optimal N x M matching for all clients and all proxies. + // + // Future matching enhancements could include more sophisticated GeoIP + // rules, such as a configuration encoding knowledge of an ASN's NAT + // type, or preferred client/proxy country/ASN matches. + + // TODO: match supported protocol versions. Currently, all announces and + // offers must specify ProxyProtocolVersion1, so there's no protocol + // version match logic. + + offerProperties := &offerEntry.offer.Properties + + // Use the NAT traversal type counters to check if there's any preferred + // NAT match for this offer in the announcement queue. When there is, we + // will search beyond the first announcement. + + existsPreferredNATMatch := offerProperties.ExistsPreferredNATMatch( + m.announcementsUnlimitedNATCount > 0, + m.announcementsPartiallyLimitedNATCount > 0, + m.announcementsStrictlyLimitedNATCount > 0) + + bestMatch := -1 + bestMatchNAT := false + bestMatchCompartment := false + + end := m.announcementQueue.Len() + + // TODO: add queue indexing to facilitate skipping ahead to a matching + // personal compartment ID, if any, when personal-only matching is + // required. Personal matching may often require near-full queue scans + // when looking for a match. 
Common compartment matching may also benefit + // from indexing, although with a handful of common compartment IDs more + // or less uniformly distributed, frequent long scans are not expected in + // practise. + + for i := 0; i < end; i++ { + + announcementEntry := m.announcementQueue.At(i) + + // Skip and remove this announcement if its deadline has already + // passed. There is no signal to the awaiting Announce function, as + // it will exit based on the same ctx. + + if announcementEntry.ctx.Err() != nil { + m.removeAnnouncementEntryByIndex(i) + end -= 1 + continue + } + + announcementProperties := &announcementEntry.announcement.Properties + + // There must be a compartment match. If there is a personal + // compartment match, this match will be preferred. + + matchCommonCompartment := HaveCommonIDs( + announcementProperties.CommonCompartmentIDs, offerProperties.CommonCompartmentIDs) + matchPersonalCompartment := HaveCommonIDs( + announcementProperties.PersonalCompartmentIDs, offerProperties.PersonalCompartmentIDs) + if !matchCommonCompartment && !matchPersonalCompartment { + continue + } + + // Disallow matching the same country and ASN, except for personal + // compartment ID matches. + // + // For common matching, hopping through the same ISP is assumed to + // have no circumvention benefit. For personal matching, the user may + // wish to hop their their own or their friend's proxy regardless. + + if !matchPersonalCompartment && + !GetAllowCommonASNMatching() && + (offerProperties.GeoIPData.Country == + announcementProperties.GeoIPData.Country && + offerProperties.GeoIPData.ASN == + announcementProperties.GeoIPData.ASN) { + continue + } + + // Check if this is a preferred NAT match. Ultimately, a match may be + // made with potentially incompatible NATs, but the client/proxy + // reported NAT types may be incorrect or unknown; the client will + // often skip NAT discovery. + + matchNAT := offerProperties.IsPreferredNATMatch(announcementProperties) + + // At this point, the candidate is a match. Determine if this is a new + // best match. + + if bestMatch == -1 { + + // This is a match, and there was no previous match, so it becomes + // the provisional best match. + + bestMatch = i + bestMatchNAT = matchNAT + bestMatchCompartment = matchPersonalCompartment + + } else if !bestMatchNAT && matchNAT { + + // If there was a previous best match which was not a preferred + // NAT match, this becomes the new best match. The preferred NAT + // match is prioritized over personal compartment matching. + + bestMatch = i + bestMatchNAT = true + bestMatchCompartment = matchPersonalCompartment + + } else if !bestMatchCompartment && matchPersonalCompartment && (!bestMatchNAT || matchNAT) { + + // If there was a previous best match which was not a personal + // compartment match, and as long as this match doesn't undo a + // better NAT match, this becomes the new best match. + + bestMatch = i + bestMatchNAT = matchNAT + bestMatchCompartment = true + } + + // Stop as soon as we have the best possible match. + + if (bestMatchNAT || !existsPreferredNATMatch) && + (matchPersonalCompartment || + m.announcementsPersonalCompartmentalizedCount == 0 || + len(offerProperties.PersonalCompartmentIDs) == 0) { + break + } + } + + return bestMatch, bestMatch != -1 +} + +// MatcherLimitError is the error type returned by Announce or Offer when the +// caller has exceeded configured queue entry or rate limits. 
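+//
+// A caller sketch for distinguishing limit errors (added illustration; it
+// assumes the error chain wrapped by errors.Trace supports errors.As
+// unwrapping):
+//
+//	var limitErr *MatcherLimitError
+//	if std_errors.As(err, &limitErr) {
+//		// respond with a "busy" indication rather than a generic failure
+//	}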
+type MatcherLimitError struct { + err error +} + +func NewMatcherLimitError(err error) *MatcherLimitError { + return &MatcherLimitError{err: err} +} + +func (e MatcherLimitError) Error() string { + return e.err.Error() +} + +func (m *Matcher) applyLimits(isAnnouncement bool, limitIP string, proxyID ID) error { + + // Assumes the m.announcementQueueMutex or m.offerQueue mutex is locked. + + var entryCountByIP map[string]int + var queueRateLimiters *lrucache.Cache + var limitEntryCount int + var quantity int + var interval time.Duration + + if isAnnouncement { + + // Skip limit checks for non-limited proxies. + if _, ok := m.announcementNonlimitedProxyIDs[proxyID]; ok { + return nil + } + + entryCountByIP = m.announcementQueueEntryCountByIP + queueRateLimiters = m.announcementQueueRateLimiters + limitEntryCount = m.announcementLimitEntryCount + quantity = m.announcementRateLimitQuantity + interval = m.announcementRateLimitInterval + + } else { + entryCountByIP = m.offerQueueEntryCountByIP + queueRateLimiters = m.offerQueueRateLimiters + limitEntryCount = m.offerLimitEntryCount + quantity = m.offerRateLimitQuantity + interval = m.offerRateLimitInterval + } + + // The rate limit is checked first, before the max count check, to ensure + // that the rate limit state is updated regardless of the max count check + // outcome. + + if quantity > 0 && interval > 0 { + + var rateLimiter *rate.Limiter + + entry, ok := queueRateLimiters.Get(limitIP) + if ok { + rateLimiter = entry.(*rate.Limiter) + } else { + limit := float64(quantity) / interval.Seconds() + rateLimiter = rate.NewLimiter(rate.Limit(limit), quantity) + queueRateLimiters.Set( + limitIP, rateLimiter, interval) + } + + if !rateLimiter.Allow() { + return errors.Trace( + NewMatcherLimitError(std_errors.New("rate exceeded for IP"))) + } + } + + if limitEntryCount > 0 { + + // Limitation: non-limited proxy ID entries are counted in + // entryCountByIP. If both a limited and non-limited proxy ingress + // from the same limitIP, then the non-limited entries will count + // against the limited proxy's limitEntryCount. + + entryCount, ok := entryCountByIP[limitIP] + if ok && entryCount >= limitEntryCount { + return errors.Trace( + NewMatcherLimitError(std_errors.New("max entries for IP"))) + } + } + + return nil +} + +func (m *Matcher) addAnnouncementEntry(announcementEntry *announcementEntry) error { + + m.announcementQueueMutex.Lock() + defer m.announcementQueueMutex.Unlock() + + // Ensure the queue doesn't grow larger than the max size. + if m.announcementQueue.Len() >= matcherAnnouncementQueueMaxSize { + return errors.TraceNew("queue full") + } + + // Ensure no single peer IP can enqueue a large number of entries or + // rapidly enqueue beyond the configured rate. 
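+	// Note (added for clarity): limitIP is the peer address as normalized by
+	// getRateLimitIP, so IPv6 peers are counted and rate limited per /56
+	// prefix rather than per individual address.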
+ isAnnouncement := true + err := m.applyLimits( + isAnnouncement, announcementEntry.limitIP, announcementEntry.announcement.ProxyID) + if err != nil { + return errors.Trace(err) + } + + m.announcementQueue.PushBack(announcementEntry) + + m.announcementQueueEntryCountByIP[announcementEntry.limitIP] += 1 + + m.adjustAnnouncementCounts(announcementEntry, 1) + + select { + case m.matchSignal <- struct{}{}: + default: + } + + return nil +} + +func (m *Matcher) removeAnnouncementEntry(announcementEntry *announcementEntry) { + + m.announcementQueueMutex.Lock() + defer m.announcementQueueMutex.Unlock() + + found := false + for i := 0; i < m.announcementQueue.Len(); i++ { + if m.announcementQueue.At(i) == announcementEntry { + m.removeAnnouncementEntryByIndex(i) + found = true + break + } + } + if !found { + + // The Announce call is aborting and taking its entry back out of the + // queue. If the entry is not found in the queue, then a concurrent + // Offer has matched the announcement. So check for the pending + // answer corresponding to the announcement and remove it and deliver + // a failure signal to the waiting Offer, so the client doesn't wait + // longer than necessary. + + key := m.pendingAnswerKey( + announcementEntry.announcement.ProxyID, + announcementEntry.announcement.ConnectionID) + + pendingAnswerValue, ok := m.pendingAnswers.Get(key) + if ok { + close(pendingAnswerValue.(*pendingAnswer).answerChan) + m.pendingAnswers.Delete(key) + } + } +} + +func (m *Matcher) removeAnnouncementEntryByIndex(i int) { + + // Assumes s.announcementQueueMutex lock is held. + + announcementEntry := m.announcementQueue.At(i) + + // This should be only direct call to Remove, as following adjustments + // must always be made when removing. + m.announcementQueue.Remove(i) + + // Adjust entry counts by peer IP, used to enforce + // matcherAnnouncementQueueMaxEntriesPerIP. + m.announcementQueueEntryCountByIP[announcementEntry.limitIP] -= 1 + if m.announcementQueueEntryCountByIP[announcementEntry.limitIP] == 0 { + delete(m.announcementQueueEntryCountByIP, announcementEntry.limitIP) + } + + m.adjustAnnouncementCounts(announcementEntry, -1) +} + +func (m *Matcher) adjustAnnouncementCounts( + announcementEntry *announcementEntry, delta int) { + + // Assumes s.announcementQueueMutex lock is held. + + if announcementEntry.announcement.Properties.IsPersonalCompartmentalized() { + m.announcementsPersonalCompartmentalizedCount += delta + } + + switch announcementEntry.announcement.Properties.EffectiveNATType().Traversal() { + case NATTraversalUnlimited: + m.announcementsUnlimitedNATCount += delta + case NATTraversalPartiallyLimited: + m.announcementsPartiallyLimitedNATCount += delta + case NATTraversalStrictlyLimited: + m.announcementsStrictlyLimitedNATCount += delta + } +} + +func (m *Matcher) addOfferEntry(offerEntry *offerEntry) error { + + m.offerQueueMutex.Lock() + defer m.offerQueueMutex.Unlock() + + // Ensure the queue doesn't grow larger than the max size. + if m.offerQueue.Len() >= matcherOfferQueueMaxSize { + return errors.TraceNew("queue full") + } + + // Ensure no single peer IP can enqueue a large number of entries or + // rapidly enqueue beyond the configured rate. 
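+	// Note (added for clarity): offers pass a zero proxy ID since the
+	// non-limited proxy ID exemption in applyLimits applies only to
+	// announcements.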
+ isAnnouncement := false + err := m.applyLimits( + isAnnouncement, offerEntry.limitIP, ID{}) + if err != nil { + return errors.Trace(err) + } + + m.offerQueue.PushBack(offerEntry) + + m.offerQueueEntryCountByIP[offerEntry.limitIP] += 1 + + select { + case m.matchSignal <- struct{}{}: + default: + } + + return nil +} + +func (m *Matcher) removeOfferEntry(offerEntry *offerEntry) { + + m.offerQueueMutex.Lock() + defer m.offerQueueMutex.Unlock() + + for i := 0; i < m.offerQueue.Len(); i++ { + if m.offerQueue.At(i) == offerEntry { + m.removeOfferEntryByIndex(i) + break + } + } +} + +func (m *Matcher) removeOfferEntryByIndex(i int) { + + // Assumes s.offerQueueMutex lock is held. + + offerEntry := m.offerQueue.At(i) + + // This should be only direct call to Remove, as following adjustments + // must always be made when removing. + m.offerQueue.Remove(i) + + // Adjust entry counts by peer IP, used to enforce + // matcherOfferQueueMaxEntriesPerIP. + m.offerQueueEntryCountByIP[offerEntry.limitIP] -= 1 + if m.offerQueueEntryCountByIP[offerEntry.limitIP] == 0 { + delete(m.offerQueueEntryCountByIP, offerEntry.limitIP) + } +} + +func (m *Matcher) pendingAnswerKey(proxyID ID, connectionID ID) string { + + // The pending answer lookup key is used to associate announcements and + // subsequent answers. While the client learns the ConnectionID, only the + // proxy knows the ProxyID component, so only the correct proxy can match + // an answer to an announcement. The ConnectionID component is necessary + // as a proxy may have multiple, concurrent pending answers. + + return string(proxyID[:]) + string(connectionID[:]) +} + +func getRateLimitIP(strIP string) string { + + IP := net.ParseIP(strIP) + if IP == nil || IP.To4() != nil { + return strIP + } + + // With IPv6, individual users or sites are users commonly allocated a /64 + // or /56, so rate limit by /56. + return IP.Mask(net.CIDRMask(56, 128)).String() +} diff --git a/psiphon/common/inproxy/matcher_test.go b/psiphon/common/inproxy/matcher_test.go new file mode 100644 index 000000000..9c0182c1f --- /dev/null +++ b/psiphon/common/inproxy/matcher_test.go @@ -0,0 +1,678 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package inproxy + +import ( + "context" + "fmt" + "strings" + "sync" + "testing" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" +) + +func TestMatcher(t *testing.T) { + err := runTestMatcher() + if err != nil { + t.Errorf(errors.Trace(err).Error()) + } + +} + +func runTestMatcher() error { + + limitEntryCount := 50 + rateLimitQuantity := 100 + rateLimitInterval := 1000 * time.Millisecond + + logger := newTestLogger() + + m := NewMatcher( + &MatcherConfig{ + Logger: logger, + + AnnouncementLimitEntryCount: limitEntryCount, + AnnouncementRateLimitQuantity: rateLimitQuantity, + AnnouncementRateLimitInterval: rateLimitInterval, + + OfferLimitEntryCount: limitEntryCount, + OfferRateLimitQuantity: rateLimitQuantity, + OfferRateLimitInterval: rateLimitInterval, + }) + err := m.Start() + if err != nil { + return errors.Trace(err) + } + defer m.Stop() + + makeID := func() ID { + ID, err := MakeID() + if err != nil { + panic(err) + } + return ID + } + + makeAnnouncement := func(properties *MatchProperties) *MatchAnnouncement { + return &MatchAnnouncement{ + Properties: *properties, + ProxyID: makeID(), + ConnectionID: makeID(), + } + } + + makeOffer := func(properties *MatchProperties) *MatchOffer { + return &MatchOffer{ + Properties: *properties, + ClientProxyProtocolVersion: ProxyProtocolVersion1, + } + } + + checkMatchMetrics := func(metrics *MatchMetrics) error { + if metrics.OfferQueueSize < 1 || metrics.AnnouncementQueueSize < 1 { + return errors.TraceNew("unexpected match metrics") + } + return nil + } + + proxyIP := randomIPAddress() + + proxyFunc := func( + resultChan chan error, + proxyIP string, + matchProperties *MatchProperties, + timeout time.Duration, + waitBeforeAnswer chan struct{}, + answerSuccess bool) { + + ctx, cancelFunc := context.WithTimeout(context.Background(), timeout) + defer cancelFunc() + + announcement := makeAnnouncement(matchProperties) + offer, matchMetrics, err := m.Announce(ctx, proxyIP, announcement) + if err != nil { + resultChan <- errors.Trace(err) + return + } else { + err := checkMatchMetrics(matchMetrics) + if err != nil { + resultChan <- errors.Trace(err) + return + } + } + + if waitBeforeAnswer != nil { + <-waitBeforeAnswer + } + + if answerSuccess { + err = m.Answer( + &MatchAnswer{ + ProxyID: announcement.ProxyID, + ConnectionID: announcement.ConnectionID, + SelectedProxyProtocolVersion: offer.ClientProxyProtocolVersion, + }) + } else { + m.AnswerError(announcement.ProxyID, announcement.ConnectionID) + } + resultChan <- errors.Trace(err) + } + + clientIP := randomIPAddress() + + clientFunc := func( + resultChan chan error, + clientIP string, + matchProperties *MatchProperties, + timeout time.Duration) { + + ctx, cancelFunc := context.WithTimeout(context.Background(), timeout) + defer cancelFunc() + + offer := makeOffer(matchProperties) + answer, _, matchMetrics, err := m.Offer(ctx, clientIP, offer) + if err != nil { + resultChan <- errors.Trace(err) + return + } + if answer.SelectedProxyProtocolVersion != offer.ClientProxyProtocolVersion { + resultChan <- errors.TraceNew("unexpected selected proxy protocol version") + return + } else { + err := checkMatchMetrics(matchMetrics) + if err != nil { + resultChan <- errors.Trace(err) + return + } + } + resultChan <- nil + } + + // Test: announce timeout + + proxyResultChan := make(chan error) + + go proxyFunc(proxyResultChan, 
proxyIP, &MatchProperties{}, 1*time.Microsecond, nil, true) + + err = <-proxyResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + if m.announcementQueue.Len() != 0 { + return errors.TraceNew("unexpected queue size") + } + + // Test: limit announce entries by IP + + time.Sleep(rateLimitInterval) + + maxEntries := limitEntryCount + maxEntriesProxyResultChan := make(chan error, maxEntries) + + // fill the queue with max entries for one IP; the first one will timeout sooner + go proxyFunc(maxEntriesProxyResultChan, proxyIP, &MatchProperties{}, 10*time.Millisecond, nil, true) + for i := 0; i < maxEntries-1; i++ { + go proxyFunc(maxEntriesProxyResultChan, proxyIP, &MatchProperties{}, 100*time.Millisecond, nil, true) + } + + // await goroutines filling queue + for { + time.Sleep(10 * time.Microsecond) + m.announcementQueueMutex.Lock() + queueLen := m.announcementQueue.Len() + m.announcementQueueMutex.Unlock() + if queueLen == maxEntries { + break + } + } + + // the next enqueue should fail with "max entries" + go proxyFunc(proxyResultChan, proxyIP, &MatchProperties{}, 10*time.Millisecond, nil, true) + err = <-proxyResultChan + if err == nil || !strings.HasSuffix(err.Error(), "max entries for IP") { + return errors.Tracef("unexpected result: %v", err) + } + + // wait for first entry to timeout + err = <-maxEntriesProxyResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + + // now another enqueue succeeds as expected + go proxyFunc(proxyResultChan, proxyIP, &MatchProperties{}, 10*time.Millisecond, nil, true) + err = <-proxyResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + + // drain remaining entries + for i := 0; i < maxEntries-1; i++ { + err = <-maxEntriesProxyResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + } + + // Test: offer timeout + + clientResultChan := make(chan error) + + go clientFunc(clientResultChan, clientIP, &MatchProperties{}, 1*time.Microsecond) + + err = <-clientResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + if m.offerQueue.Len() != 0 { + return errors.TraceNew("unexpected queue size") + } + + // Test: limit offer entries by IP + + time.Sleep(rateLimitInterval) + + maxEntries = limitEntryCount + maxEntriesClientResultChan := make(chan error, maxEntries) + + // fill the queue with max entries for one IP; the first one will timeout sooner + go clientFunc(maxEntriesClientResultChan, clientIP, &MatchProperties{}, 10*time.Millisecond) + for i := 0; i < maxEntries-1; i++ { + go clientFunc(maxEntriesClientResultChan, clientIP, &MatchProperties{}, 100*time.Millisecond) + } + + // await goroutines filling queue + for { + time.Sleep(10 * time.Microsecond) + + m.offerQueueMutex.Lock() + queueLen := m.offerQueue.Len() + m.offerQueueMutex.Unlock() + if queueLen == maxEntries { + break + } + } + + // enqueue should fail with "max entries" + go clientFunc(clientResultChan, clientIP, &MatchProperties{}, 10*time.Millisecond) + err = <-clientResultChan + if err == nil || !strings.HasSuffix(err.Error(), "max entries for IP") { + return errors.Tracef("unexpected result: %v", err) + } + + // wait for 
first entry to timeout + err = <-maxEntriesClientResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + + // now another enqueue succeeds as expected + go clientFunc(clientResultChan, clientIP, &MatchProperties{}, 10*time.Millisecond) + err = <-clientResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + + // drain remaining entries + for i := 0; i < maxEntries-1; i++ { + err = <-maxEntriesClientResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + } + + // Test: announcement rate limit + + m.SetLimits( + 0, rateLimitQuantity, rateLimitInterval, []ID{}, + 0, rateLimitQuantity, rateLimitInterval) + + time.Sleep(rateLimitInterval) + + maxEntries = rateLimitQuantity + maxEntriesProxyResultChan = make(chan error, maxEntries) + + waitGroup := new(sync.WaitGroup) + for i := 0; i < maxEntries; i++ { + waitGroup.Add(1) + go func() { + defer waitGroup.Done() + proxyFunc(maxEntriesProxyResultChan, proxyIP, &MatchProperties{}, 1*time.Microsecond, nil, true) + }() + } + + // Use a wait group to ensure all maxEntries have hit the rate limiter + // without sleeping before the next attempt, as any sleep can increase + // the rate limiter token count. + waitGroup.Wait() + + // the next enqueue should fail with "rate exceeded" + go proxyFunc(proxyResultChan, proxyIP, &MatchProperties{}, 10*time.Millisecond, nil, true) + err = <-proxyResultChan + if err == nil || !strings.HasSuffix(err.Error(), "rate exceeded for IP") { + return errors.Tracef("unexpected result: %v", err) + } + + // Test: offer rate limit + + maxEntries = rateLimitQuantity + maxEntriesClientResultChan = make(chan error, maxEntries) + + waitGroup = new(sync.WaitGroup) + for i := 0; i < rateLimitQuantity; i++ { + waitGroup.Add(1) + go func() { + defer waitGroup.Done() + clientFunc(maxEntriesClientResultChan, clientIP, &MatchProperties{}, 1*time.Microsecond) + }() + } + + waitGroup.Wait() + + // enqueue should fail with "rate exceeded" + go clientFunc(clientResultChan, clientIP, &MatchProperties{}, 10*time.Millisecond) + err = <-clientResultChan + if err == nil || !strings.HasSuffix(err.Error(), "rate exceeded for IP") { + return errors.Tracef("unexpected result: %v", err) + } + + time.Sleep(rateLimitInterval) + + m.SetLimits( + limitEntryCount, rateLimitQuantity, rateLimitInterval, []ID{}, + limitEntryCount, rateLimitQuantity, rateLimitInterval) + + // Test: basic match + + basicCommonCompartmentIDs := []ID{makeID()} + + geoIPData1 := &MatchProperties{ + GeoIPData: common.GeoIPData{Country: "C1", ASN: "A1"}, + CommonCompartmentIDs: basicCommonCompartmentIDs, + } + + geoIPData2 := &MatchProperties{ + GeoIPData: common.GeoIPData{Country: "C2", ASN: "A2"}, + CommonCompartmentIDs: basicCommonCompartmentIDs, + } + + go proxyFunc(proxyResultChan, proxyIP, geoIPData1, 10*time.Millisecond, nil, true) + go clientFunc(clientResultChan, clientIP, geoIPData2, 10*time.Millisecond) + + err = <-proxyResultChan + if err != nil { + return errors.Trace(err) + } + + err = <-clientResultChan + if err != nil { + return errors.Trace(err) + } + + // Test: answer error + + go proxyFunc(proxyResultChan, proxyIP, geoIPData1, 10*time.Millisecond, nil, false) + go clientFunc(clientResultChan, clientIP, geoIPData2, 10*time.Millisecond) + + err = <-proxyResultChan + if err != nil { + 
return errors.Trace(err) + } + + err = <-clientResultChan + if err == nil || !strings.HasSuffix(err.Error(), "no answer") { + return errors.Tracef("unexpected result: %v", err) + } + + // Test: client is gone + + waitBeforeAnswer := make(chan struct{}) + + go proxyFunc(proxyResultChan, proxyIP, geoIPData1, 100*time.Millisecond, waitBeforeAnswer, true) + go clientFunc(clientResultChan, clientIP, geoIPData2, 10*time.Millisecond) + + err = <-clientResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + + close(waitBeforeAnswer) + + err = <-proxyResultChan + if err == nil || !strings.HasSuffix(err.Error(), "no client") { + return errors.Tracef("unexpected result: %v", err) + } + + // Test: no compartment match + + compartment1 := &MatchProperties{ + GeoIPData: geoIPData1.GeoIPData, + CommonCompartmentIDs: []ID{makeID()}, + PersonalCompartmentIDs: []ID{makeID()}, + } + + compartment2 := &MatchProperties{ + GeoIPData: geoIPData2.GeoIPData, + CommonCompartmentIDs: []ID{makeID()}, + PersonalCompartmentIDs: []ID{makeID()}, + } + + go proxyFunc(proxyResultChan, proxyIP, compartment1, 10*time.Millisecond, nil, true) + go clientFunc(clientResultChan, clientIP, compartment2, 10*time.Millisecond) + + err = <-proxyResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + + err = <-clientResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + + // Test: common compartment match + + compartment1And2 := &MatchProperties{ + GeoIPData: geoIPData2.GeoIPData, + CommonCompartmentIDs: []ID{compartment1.CommonCompartmentIDs[0], compartment2.CommonCompartmentIDs[0]}, + } + + go proxyFunc(proxyResultChan, proxyIP, compartment1, 10*time.Millisecond, nil, true) + go clientFunc(clientResultChan, clientIP, compartment1And2, 10*time.Millisecond) + + err = <-proxyResultChan + if err != nil { + return errors.Trace(err) + } + + err = <-clientResultChan + if err != nil { + return errors.Trace(err) + } + + // Test: personal compartment match + + compartment1And2 = &MatchProperties{ + GeoIPData: geoIPData2.GeoIPData, + PersonalCompartmentIDs: []ID{compartment1.PersonalCompartmentIDs[0], compartment2.PersonalCompartmentIDs[0]}, + } + + go proxyFunc(proxyResultChan, proxyIP, compartment1, 10*time.Millisecond, nil, true) + go clientFunc(clientResultChan, clientIP, compartment1And2, 10*time.Millisecond) + + err = <-proxyResultChan + if err != nil { + return errors.Trace(err) + } + + err = <-clientResultChan + if err != nil { + return errors.Trace(err) + } + + // Test: personal compartment preferred match + + compartment1Common := &MatchProperties{ + GeoIPData: geoIPData1.GeoIPData, + CommonCompartmentIDs: []ID{compartment1.CommonCompartmentIDs[0]}, + } + + compartment1Personal := &MatchProperties{ + GeoIPData: geoIPData1.GeoIPData, + PersonalCompartmentIDs: []ID{compartment1.PersonalCompartmentIDs[0]}, + } + + compartment1CommonAndPersonal := &MatchProperties{ + GeoIPData: geoIPData2.GeoIPData, + CommonCompartmentIDs: []ID{compartment1.CommonCompartmentIDs[0]}, + PersonalCompartmentIDs: []ID{compartment1.PersonalCompartmentIDs[0]}, + } + + client1ResultChan := make(chan error) + client2ResultChan := make(chan error) + + proxy1ResultChan := make(chan error) + proxy2ResultChan := make(chan error) + + go proxyFunc(proxy1ResultChan, proxyIP, 
compartment1Common, 10*time.Millisecond, nil, true) + go proxyFunc(proxy2ResultChan, proxyIP, compartment1Personal, 10*time.Millisecond, nil, true) + time.Sleep(5 * time.Millisecond) // Hack to ensure both proxies are enqueued + go clientFunc(client1ResultChan, clientIP, compartment1CommonAndPersonal, 10*time.Millisecond) + + err = <-proxy1ResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + + // proxy2 should match since it has the preferred personal compartment ID + err = <-proxy2ResultChan + if err != nil { + return errors.Trace(err) + } + + err = <-client1ResultChan + if err != nil { + return errors.Trace(err) + } + + // Test: no same-ASN match + + go proxyFunc(proxyResultChan, proxyIP, geoIPData1, 10*time.Millisecond, nil, true) + go clientFunc(clientResultChan, clientIP, geoIPData1, 10*time.Millisecond) + + err = <-proxyResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + + err = <-clientResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + + // Test: proxy preferred NAT match + + client1Properties := &MatchProperties{ + GeoIPData: common.GeoIPData{Country: "C1", ASN: "A1"}, + NATType: NATTypeFullCone, + CommonCompartmentIDs: basicCommonCompartmentIDs, + } + + client2Properties := &MatchProperties{ + GeoIPData: common.GeoIPData{Country: "C2", ASN: "A2"}, + NATType: NATTypeSymmetric, + CommonCompartmentIDs: basicCommonCompartmentIDs, + } + + proxy1Properties := &MatchProperties{ + GeoIPData: common.GeoIPData{Country: "C3", ASN: "A3"}, + NATType: NATTypeNone, + CommonCompartmentIDs: basicCommonCompartmentIDs, + } + + proxy2Properties := &MatchProperties{ + GeoIPData: common.GeoIPData{Country: "C4", ASN: "A4"}, + NATType: NATTypeSymmetric, + CommonCompartmentIDs: basicCommonCompartmentIDs, + } + + go proxyFunc(proxy1ResultChan, proxyIP, proxy1Properties, 10*time.Millisecond, nil, true) + go proxyFunc(proxy2ResultChan, proxyIP, proxy2Properties, 10*time.Millisecond, nil, true) + time.Sleep(5 * time.Millisecond) // Hack to ensure both proxies are enqueued + go clientFunc(client1ResultChan, clientIP, client1Properties, 10*time.Millisecond) + + err = <-proxy1ResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + + // proxy2 should match since it's the preferred NAT match + err = <-proxy2ResultChan + if err != nil { + return errors.Trace(err) + } + + err = <-client1ResultChan + if err != nil { + return errors.Trace(err) + } + + // Test: client preferred NAT match + + // Limitation: the current Matcher.matchAllOffers logic matches the first + // enqueued client against the best proxy match, regardless of whether + // there is another client in the queue that's a better match for that + // proxy. As a result, this test only passes when the preferred matching + // client is enqueued first, and the test is currently of limited utility. 
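+	// Added note: client2 (symmetric NAT) is enqueued before client1 (full
+	// cone) so that, when proxy1 (unlimited NAT traversal) announces, the
+	// oldest-first offer scan pairs proxy1 with its preferred, more limited
+	// peer, and client1 is left to time out.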
+ + go clientFunc(client2ResultChan, clientIP, client2Properties, 20*time.Millisecond) + time.Sleep(5 * time.Millisecond) // Hack to client is enqueued + go clientFunc(client1ResultChan, clientIP, client1Properties, 20*time.Millisecond) + time.Sleep(5 * time.Millisecond) // Hack to client is enqueued + go proxyFunc(proxy1ResultChan, proxyIP, proxy1Properties, 20*time.Millisecond, nil, true) + + err = <-proxy1ResultChan + if err != nil { + return errors.Trace(err) + } + + err = <-client1ResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) + } + + // client2 should match since it's the preferred NAT match + err = <-client2ResultChan + if err != nil { + return errors.Trace(err) + } + + // Test: many matches + + // Reduce test log noise for this phase of the test + logger.SetLogLevelDebug(false) + + matchCount := 10000 + proxyCount := matchCount + clientCount := matchCount + + // Buffered so no goroutine will block reporting result + proxyResultChan = make(chan error, matchCount) + clientResultChan = make(chan error, matchCount) + + for proxyCount > 0 || clientCount > 0 { + + // Don't simply alternate enqueuing a proxy and a client + if proxyCount > 0 && (clientCount == 0 || prng.FlipCoin()) { + go proxyFunc(proxyResultChan, randomIPAddress(), geoIPData1, 10*time.Second, nil, true) + proxyCount -= 1 + + } else if clientCount > 0 { + go clientFunc(clientResultChan, randomIPAddress(), geoIPData2, 10*time.Second) + clientCount -= 1 + } + } + + for i := 0; i < matchCount; i++ { + err = <-proxyResultChan + if err != nil { + return errors.Trace(err) + } + + err = <-clientResultChan + if err != nil { + return errors.Trace(err) + } + } + + return nil +} + +func randomIPAddress() string { + return fmt.Sprintf("%d.%d.%d.%d", + prng.Range(0, 255), + prng.Range(0, 255), + prng.Range(0, 255), + prng.Range(0, 255)) +} diff --git a/psiphon/common/inproxy/nat.go b/psiphon/common/inproxy/nat.go new file mode 100644 index 000000000..2611a804b --- /dev/null +++ b/psiphon/common/inproxy/nat.go @@ -0,0 +1,384 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "fmt" +) + +// NATMapping is a NAT mapping behavior defined in RFC 4787, section 4.1. +type NATMapping int32 + +const ( + NATMappingUnknown NATMapping = iota + NATMappingEndpointIndependent + NATMappingAddressDependent + NATMappingAddressPortDependent +) + +func (m NATMapping) String() string { + switch m { + case NATMappingUnknown: + return "MappingUnknown" + case NATMappingEndpointIndependent: + return "MappingEndpointIndependent" + case NATMappingAddressDependent: + return "MappingAddressDependent" + case NATMappingAddressPortDependent: + return "MappingAddressPortDependent" + } + return "" +} + +// MarshalText ensures the string representation of the value is logged in +// JSON. 
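+// For example, NATMappingEndpointIndependent is logged as
+// "MappingEndpointIndependent" rather than its numeric value.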
+func (m NATMapping) MarshalText() ([]byte, error) {
+	return []byte(m.String()), nil
+}
+
+func (m NATMapping) IsValid() bool {
+	return m.String() != ""
+}
+
+// NATFiltering is a NAT filtering behavior defined in RFC 4787, section 5.
+type NATFiltering int32
+
+const (
+	NATFilteringUnknown NATFiltering = iota
+	NATFilteringEndpointIndependent
+	NATFilteringAddressDependent
+	NATFilteringAddressPortDependent
+)
+
+func (f NATFiltering) String() string {
+	switch f {
+	case NATFilteringUnknown:
+		return "FilteringUnknown"
+	case NATFilteringEndpointIndependent:
+		return "FilteringEndpointIndependent"
+	case NATFilteringAddressDependent:
+		return "FilteringAddressDependent"
+	case NATFilteringAddressPortDependent:
+		return "FilteringAddressPortDependent"
+	}
+	return ""
+}
+
+// MarshalText ensures the string representation of the value is logged in
+// JSON.
+func (f NATFiltering) MarshalText() ([]byte, error) {
+	return []byte(f.String()), nil
+}
+
+func (f NATFiltering) IsValid() bool {
+	return f.String() != ""
+}
+
+// NATType specifies a network's NAT behavior and consists of a NATMapping and
+// a NATFiltering component.
+type NATType int32
+
+// MakeNATType creates a new NATType.
+func MakeNATType(mapping NATMapping, filtering NATFiltering) NATType {
+	return (NATType(mapping) << 2) | NATType(filtering)
+}
+
+var (
+	NATTypeUnknown = MakeNATType(NATMappingUnknown, NATFilteringUnknown)
+
+	// NATTypePortMapping is a pseudo NATType, used in matching, that
+	// represents the relevant NAT behavior of a port mapping (e.g., UPnP-IGD).
+	NATTypePortMapping = MakeNATType(NATMappingEndpointIndependent, NATFilteringEndpointIndependent)
+
+	// NATTypeMobileNetwork is a pseudo NATType, used in matching, that
+	// represents the assumed and relevant NAT behavior of clients on mobile
+	// networks, presumed to be behind CGNAT when they report NATTypeUnknown.
+	NATTypeMobileNetwork = MakeNATType(NATMappingAddressPortDependent, NATFilteringAddressPortDependent)
+
+	// NATTypeNone and the following NATType constants are used in testing.
+	// They are not entirely precise (a symmetric NAT may have a different
+	// mix of mapping and filtering values). The matching logic does not use
+	// specific NAT type definitions and instead considers the reported
+	// mapping and filtering values.
+	NATTypeNone               = MakeNATType(NATMappingEndpointIndependent, NATFilteringEndpointIndependent)
+	NATTypeFullCone           = MakeNATType(NATMappingEndpointIndependent, NATFilteringEndpointIndependent)
+	NATTypeRestrictedCone     = MakeNATType(NATMappingEndpointIndependent, NATFilteringAddressDependent)
+	NATTypePortRestrictedCone = MakeNATType(NATMappingEndpointIndependent, NATFilteringAddressPortDependent)
+	NATTypeSymmetric          = MakeNATType(NATMappingAddressPortDependent, NATFilteringAddressPortDependent)
+)
+
+// NeedsDiscovery indicates that the NATType is unknown and should be
+// discovered.
+func (t NATType) NeedsDiscovery() bool {
+	return t == NATTypeUnknown
+}
+
+// Mapping extracts the NATMapping component of this NATType.
+func (t NATType) Mapping() NATMapping {
+	return NATMapping(t >> 2)
+}
+
+// Filtering extracts the NATFiltering component of this NATType.
+func (t NATType) Filtering() NATFiltering {
+	return NATFiltering(t & 0x3)
+}
+
+// Traversal returns the NATTraversal classification for this NATType.
+func (t NATType) Traversal() NATTraversal {
+	return MakeTraversal(t)
+}
+
+// Compatible indicates whether the NATType NATTraversals are compatible.
+func (t NATType) Compatible(t1 NATType) bool { + return t.Traversal().Compatible(t1.Traversal()) +} + +// IsPreferredMatch indicates whether the peer NATType's NATTraversal is +// preferred. +func (t NATType) IsPreferredMatch(t1 NATType) bool { + return t.Traversal().IsPreferredMatch(t1.Traversal()) +} + +// ExistsPreferredMatch indicates whhether there exists a preferred match for +// the NATType's NATTraversal. +func (t NATType) ExistsPreferredMatch(unlimited, partiallyLimited, limited bool) bool { + return t.Traversal().ExistsPreferredMatch(unlimited, partiallyLimited, limited) +} + +func (t NATType) String() string { + return fmt.Sprintf( + "%s/%s", t.Mapping().String(), t.Filtering().String()) +} + +// MarshalText ensures the string representation of the value is logged in +// JSON. +func (t NATType) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +func (t NATType) IsValid() bool { + return t.Mapping().IsValid() && t.Filtering().IsValid() +} + +// NATTraversal classifies the NAT traversal potential for a NATType. NATTypes +// are determined to be compatible -- that is, a connection between the +// corresponding networks can be established via STUN hole punching -- based +// on their respective NATTraversal classifications. +type NATTraversal int32 + +const ( + NATTraversalUnlimited NATTraversal = iota + NATTraversalPartiallyLimited + NATTraversalStrictlyLimited +) + +// MakeTraversal returns the NATTraversal classification for the given +// NATType. +func MakeTraversal(t NATType) NATTraversal { + mapping := t.Mapping() + filtering := t.Filtering() + if mapping == NATMappingEndpointIndependent { + if filtering != NATFilteringAddressPortDependent { + // NAT type is, e.g., none, full cone, or restricted cone. + return NATTraversalUnlimited + } + // NAT type is, e.g., port restricted cone. + return NATTraversalPartiallyLimited + } + + // NAT type is, e.g., symmetric; or unknown -- where we assume the worst + // case. + return NATTraversalStrictlyLimited +} + +// Compatible indicates whether the NATTraversals are compatible. +func (t NATTraversal) Compatible(t1 NATTraversal) bool { + + // See the NAT compatibility matrix here: + // https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/wikis/NAT-matching#nat-compatibility + + switch t { + case NATTraversalUnlimited: + // t1 can be any value when t is unlimited. + return true + case NATTraversalPartiallyLimited: + // t1 can be unlimited or partially limited when t is partially limited. + return t1 != NATTraversalStrictlyLimited + case NATTraversalStrictlyLimited: + // t1 must be unlimited when t is limited. + return t1 == NATTraversalUnlimited + } + return false +} + +// IsPreferredMatch indicates whether the peer NATTraversal is a preferred +// match for this NATTraversal. A match is preferred, and so prioritized, +// when one of the two NATTraversals is more limited, but the pair is still +// compatible. This preference attempt to reserve less limited match +// candidates for those peers that need them. +func (t NATTraversal) IsPreferredMatch(t1 NATTraversal) bool { + switch t { + case NATTraversalUnlimited: + // Prefer matching unlimited peers with strictly limited peers. + // TODO: prefer matching unlimited with partially limited? + return t1 == NATTraversalStrictlyLimited + case NATTraversalPartiallyLimited: + // Prefer matching partially limited peers with unlimited or other + // partially limited peers. 
+ return t1 == NATTraversalUnlimited || t1 == NATTraversalPartiallyLimited + case NATTraversalStrictlyLimited: + // Prefer matching strictly limited peers with unlimited peers. + return t1 == NATTraversalUnlimited + } + return false +} + +// ExistsPreferredMatch indicates whether a preferred match exists, for this +// NATTraversal, when there are unlimited/partiallyLimited/strictlyLimited candidates +// available. +func (t NATTraversal) ExistsPreferredMatch(unlimited, partiallyLimited, strictlyLimited bool) bool { + switch t { + case NATTraversalUnlimited: + return strictlyLimited + case NATTraversalPartiallyLimited: + return unlimited || partiallyLimited + case NATTraversalStrictlyLimited: + return unlimited + } + return false +} + +// PortMappingType is a port mapping protocol supported by a network. Values +// include UPnP-IGD, NAT-PMP, and PCP. +type PortMappingType int32 + +const ( + PortMappingTypeNone PortMappingType = iota + PortMappingTypeUPnP + PortMappingTypePMP + PortMappingTypePCP +) + +func (t PortMappingType) String() string { + switch t { + case PortMappingTypeNone: + return "None" + case PortMappingTypeUPnP: + return "UPnP-IGD" + case PortMappingTypePMP: + return "PMP" + case PortMappingTypePCP: + return "PCP" + } + return "" +} + +// MarshalText ensures the string representation of the value is logged in +// JSON. +func (t PortMappingType) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +func (t PortMappingType) IsValid() bool { + return t.String() != "" +} + +// PortMappingTypes is a list of port mapping protocol supported by a +// network. +type PortMappingTypes []PortMappingType + +// NeedsDiscovery indicates that the list of port mapping types is empty and +// should be discovered. If a network has no supported port mapping types, +// its list will include PortMappingTypeNone. +func (t PortMappingTypes) NeedsDiscovery() bool { + return len(t) == 0 +} + +// Available indicates that at least one port mapping protocol is supported. +func (t PortMappingTypes) Available() bool { + for _, portMappingType := range t { + if portMappingType > PortMappingTypeNone { + return true + } + } + return false +} + +func (t PortMappingTypes) IsValid() bool { + for _, portMappingType := range t { + if !portMappingType.IsValid() { + return false + } + } + return true +} + +// ICECandidateType is an ICE candidate type: host for public addresses, port +// mapping for when a port mapping protocol was used to establish a public +// address, or server reflexive when STUN hole punching was used to create a +// public address. Peer reflexive candidates emerge during the ICE +// negotiation process and are not SDP entries. +type ICECandidateType int32 + +const ( + ICECandidateUnknown ICECandidateType = iota + ICECandidateHost + ICECandidatePortMapping + ICECandidateServerReflexive + ICECandidatePeerReflexive +) + +func (t ICECandidateType) String() string { + switch t { + case ICECandidateUnknown: + return "Unknown" + case ICECandidateHost: + return "Host" + case ICECandidatePortMapping: + return "PortMapping" + case ICECandidateServerReflexive: + return "ServerReflexive" + case ICECandidatePeerReflexive: + return "PeerReflexive" + } + return "" +} + +// MarshalText ensures the string representation of the value is logged in +// JSON. +func (t ICECandidateType) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +func (t ICECandidateType) IsValid() bool { + return t.String() != "" +} + +// ICECandidateTypes is a list of ICE candidate types. 
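For reference, a small standalone sketch of the PortMappingTypes semantics defined above, in particular the distinction between an empty (not yet discovered) list and a list containing PortMappingTypeNone; the values used are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy"
)

func main() {

	// A network that probed successfully and found only UPnP-IGD support.
	upnpOnly := inproxy.PortMappingTypes{inproxy.PortMappingTypeUPnP}
	fmt.Println(upnpOnly.NeedsDiscovery()) // false
	fmt.Println(upnpOnly.Available())      // true

	// A probe that found no supported protocols reports a single None entry,
	// which is distinct from the empty list meaning "not yet discovered".
	none := inproxy.PortMappingTypes{inproxy.PortMappingTypeNone}
	fmt.Println(none.NeedsDiscovery()) // false
	fmt.Println(none.Available())      // false

	var undiscovered inproxy.PortMappingTypes
	fmt.Println(undiscovered.NeedsDiscovery()) // true
}
```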
+type ICECandidateTypes []ICECandidateType + +func (t ICECandidateTypes) IsValid() bool { + for _, candidateType := range t { + if !candidateType.IsValid() { + return false + } + } + return true +} diff --git a/psiphon/common/inproxy/obfuscation.go b/psiphon/common/inproxy/obfuscation.go new file mode 100644 index 000000000..7311e109f --- /dev/null +++ b/psiphon/common/inproxy/obfuscation.go @@ -0,0 +1,405 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/binary" + "io" + "sync" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" + "github.com/bits-and-blooms/bloom/v3" + "golang.org/x/crypto/hkdf" +) + +const ( + obfuscationSessionPacketNonceSize = 12 + obfuscationAntiReplayTimePeriod = 10 * time.Minute + obfuscationAntiReplayHistorySize = 10_000_000 +) + +// ObfuscationSecret is shared, semisecret value used in obfuscation layers. +type ObfuscationSecret [32]byte + +// ObfuscationSecretFromString returns an ObfuscationSecret given its string encoding. +func ObfuscationSecretFromString(s string) (ObfuscationSecret, error) { + var secret ObfuscationSecret + return secret, errors.Trace(fromBase64String(s, secret[:])) +} + +// String emits ObfuscationSecrets as base64. +func (secret ObfuscationSecret) String() string { + return base64.RawStdEncoding.EncodeToString([]byte(secret[:])) +} + +// GenerateRootObfuscationSecret creates a new ObfuscationSecret using +// crypto/rand. +func GenerateRootObfuscationSecret() (ObfuscationSecret, error) { + + var secret ObfuscationSecret + _, err := rand.Read(secret[:]) + if err != nil { + return secret, errors.Trace(err) + } + + return secret, nil +} + +// antiReplayTimeFactorPeriodSeconds is variable, to enable overriding the value in +// tests. This value should not be overridden outside of test +// cases. +var antiReplayTimeFactorPeriodSeconds = int64( + obfuscationAntiReplayTimePeriod / time.Second) + +// deriveObfuscationSecret derives an obfuscation secret from the root secret, +// and context. +func deriveObfuscationSecret( + rootObfuscationSecret ObfuscationSecret, + context string) (ObfuscationSecret, error) { + + var key ObfuscationSecret + _, err := io.ReadFull( + hkdf.New(sha256.New, rootObfuscationSecret[:], nil, []byte(context)), key[:]) + if err != nil { + return key, errors.Trace(err) + } + + return key, nil +} + +// deriveSessionPacketObfuscationSecret derives a common session obfuscation +// secret for either end of a session. Set isInitiator to true for packets +// sent or received by the initator; and false for packets sent or received +// by a responder. Set isObfuscating to true for sent packets, and false for +// received packets. 
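A brief usage sketch of the exported secret helpers above. The surrounding scenario (generating a root obfuscation secret and round-tripping it through its string encoding, as a provisioning tool might when distributing the value in configuration) is an illustrative assumption:

```go
package main

import (
	"fmt"

	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy"
)

func main() {

	// Generate a new root obfuscation secret using crypto/rand.
	secret, err := inproxy.GenerateRootObfuscationSecret()
	if err != nil {
		panic(err)
	}

	// The base64 string form is the representation suited to configuration.
	encoded := secret.String()
	fmt.Println(encoded)

	// Consumers decode the configured value back into an ObfuscationSecret.
	decoded, err := inproxy.ObfuscationSecretFromString(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded == secret) // true
}
```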
+func deriveSessionPacketObfuscationSecret( + rootObfuscationSecret ObfuscationSecret, + isInitiator bool, + isObfuscating bool) (ObfuscationSecret, error) { + + // Upstream is packets from the initiator to the responder; or, + // (isInitiator && isObfuscating) || (!isInitiator && !isObfuscating) + isUpstream := (isInitiator == isObfuscating) + + // Derive distinct keys for each flow direction, to ensure that the two + // flows can't simply be xor'd. + context := "in-proxy-session-packet-intiator-to-responder" + if !isUpstream { + context = "in-proxy-session-packet-responder-to-initiator" + } + + key, err := deriveObfuscationSecret(rootObfuscationSecret, context) + if err != nil { + return ObfuscationSecret{}, errors.Trace(err) + } + + return key, nil +} + +// deriveSessionPacketObfuscationSecrets derives both send and receive +// obfuscation secrets. +func deriveSessionPacketObfuscationSecrets( + rootObfuscationSecret ObfuscationSecret, + isInitiator bool) (ObfuscationSecret, ObfuscationSecret, error) { + + send, err := deriveSessionPacketObfuscationSecret( + rootObfuscationSecret, isInitiator, true) + if err != nil { + return ObfuscationSecret{}, ObfuscationSecret{}, errors.Trace(err) + } + + receive, err := deriveSessionPacketObfuscationSecret( + rootObfuscationSecret, isInitiator, false) + if err != nil { + return ObfuscationSecret{}, ObfuscationSecret{}, errors.Trace(err) + } + + return send, receive, nil +} + +// obfuscateSessionPacket wraps a session packet with an obfuscation layer +// which provides: +// +// - indistiguishability from fully random +// - random padding +// - anti-replay +// +// The full-random and padding properties make obfuscated packets appropriate +// to embed in otherwise plaintext transports, such as HTTP, without being +// trivially fingerprintable. +// +// While Noise protocol sessions messages have nonces and associated +// anti-replay for nonces, this measure doen't cover the session handshake, +// so an independent anti-replay mechanism is implemented here. +func obfuscateSessionPacket( + obfuscationSecret ObfuscationSecret, + isInitiator bool, + packet []byte, + paddingMin int, + paddingMax int) ([]byte, error) { + + obfuscatedPacket := make([]byte, obfuscationSessionPacketNonceSize) + + _, err := prng.Read(obfuscatedPacket[:]) + if err != nil { + return nil, errors.Trace(err) + } + + // Initiators add a timestamp within the obfuscated packet. The responder + // uses this value to discard potentially replayed packets which are + // outside the time range of the reponder's anti-replay history. + + // TODO: add a consistent (per-session), random offset to timestamps for + // privacy? + + var timestampedPacket []byte + if isInitiator { + timestampedPacket = binary.AppendVarint(nil, time.Now().Unix()) + } + + paddingSize := prng.Range(paddingMin, paddingMax) + paddedPacket := binary.AppendUvarint(timestampedPacket, uint64(paddingSize)) + + paddedPacket = append(paddedPacket, make([]byte, paddingSize)...) + paddedPacket = append(paddedPacket, packet...) 
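+
+	// At this point paddedPacket is laid out as:
+	//
+	//   [varint timestamp (initiator only)][uvarint padding size][padding][packet]
+	//
+	// and is sealed below with AES-GCM, using the random nonce already
+	// written to obfuscatedPacket, so the wire format is [nonce][ciphertext+tag].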
+ + block, err := aes.NewCipher(obfuscationSecret[:]) + if err != nil { + return nil, errors.Trace(err) + } + + aesgcm, err := cipher.NewGCM(block) + if err != nil { + return nil, errors.Trace(err) + } + + obfuscatedPacket = aesgcm.Seal( + obfuscatedPacket, + obfuscatedPacket[:obfuscationSessionPacketNonceSize], + paddedPacket, + nil) + + return obfuscatedPacket, nil +} + +// deobfuscateSessionPacket deobfuscates a session packet obfuscated with +// obfuscateSessionPacket and the same deobfuscateSessionPacket. +// +// Responders must supply an obfuscationReplayHistory, which checks for +// replayed session packets (within the time factor). Responders should drop +// into anti-probing response behavior when deobfuscateSessionPacket returns +// an error: the obfuscated packet may have been created by a prober without +// the correct secret; or replayed by a prober. +func deobfuscateSessionPacket( + obfuscationSecret ObfuscationSecret, + isInitiator bool, + replayHistory *obfuscationReplayHistory, + obfuscatedPacket []byte) ([]byte, error) { + + // A responder must provide a relay history, or it's misconfigured. + if isInitiator == (replayHistory != nil) { + return nil, errors.TraceNew("unexpected replay history") + } + + // imitateDeobfuscateSessionPacketDuration is called in early failure + // cases to imitate the elapsed time of lookups and cryptographic + // operations that would otherwise be skipped. This is intended to + // mitigate timing attacks by probers. + // + // Limitation: this doesn't result in a constant time. + + if len(obfuscatedPacket) < obfuscationSessionPacketNonceSize { + imitateDeobfuscateSessionPacketDuration(replayHistory) + return nil, errors.TraceNew("invalid nonce") + } + + nonce := obfuscatedPacket[:obfuscationSessionPacketNonceSize] + + if replayHistory != nil && replayHistory.Lookup(nonce) { + imitateDeobfuscateSessionPacketDuration(nil) + return nil, errors.TraceNew("replayed nonce") + } + + // As an AEAD, AES-GCM authenticates that the sender used the expected + // key, and so has the root obfuscation secret. + + block, err := aes.NewCipher(obfuscationSecret[:]) + if err != nil { + return nil, errors.Trace(err) + } + + aesgcm, err := cipher.NewGCM(block) + if err != nil { + return nil, errors.Trace(err) + } + + plaintext, err := aesgcm.Open( + nil, + nonce, + obfuscatedPacket[obfuscationSessionPacketNonceSize:], + nil) + if err != nil { + return nil, errors.Trace(err) + } + + n := 0 + offset := 0 + timestamp := int64(0) + if replayHistory != nil { + timestamp, n = binary.Varint(plaintext[offset:]) + if timestamp == 0 && n <= 0 { + return nil, errors.TraceNew("invalid timestamp") + } + offset += n + } + paddingSize, n := binary.Uvarint(plaintext[offset:]) + if n < 1 { + return nil, errors.TraceNew("invalid padding size") + } + offset += n + if len(plaintext[offset:]) < int(paddingSize) { + return nil, errors.TraceNew("invalid padding") + } + offset += int(paddingSize) + + if replayHistory != nil { + + // Accept the initiator's timestamp only if it's within +/- + // antiReplayTimeFactorPeriodSeconds/2 of the responder's clock. This + // step discards packets that are outside the range of the replay history. + + now := time.Now().Unix() + if timestamp+antiReplayTimeFactorPeriodSeconds/2 < now { + return nil, errors.TraceNew("timestamp behind") + } + if timestamp-antiReplayTimeFactorPeriodSeconds/2 > now { + return nil, errors.TraceNew("timestamp ahead") + } + + // Now that it's validated, add this packet to the replay history. 
The + // nonce is expected to be unique, so it's used as the history key. + + replayHistory.Insert(nonce) + } + + return plaintext[offset:], nil +} + +func imitateDeobfuscateSessionPacketDuration(replayHistory *obfuscationReplayHistory) { + + // Limitations: only one block is decrypted; crypto/aes or + // crypto/cipher.GCM may not be constant time, depending on hardware + // support; at best, this all-zeros invocation will make it as far as + // GCM.Open, and not check padding. + + const ( + blockSize = 16 + tagSize = 16 + ) + var secret ObfuscationSecret + var packet [obfuscationSessionPacketNonceSize + blockSize + tagSize]byte + if replayHistory != nil { + _ = replayHistory.Lookup(packet[:obfuscationSessionPacketNonceSize]) + } + _, _ = deobfuscateSessionPacket(secret, true, nil, packet[:]) +} + +// obfuscationReplayHistory provides a lookup for recently observed obfuscated +// session packet nonces. History is maintained for +// 2*antiReplayTimeFactorPeriodSeconds; it's assumed that older packets, if +// replayed, will fail to deobfuscate due to using an expired timestamp. +type obfuscationReplayHistory struct { + mutex sync.Mutex + filters [2]*bloom.BloomFilter + currentFilter int + switchTime time.Time +} + +func newObfuscationReplayHistory() *obfuscationReplayHistory { + + // Replay history is implemented using bloom filters, which use fixed + // space overhead, and less space overhead than storing nonces explictly + // under anticipated loads. With bloom filters, false positive lookups + // are possible, but false negative lookups are not. So there's a small + // chance that a non-replayed nonce will be flagged as in the history, + // but no chance that a replayed nonce will pass as not in the history. + // + // With obfuscationAntiReplayHistorySize set to 10M and a false positive + // rate of 0.001, the session_test test case with 10k clients making 100 + // requests each all within one time period consistently produces no + // false positives. + // + // Memory overhead is approximately 18MB per bloom filter, so 18MB x 2. + // From: + // + // m, _ := bloom.EstimateParameters(10_000_000, 0.001) --> 143775876 + // bitset.New(143775876).BinaryStorageSize() --> approx. 18MB in terms of + // underlying bits-and-blooms/bitset.BitSet + // + // To accomodate the rolling time factor window, there are two rotating + // bloom filters. + + return &obfuscationReplayHistory{ + filters: [2]*bloom.BloomFilter{ + bloom.NewWithEstimates(obfuscationAntiReplayHistorySize, 0.001), + bloom.NewWithEstimates(obfuscationAntiReplayHistorySize, 0.001), + }, + currentFilter: 0, + switchTime: time.Now(), + } +} + +func (h *obfuscationReplayHistory) Insert(value []byte) { + h.mutex.Lock() + defer h.mutex.Unlock() + + h.switchFilters() + + h.filters[h.currentFilter].Add(value) +} + +func (h *obfuscationReplayHistory) Lookup(value []byte) bool { + h.mutex.Lock() + defer h.mutex.Unlock() + + h.switchFilters() + + return h.filters[0].Test(value) || + h.filters[1].Test(value) +} + +func (h *obfuscationReplayHistory) switchFilters() { + + // Assumes caller holds h.mutex lock. 
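+	//
+	// Rotation: once antiReplayTimeFactorPeriodSeconds has elapsed since the
+	// last switch, the older filter becomes the current filter and is
+	// cleared, so Insert writes to a fresh filter while Lookup continues to
+	// consult both filters, covering roughly the previous period as well.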
+ + now := time.Now() + if h.switchTime.Before(now.Add(-time.Duration(antiReplayTimeFactorPeriodSeconds) * time.Second)) { + h.currentFilter = (h.currentFilter + 1) % 2 + h.filters[h.currentFilter].ClearAll() + h.switchTime = now + } +} diff --git a/psiphon/common/inproxy/obfuscation_test.go b/psiphon/common/inproxy/obfuscation_test.go new file mode 100644 index 000000000..2987e0b0e --- /dev/null +++ b/psiphon/common/inproxy/obfuscation_test.go @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "bytes" + "testing" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" +) + +func FuzzSessionPacketDeobfuscation(f *testing.F) { + + packet := prng.Padding(100, 1000) + minPadding := 1 + maxPadding := 1000 + + rootSecret, err := GenerateRootObfuscationSecret() + if err != nil { + f.Fatalf(errors.Trace(err).Error()) + } + + n := 10 + + originals := make([][]byte, n) + + for i := 0; i < n; i++ { + + obfuscatedPacket, err := obfuscateSessionPacket( + rootSecret, true, packet, minPadding, maxPadding) + if err != nil { + f.Fatalf(errors.Trace(err).Error()) + } + + originals[i] = obfuscatedPacket + + f.Add(obfuscatedPacket) + } + + f.Fuzz(func(t *testing.T, obfuscatedPacket []byte) { + + // Make a new history each time to bypass the replay check and focus + // on fuzzing the parsing code. + + _, err := deobfuscateSessionPacket( + rootSecret, + false, + newObfuscationReplayHistory(), + obfuscatedPacket) + + // Only the original, valid messages should successfully deobfuscate. + + inOriginals := false + for i := 0; i < n; i++ { + if bytes.Equal(originals[i], obfuscatedPacket) { + inOriginals = true + break + } + } + + if (err == nil) != inOriginals { + f.Errorf("unexpected deobfuscation result") + } + }) +} + +func TestSessionPacketObfuscation(t *testing.T) { + err := runTestSessionPacketObfuscation() + if err != nil { + t.Errorf(errors.Trace(err).Error()) + } +} + +func runTestSessionPacketObfuscation() error { + + // Use a replay time period factor more suitable for test runs. 
+ + originalAntiReplayTimeFactorPeriodSeconds := antiReplayTimeFactorPeriodSeconds + antiReplayTimeFactorPeriodSeconds = 2 + defer func() { + antiReplayTimeFactorPeriodSeconds = originalAntiReplayTimeFactorPeriodSeconds + }() + + rootSecret, err := GenerateRootObfuscationSecret() + if err != nil { + return errors.Trace(err) + } + + initiatorSendSecret, initiatorReceiveSecret, err := + deriveSessionPacketObfuscationSecrets(rootSecret, true) + if err != nil { + return errors.Trace(err) + } + + responderSendSecret, responderReceiveSecret, err := + deriveSessionPacketObfuscationSecrets(rootSecret, false) + if err != nil { + return errors.Trace(err) + } + + replayHistory := newObfuscationReplayHistory() + + // Test: obfuscate/deobfuscate initiator -> responder + + packet := prng.Bytes(1000) + minPadding := 1 + maxPadding := 1000 + + obfuscatedPacket1, err := obfuscateSessionPacket( + initiatorSendSecret, true, packet, minPadding, maxPadding) + if err != nil { + return errors.Trace(err) + } + + packet1, err := deobfuscateSessionPacket( + responderReceiveSecret, false, replayHistory, obfuscatedPacket1) + if err != nil { + return errors.Trace(err) + } + + if !bytes.Equal(packet1, packet) { + return errors.TraceNew("unexpected deobfuscated packet") + } + + // Test: replay packet + + _, err = deobfuscateSessionPacket( + responderReceiveSecret, false, replayHistory, obfuscatedPacket1) + if err == nil { + return errors.TraceNew("unexpected replay success") + } + + // Test: replay packet after time factor period + + time.Sleep(time.Duration(antiReplayTimeFactorPeriodSeconds) * time.Second) + + _, err = deobfuscateSessionPacket( + responderReceiveSecret, false, replayHistory, obfuscatedPacket1) + if err == nil { + return errors.TraceNew("unexpected replay success") + } + + // Test: different packet sizes (due to padding) + + n := 10 + for i := 0; i < n; i++ { + obfuscatedPacket2, err := obfuscateSessionPacket( + initiatorSendSecret, true, packet, minPadding, maxPadding) + if err != nil { + return errors.Trace(err) + } + if len(obfuscatedPacket1) != len(obfuscatedPacket2) { + break + } + if i == n-1 { + return errors.TraceNew("unexpected same size") + } + } + + // Test: obfuscate/deobfuscate responder -> initiator + + obfuscatedPacket2, err := obfuscateSessionPacket( + responderSendSecret, false, packet, minPadding, maxPadding) + if err != nil { + return errors.Trace(err) + } + + packet2, err := deobfuscateSessionPacket( + initiatorReceiveSecret, true, nil, obfuscatedPacket2) + if err != nil { + return errors.Trace(err) + } + + if !bytes.Equal(packet2, packet) { + return errors.TraceNew("unexpected deobfuscated packet") + } + + // Test: initiator -> initiator + + obfuscatedPacket1, err = obfuscateSessionPacket( + initiatorSendSecret, true, packet, minPadding, maxPadding) + if err != nil { + return errors.Trace(err) + } + + _, err = deobfuscateSessionPacket( + initiatorReceiveSecret, true, nil, obfuscatedPacket1) + if err == nil { + return errors.TraceNew("unexpected initiator -> initiator success") + } + + // Test: responder -> responder + + obfuscatedPacket2, err = obfuscateSessionPacket( + responderSendSecret, false, packet, minPadding, maxPadding) + if err != nil { + return errors.Trace(err) + } + + _, err = deobfuscateSessionPacket( + responderReceiveSecret, false, newObfuscationReplayHistory(), obfuscatedPacket2) + if err == nil { + return errors.TraceNew("unexpected responder -> responder success") + } + + // Test: distinct keys derived for each direction + + isInitiator := true + secret1, err := 
deriveSessionPacketObfuscationSecret( + rootSecret, isInitiator, true) + if err != nil { + return errors.Trace(err) + } + + isInitiator = false + secret2, err := deriveSessionPacketObfuscationSecret( + rootSecret, isInitiator, true) + if err != nil { + return errors.Trace(err) + } + + err = testMostlyDifferent(secret1[:], secret2[:]) + if err != nil { + return errors.Trace(err) + } + + // Test: for identical packet with same padding and derived key, most + // bytes different (due to nonce) + + padding := 100 + + obfuscatedPacket1, err = obfuscateSessionPacket( + initiatorSendSecret, true, packet, padding, padding) + if err != nil { + return errors.Trace(err) + } + + obfuscatedPacket2, err = obfuscateSessionPacket( + initiatorSendSecret, true, packet, padding, padding) + if err != nil { + return errors.Trace(err) + } + + err = testMostlyDifferent(obfuscatedPacket1, obfuscatedPacket2) + if err != nil { + return errors.Trace(err) + } + + // Test: uniformly random + + for _, isInitiator := range []bool{true, false} { + + err = testEntropy(func() ([]byte, error) { + secret := initiatorSendSecret + if !isInitiator { + secret = responderSendSecret + } + obfuscatedPacket, err := obfuscateSessionPacket( + secret, isInitiator, packet, padding, padding) + if err != nil { + return nil, errors.Trace(err) + } + return obfuscatedPacket, nil + }) + if err != nil { + return errors.Trace(err) + } + } + + // Test: wrong obfuscation secret + + wrongRootSecret, err := GenerateRootObfuscationSecret() + if err != nil { + return errors.Trace(err) + } + + wrongInitiatorSendSecret, _, err := + deriveSessionPacketObfuscationSecrets(wrongRootSecret, true) + if err != nil { + return errors.Trace(err) + } + + obfuscatedPacket1, err = obfuscateSessionPacket( + wrongInitiatorSendSecret, true, packet, minPadding, maxPadding) + if err != nil { + return errors.Trace(err) + } + + _, err = deobfuscateSessionPacket( + responderReceiveSecret, false, newObfuscationReplayHistory(), obfuscatedPacket1) + if err == nil { + return errors.TraceNew("unexpected wrong secret success") + } + + // Test: truncated obfuscated packet + + obfuscatedPacket1, err = obfuscateSessionPacket( + initiatorSendSecret, true, packet, minPadding, maxPadding) + if err != nil { + return errors.Trace(err) + } + + obfuscatedPacket1 = obfuscatedPacket1[:len(obfuscatedPacket1)-1] + + _, err = deobfuscateSessionPacket( + responderReceiveSecret, false, newObfuscationReplayHistory(), obfuscatedPacket1) + if err == nil { + return errors.TraceNew("unexpected truncated packet success") + } + + // Test: flip byte + + obfuscatedPacket1, err = obfuscateSessionPacket( + initiatorSendSecret, true, packet, minPadding, maxPadding) + if err != nil { + return errors.Trace(err) + } + + obfuscatedPacket1[len(obfuscatedPacket1)-1] ^= 1 + + _, err = deobfuscateSessionPacket( + responderReceiveSecret, false, newObfuscationReplayHistory(), obfuscatedPacket1) + if err == nil { + return errors.TraceNew("unexpected modified packet success") + } + + return nil +} + +func TestObfuscationReplayHistory(t *testing.T) { + err := runTestObfuscationReplayHistory() + if err != nil { + t.Errorf(errors.Trace(err).Error()) + } +} + +func runTestObfuscationReplayHistory() error { + + replayHistory := newObfuscationReplayHistory() + + size := obfuscationSessionPacketNonceSize + + count := int(obfuscationAntiReplayHistorySize / 100) + + // Test: values found as expected; no false positives + + for i := 0; i < count; i++ { + + value := prng.Bytes(size) + + if replayHistory.Lookup(value) { + return 
errors.Tracef("value found on iteration %d", i) + } + + replayHistory.Insert(value) + + if !replayHistory.Lookup(value) { + return errors.Tracef("value not found on iteration %d", i) + } + } + + return nil +} + +func testMostlyDifferent(a, b []byte) error { + + if len(a) != len(b) { + return errors.TraceNew("unexpected different size") + } + + equalBytes := 0 + for i := 0; i < len(a); i++ { + if a[i] == b[i] { + equalBytes += 1 + } + } + + // TODO: use a stricter threshold? + if equalBytes > len(a)/10 { + return errors.Tracef("unexpected similar bytes: %d/%d", equalBytes, len(a)) + } + + return nil +} + +func testEntropy(f func() ([]byte, error)) error { + + bitCount := make(map[int]int) + + n := 10000 + + for i := 0; i < n; i++ { + + value, err := f() + if err != nil { + return errors.Trace(err) + } + + for j := 0; j < len(value); j++ { + for k := 0; k < 8; k++ { + bit := (uint8(value[j]) >> k) & 0x1 + bitCount[(j*8)+k] += int(bit) + } + } + + } + + // TODO: use a stricter threshold? + for index, count := range bitCount { + if count < n/3 || count > 2*n/3 { + return errors.Tracef("unexpected entropy at %d: %v", index, bitCount) + } + } + + return nil +} diff --git a/psiphon/common/inproxy/portmapper.go b/psiphon/common/inproxy/portmapper.go new file mode 100644 index 000000000..d1ec94a76 --- /dev/null +++ b/psiphon/common/inproxy/portmapper.go @@ -0,0 +1,252 @@ +//go:build PSIPHON_ENABLE_INPROXY + +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "context" + "fmt" + "sync" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "tailscale.com/net/portmapper" + "tailscale.com/util/clientmetric" +) + +// initPortMapper resets port mapping metrics state associated with the +// current network when the network changes, as indicated by +// WebRTCDialCoordinator.NetworkID. initPortMapper also configures the port +// mapping routines to use WebRTCDialCoordinator.BindToDevice. Varying +// WebRTCDialCoordinator.BindToDevice between dials in a single process is not +// supported. +func initPortMapper(coordinator WebRTCDialCoordinator) { + + // It's safe for multiple, concurrent client dials to call + // resetRespondingPortMappingTypes: as long as the network ID does not + // change, calls won't clear any valid port mapping type metrics that + // were just recorded. + resetRespondingPortMappingTypes(coordinator.NetworkID()) + + // WebRTCDialCoordinator.BindToDevice is set as a global variable in + // tailscale.com/net/portmapper. It's safe to repeatedly call + // setPortMapperBindToDevice here, under the assumption that + // WebRTCDialCoordinator.BindToDevice is the same single, static function + // for all dials. This assumption is true for Psiphon. 
+ setPortMapperBindToDevice(coordinator) +} + +// portMapper represents a UDP port mapping from a local port to an external, +// publicly addressable IP and port. Port mapping is implemented using +// tailscale.com/net/portmapper, which probes the local network and gateway +// for UPnP-IGD, NAT-PMP, and PCP port mapping capabilities. +type portMapper struct { + havePortMappingOnce sync.Once + portMappingAddress chan string + client *portmapper.Client +} + +// newPortMapper initializes a new port mapper, configured to map to the +// specified localPort. newPortMapper does not initiate any network +// operations (it's safe to call when DisablePortMapping is set). +func newPortMapper( + logger common.Logger, + localPort int) *portMapper { + + portMappingLogger := func(format string, args ...any) { + logger.WithTrace().Info("port mapping: " + fmt.Sprintf(format, args)) + } + + p := &portMapper{ + portMappingAddress: make(chan string, 1), + } + + // This code assumes assumes tailscale NewClient call does only + // initialization; this is the case as of tailscale.com/net/portmapper + // v1.36.2. + // + // This code further assumes that the onChanged callback passed to + // NewClient will not be invoked until after the + // GetCachedMappingOrStartCreatingOne call in portMapper.start; and so + // the p.client reference within callback will be valid. + + client := portmapper.NewClient(portMappingLogger, nil, nil, func() { + p.havePortMappingOnce.Do(func() { + address, ok := p.client.GetCachedMappingOrStartCreatingOne() + if ok { + // With sync.Once and a buffer size of 1, this send won't block. + p.portMappingAddress <- address.String() + } else { + + // This is not an expected case; there should be a port + // mapping when NewClient is invoked. + // + // TODO: deliver "" to the channel? Otherwise, receiving on + // portMapper.portMappingExternalAddress will hang, or block + // until a context is done. + portMappingLogger("unexpected missing port mapping") + } + }) + }) + + p.client = client + + p.client.SetLocalPort(uint16(localPort)) + + return p +} + +// start initiates the port mapping attempt. +func (p *portMapper) start() { + _, _ = p.client.GetCachedMappingOrStartCreatingOne() +} + +// portMappingExternalAddress returns a channel which receives a successful +// port mapping external address, if any. +func (p *portMapper) portMappingExternalAddress() <-chan string { + return p.portMappingAddress +} + +// close releases the port mapping +func (p *portMapper) close() error { + return errors.Trace(p.client.Close()) +} + +// probePortMapping discovers and reports which port mapping protocols are +// supported on this network. probePortMapping does not establish a port mapping. +// +// It is intended that in-proxies amake a blocking call to probePortMapping on +// start up (and after a network change) in order to report fresh port +// mapping type metrics, for matching optimization in the ProxyAnnounce +// request. Clients don't incur the delay of a probe call -- which produces +// no port mapping -- and instead opportunistically grab port mapping type +// metrics via getRespondingPortMappingTypes. 
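A hedged, in-package sketch of the portMapper lifecycle defined above (these identifiers are unexported, so this is a fragment rather than a standalone program); the logger, the dial ctx, and the local port value are assumed to be supplied by the caller:

```go
// Assumes: logger is a common.Logger, ctx is the dial context, and 32768 is
// an arbitrary, already-selected local UDP port.
p := newPortMapper(logger, 32768)
p.start()
defer p.close()

select {
case externalAddr := <-p.portMappingExternalAddress():
	// A mapping was established; externalAddr is the public IP:port, e.g.
	// usable as a port mapping ICE candidate.
	_ = externalAddr
case <-ctx.Done():
	// No port mapping was obtained before the dial was cancelled.
}
```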
+func probePortMapping( + ctx context.Context, + logger common.Logger) (PortMappingTypes, error) { + + portMappingLogger := func(format string, args ...any) { + logger.WithTrace().Info("port mapping probe: " + fmt.Sprintf(format, args)) + } + + client := portmapper.NewClient(portMappingLogger, nil, nil, nil) + defer client.Close() + + result, err := client.Probe(ctx) + if err != nil { + return nil, errors.Trace(err) + } + + portMappingTypes := PortMappingTypes{} + if result.UPnP { + portMappingTypes = append(portMappingTypes, PortMappingTypeUPnP) + } + if result.PMP { + portMappingTypes = append(portMappingTypes, PortMappingTypePMP) + } + if result.PCP { + portMappingTypes = append(portMappingTypes, PortMappingTypePCP) + } + + // An empty lists means discovery is needed or the available port mappings + // are unknown; a list with None indicates that a probe returned no + // supported port mapping types. + + if len(portMappingTypes) == 0 { + portMappingTypes = append(portMappingTypes, PortMappingTypeNone) + } + + return portMappingTypes, nil +} + +var respondingPortMappingTypesMutex sync.Mutex +var respondingPortMappingTypesNetworkID string + +// resetRespondingPortMappingTypes clears tailscale.com/net/portmapper global +// metrics fields which indicate which port mapping types are responding on +// the current network. These metrics should be cleared whenever the current +// network changes, as indicated by networkID. +// +// Limitations: there may be edge conditions where a +// tailscale.com/net/portmapper client logs metrics concurrent to +// resetRespondingPortMappingTypes being called with a new networkID. If +// incorrect port mapping type metrics are supported, the Broker may log +// incorrect statistics. However, Broker client/in-proxy matching is based on +// actually established port mappings. +func resetRespondingPortMappingTypes(networkID string) { + + respondingPortMappingTypesMutex.Lock() + defer respondingPortMappingTypesMutex.Unlock() + + if respondingPortMappingTypesNetworkID != networkID { + // Iterating over all metric fields appears to be the only API available. + for _, metric := range clientmetric.Metrics() { + switch metric.Name() { + case "portmap_upnp_ok", "portmap_pmp_ok", "portmap_pcp_ok": + metric.Set(0) + } + } + respondingPortMappingTypesNetworkID = networkID + } +} + +// getRespondingPortMappingTypes returns the port mapping types that responded +// during recent portMapper.start invocations as well as probePortMapping +// invocations. The returned list is used for reporting metrics. See +// resetRespondingPortMappingTypes for considerations due to accessing +// tailscale.com/net/portmapper global metrics fields. +// +// To avoid delays, we do not run probePortMapping for regular client dials, +// and so instead use this tailscale.com/net/portmapper metrics field +// approach. +// +// Limitations: the return value represents all port mapping types that +// responded in this session, since the last network change +// (resetRespondingPortMappingTypes call); and do not indicate which of +// several port mapping types may have been used for a particular dial. 
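A hedged, in-package sketch of the blocking probe described above, roughly as an in-proxy might run it at startup or after a network change; the logger, the 10 second timeout, and the error handling are illustrative assumptions:

```go
// Assumes: logger is a common.Logger.
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()

portMappingTypes, err := probePortMapping(ctx, logger)
if err != nil {
	// A failed probe is not fatal: announcing can proceed and the port
	// mapping type metrics are simply omitted.
} else {
	// portMappingTypes is reported to the broker to assist matching; a
	// network with no support yields PortMappingTypes{PortMappingTypeNone}.
	_ = portMappingTypes
}
```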
+func getRespondingPortMappingTypes(networkID string) PortMappingTypes { + + respondingPortMappingTypesMutex.Lock() + defer respondingPortMappingTypesMutex.Unlock() + + portMappingTypes := PortMappingTypes{} + + if respondingPortMappingTypesNetworkID != networkID { + // The network changed since the last resetRespondingPortMappingTypes + // call, and resetRespondingPortMappingTypes has not yet been called + // again. Ignore the current metrics. + return portMappingTypes + } + + // Iterating over all metric fields appears to be the only API available. + for _, metric := range clientmetric.Metrics() { + if metric.Name() == "portmap_upnp_ok" && metric.Value() > 1 { + portMappingTypes = append(portMappingTypes, PortMappingTypeUPnP) + } + if metric.Name() == "portmap_pmp_ok" && metric.Value() > 1 { + portMappingTypes = append(portMappingTypes, PortMappingTypePMP) + } + if metric.Name() == "portmap_pcp_ok" && metric.Value() > 1 { + portMappingTypes = append(portMappingTypes, PortMappingTypePCP) + } + } + return portMappingTypes +} diff --git a/psiphon/common/inproxy/portmapper_android.go b/psiphon/common/inproxy/portmapper_android.go new file mode 100644 index 000000000..eca778fb4 --- /dev/null +++ b/psiphon/common/inproxy/portmapper_android.go @@ -0,0 +1,30 @@ +//go:build PSIPHON_ENABLE_INPROXY && android + +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "tailscale.com/net/netns" +) + +func setPortMapperBindToDevice(coordinator WebRTCDialCoordinator) { + netns.SetAndroidProtectFunc(coordinator.BindToDevice) +} diff --git a/psiphon/common/inproxy/portmapper_other.go b/psiphon/common/inproxy/portmapper_other.go new file mode 100644 index 000000000..e06d23322 --- /dev/null +++ b/psiphon/common/inproxy/portmapper_other.go @@ -0,0 +1,28 @@ +//go:build PSIPHON_ENABLE_INPROXY && !android + +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +func setPortMapperBindToDevice(_ WebRTCDialCoordinator) { + // BindToDevice is not applied on iOS as tailscale.com/net/netns does not + // have an equivilent to SetAndroidProtectFunc for iOS. At this time, + // BindToDevice operations on iOS are legacy code and not required. 
+} diff --git a/psiphon/common/inproxy/proxy.go b/psiphon/common/inproxy/proxy.go new file mode 100644 index 000000000..05610f7ec --- /dev/null +++ b/psiphon/common/inproxy/proxy.go @@ -0,0 +1,925 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "context" + "io" + "sync" + "sync/atomic" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" +) + +const ( + proxyAnnounceDelay = 1 * time.Second + proxyAnnounceDelayJitter = 0.5 + proxyAnnounceMaxBackoffDelay = 1 * time.Hour + proxyWebRTCAnswerTimeout = 20 * time.Second + proxyDestinationDialTimeout = 20 * time.Second +) + +// Proxy is the in-proxy proxying component, which relays traffic from a +// client to a Psiphon server. +type Proxy struct { + // Note: 64-bit ints used with atomic operations are placed + // at the start of struct to ensure 64-bit alignment. + // (https://golang.org/pkg/sync/atomic/#pkg-note-BUG) + bytesUp int64 + bytesDown int64 + peakBytesUp int64 + peakBytesDown int64 + connectingClients int32 + connectedClients int32 + + config *ProxyConfig + activityUpdateWrapper *activityUpdateWrapper + + networkDiscoveryMutex sync.Mutex + networkDiscoveryRunOnce bool + networkDiscoveryNetworkID string + + nextAnnounceMutex sync.Mutex + nextAnnounceBrokerClient *BrokerClient + nextAnnounceNotBefore time.Time +} + +// TODO: add PublicNetworkAddress/ListenNetworkAddress to facilitate manually +// configured, permanent port mappings. + +// ProxyConfig specifies the configuration for a Proxy run. +type ProxyConfig struct { + + // Logger is used to log events. + Logger common.Logger + + // EnableWebRTCDebugLogging indicates whether to emit WebRTC debug logs. + EnableWebRTCDebugLogging bool + + // WaitForNetworkConnectivity is a callback that should block until there + // is network connectivity or shutdown. The return value is true when + // there is network connectivity, and false for shutdown. + WaitForNetworkConnectivity func() bool + + // GetBrokerClient provides a BrokerClient which the proxy will use for + // making broker requests. If GetBrokerClient returns a shared + // BrokerClient instance, the BrokerClient must support multiple, + // concurrent round trips, as the proxy will use it to concurrently + // announce many proxy instances. The BrokerClient should be implemented + // using multiplexing over a shared network connection -- for example, + // HTTP/2 -- and a shared broker session for optimal performance. + GetBrokerClient func() (*BrokerClient, error) + + // GetBaseAPIParameters returns Psiphon API parameters to be sent to and + // logged by the broker. 
Expected parameters include client/proxy + // application and build version information. GetBaseAPIParameters also + // returns the network ID, corresponding to the parameters, to be used in + // tactics logic; the network ID is not sent to the broker. + GetBaseAPIParameters func() (common.APIParameters, string, error) + + // MakeWebRTCDialCoordinator provides a WebRTCDialCoordinator which + // specifies WebRTC-related dial parameters, including selected STUN + // server addresses; network topology information for the current netork; + // NAT logic settings; and other settings. + // + // MakeWebRTCDialCoordinator is invoked for each proxy/client connection, + // and the provider can select new parameters per connection as reqired. + MakeWebRTCDialCoordinator func() (WebRTCDialCoordinator, error) + + // HandleTacticsPayload is a callback that receives any tactics payload, + // provided by the broker in proxy announcement request responses. + // HandleTacticsPayload must return true when the tacticsPayload includes + // new tactics, indicating that the proxy should reinitialize components + // controlled by tactics parameters. + HandleTacticsPayload func(networkID string, tacticsPayload []byte) bool + + // OperatorMessageHandler is a callback that is invoked with any user + // message JSON object that is sent to the Proxy from the Broker. This + // facility may be used to alert proxy operators when required. The JSON + // object schema is arbitrary and not defined here. + OperatorMessageHandler func(messageJSON string) + + // MaxClients is the maximum number of clients that are allowed to connect + // to the proxy. + MaxClients int + + // LimitUpstreamBytesPerSecond limits the upstream data transfer rate for + // a single client. When 0, there is no limit. + LimitUpstreamBytesPerSecond int + + // LimitDownstreamBytesPerSecond limits the downstream data transfer rate + // for a single client. When 0, there is no limit. + LimitDownstreamBytesPerSecond int + + // ActivityUpdater specifies an ActivityUpdater for activity associated + // with this proxy. + ActivityUpdater ActivityUpdater +} + +// ActivityUpdater is a callback that is invoked when clients connect and +// disconnect and periodically with data transfer updates (unless idle). This +// callback may be used to update an activity UI. This callback should post +// this data to another thread or handler and return immediately and not +// block on UI updates. +type ActivityUpdater func( + connectingClients int32, + connectedClients int32, + bytesUp int64, + bytesDown int64, + bytesDuration time.Duration) + +// NewProxy initializes a new Proxy with the specified configuration. +func NewProxy(config *ProxyConfig) (*Proxy, error) { + + p := &Proxy{ + config: config, + } + + p.activityUpdateWrapper = &activityUpdateWrapper{p: p} + + return p, nil +} + +// activityUpdateWrapper implements the psiphon/common.ActivityUpdater +// interface and is used to receive bytes transferred updates from the +// ActivityConns wrapping proxied traffic. A wrapper is used so that +// UpdateProgress is not exported from Proxy. +type activityUpdateWrapper struct { + p *Proxy +} + +func (w *activityUpdateWrapper) UpdateProgress(bytesRead, bytesWritten int64, _ int64) { + atomic.AddInt64(&w.p.bytesUp, bytesWritten) + atomic.AddInt64(&w.p.bytesDown, bytesRead) +} + +// Run runs the proxy. 
The proxy sends requests to the Broker announcing its +// availability; the Broker matches the proxy with clients, and facilitates +// an exchange of WebRTC connection information; the proxy and each client +// attempt to establish a connection; and the client's traffic is relayed to +// Psiphon server. +// +// Run ends when ctx is Done. A proxy run may continue across underlying +// network changes assuming that the ProxyConfig GetBrokerClient and +// MakeWebRTCDialCoordinator callbacks react to network changes and provide +// instances that are reflect network changes. +func (p *Proxy) Run(ctx context.Context) { + + // Run MaxClient proxying workers. Each worker handles one client at a time. + + proxyWaitGroup := new(sync.WaitGroup) + + // Launch the first proxy worker, passing a signal to be triggered once + // the very first announcement round trip is complete. The first round + // trip is awaited so that: + // + // - The first announce response will arrive with any new tactics, + // avoiding a start up case where MaxClients initial, concurrent + // announces all return with no-match and a tactics payload. + // + // - The first worker gets no announcement delay and is also guaranteed to + // be the shared session establisher. Since the announcement delays are + // applied _after_ waitToShareSession, it would otherwise be possible, + // with a race of MaxClient initial, concurrent announces, for the + // session establisher to be a different worker than the no-delay worker. + + signalFirstAnnounceCtx, signalFirstAnnounceDone := + context.WithCancel(context.Background()) + + proxyWaitGroup.Add(1) + go func() { + defer proxyWaitGroup.Done() + p.proxyClients(ctx, signalFirstAnnounceDone) + }() + + select { + case <-signalFirstAnnounceCtx.Done(): + case <-ctx.Done(): + return + } + + // Launch the remaining workers. + + for i := 0; i < p.config.MaxClients-1; i++ { + proxyWaitGroup.Add(1) + go func() { + defer proxyWaitGroup.Done() + p.proxyClients(ctx, nil) + }() + } + + // Capture activity updates every second, which is the required frequency + // for PeakUp/DownstreamBytesPerSecond. This is also a reasonable + // frequency for invoking the ActivityUpdater and updating UI widgets. + + activityUpdatePeriod := 1 * time.Second + ticker := time.NewTicker(activityUpdatePeriod) + defer ticker.Stop() + +loop: + for { + select { + case <-ticker.C: + p.activityUpdate(activityUpdatePeriod) + case <-ctx.Done(): + break loop + } + } + + proxyWaitGroup.Wait() +} + +// getAnnounceDelayParameters is a helper that fetches the proxy announcement +// delay parameters from the current broker client. +// +// getAnnounceDelayParameters is used to configure a delay when +// proxyOneClient fails. As having no broker clients is a possible +// proxyOneClient failure case, GetBrokerClient errors are ignored here and +// defaults used in that case. 
+func (p *Proxy) getAnnounceDelayParameters() (time.Duration, float64) { + brokerClient, err := p.config.GetBrokerClient() + if err != nil { + return proxyAnnounceDelay, proxyAnnounceDelayJitter + } + brokerCoordinator := brokerClient.GetBrokerDialCoordinator() + return common.ValueOrDefault(brokerCoordinator.AnnounceDelay(), proxyAnnounceDelay), + common.ValueOrDefault(brokerCoordinator.AnnounceDelayJitter(), proxyAnnounceDelayJitter) + +} + +func (p *Proxy) activityUpdate(period time.Duration) { + + connectingClients := atomic.LoadInt32(&p.connectingClients) + connectedClients := atomic.LoadInt32(&p.connectedClients) + bytesUp := atomic.SwapInt64(&p.bytesUp, 0) + bytesDown := atomic.SwapInt64(&p.bytesDown, 0) + + greaterThanSwapInt64(&p.peakBytesUp, bytesUp) + greaterThanSwapInt64(&p.peakBytesDown, bytesDown) + + if connectingClients == 0 && + connectedClients == 0 && + bytesUp == 0 && + bytesDown == 0 { + // Skip the activity callback on idle. + return + } + + p.config.ActivityUpdater( + connectingClients, + connectedClients, + bytesUp, + bytesDown, + period) +} + +func greaterThanSwapInt64(addr *int64, new int64) bool { + + // Limitation: if there are two concurrent calls, the greater value could + // get overwritten. + + old := atomic.LoadInt64(addr) + if new > old { + return atomic.CompareAndSwapInt64(addr, old, new) + } + return false +} + +func (p *Proxy) proxyClients( + ctx context.Context, signalAnnounceDone func()) { + + // Proxy one client, repeating until ctx is done. + // + // This worker starts with posting a long-polling announcement request. + // The broker response with a matched client, and the proxy and client + // attempt to establish a WebRTC connection for relaying traffic. + // + // Limitation: this design may not maximize the utility of the proxy, + // since some proxy/client connections will fail at the WebRTC stage due + // to NAT traversal failure, and at most MaxClient concurrent + // establishments are attempted. Another scenario comes from the Psiphon + // client horse race, which may start in-proxy dials but then abort them + // when some other tunnel protocol succeeds. + // + // As a future enhancement, consider using M announcement goroutines and N + // WebRTC dial goroutines. When an announcement gets a response, + // immediately announce again unless there are already MaxClient active + // connections established. This approach may require the proxy to + // backpedal and reject connections when establishment is too successful. + // + // Another enhancement could be a signal from the client, to the broker, + // relayed to the proxy, when a dial is aborted. + + failureDelayFactor := time.Duration(1) + + for i := 0; ctx.Err() == nil; i++ { + + if !p.config.WaitForNetworkConnectivity() { + break + } + + backOff, err := p.proxyOneClient(ctx, signalAnnounceDone) + + if err != nil && ctx.Err() == nil { + + p.config.Logger.WithTraceFields( + common.LogFields{ + "error": err.Error(), + }).Error("proxy client failed") + + // Apply a simple exponential backoff based on whether + // proxyOneClient either relayed client traffic or got no match, + // or encountered a failure. + // + // The proxyOneClient failure could range from local + // configuration (no broker clients) to network issues(failure to + // completely establish WebRTC connection) and this backoff + // prevents both excess local logging and churning in the former + // case and excessive bad service to clients or unintentionally + // overloading the broker in the latter case. 
+ // + // TODO: specific tactics parameters to control this logic. + + delay, jitter := p.getAnnounceDelayParameters() + + if !backOff { + failureDelayFactor = 1 + } + delay = delay * failureDelayFactor + if delay > proxyAnnounceMaxBackoffDelay { + delay = proxyAnnounceMaxBackoffDelay + } + if failureDelayFactor < 1<<20 { + failureDelayFactor *= 2 + } + + common.SleepWithJitter(ctx, delay, jitter) + } + } +} + +// resetNetworkDiscovery resets the network discovery state, which will force +// another network discovery when doNetworkDiscovery is invoked. +// resetNetworkDiscovery is called when new tactics have been received from +// the broker, as new tactics may change parameters that control network +// discovery. +func (p *Proxy) resetNetworkDiscovery() { + p.networkDiscoveryMutex.Lock() + defer p.networkDiscoveryMutex.Unlock() + + p.networkDiscoveryRunOnce = false + p.networkDiscoveryNetworkID = "" +} + +func (p *Proxy) doNetworkDiscovery( + ctx context.Context, + webRTCCoordinator WebRTCDialCoordinator) { + + // Allow only one concurrent network discovery. In practise, this may + // block all other proxyOneClient goroutines while one single goroutine + // runs doNetworkDiscovery. Subsequently, all other goroutines will find + // networkDiscoveryRunOnce is true and use the cached results. + p.networkDiscoveryMutex.Lock() + defer p.networkDiscoveryMutex.Unlock() + + networkID := webRTCCoordinator.NetworkID() + + if p.networkDiscoveryRunOnce && + p.networkDiscoveryNetworkID == networkID { + // Already ran discovery for this network. + return + } + + // Reset and configure port mapper component, as required. See + // initPortMapper comment. + initPortMapper(webRTCCoordinator) + + // Gather local network NAT/port mapping metrics before sending any + // announce requests. NAT topology metrics are used by the Broker to + // optimize client and in-proxy matching. Unlike the client, we always + // perform this synchronous step here, since waiting doesn't necessarily + // block a client tunnel dial. + + waitGroup := new(sync.WaitGroup) + waitGroup.Add(1) + go func() { + defer waitGroup.Done() + + // NATDiscover may use cached NAT type/port mapping values from + // DialParameters, based on the network ID. If discovery is not + // successful, the proxy still proceeds to announce. + + NATDiscover( + ctx, + &NATDiscoverConfig{ + Logger: p.config.Logger, + WebRTCDialCoordinator: webRTCCoordinator, + }) + + }() + waitGroup.Wait() + + p.networkDiscoveryRunOnce = true + p.networkDiscoveryNetworkID = networkID +} + +func (p *Proxy) proxyOneClient( + ctx context.Context, signalAnnounceDone func()) (bool, error) { + + // Do not trigger back-off unless the proxy successfully announces and + // only then performs poorly. + // + // A no-match response should not trigger back-off, nor should broker + // request transport errors which may include non-200 responses due to + // CDN timeout mismatches or TLS errors due to CDN TLS fingerprint + // incompatibility. + + backOff := false + + // Get a new WebRTCDialCoordinator, which should be configured with the + // latest network tactics. + webRTCCoordinator, err := p.config.MakeWebRTCDialCoordinator() + if err != nil { + return backOff, errors.Trace(err) + } + + // Perform network discovery, to determine NAT type and other network + // topology information that is reported to the broker in the proxy + // announcement and used to optimize proxy/client matching. 
Unlike + // clients, which can't easily delay dials in the tunnel establishment + // horse race, proxies will always perform network discovery. + // doNetworkDiscovery allows only one concurrent discovery and caches + // results for the current network (as determined by + // WebRTCDialCoordinator.NetworkID), so when multiple proxyOneClient + // goroutines call doNetworkDiscovery, at most one discovery is performed + // per network. + p.doNetworkDiscovery(ctx, webRTCCoordinator) + + // Send the announce request. + + // At this point, no NAT traversal operations have been performed by the + // proxy, since its announcement may sit idle for the long-polling period + // and NAT hole punches or port mappings could expire before the + // long-polling period. + // + // As a future enhancement, the proxy could begin gathering WebRTC ICE + // candidates while awaiting a client match, reducing the turn-around + // time after a match. This would make sense if there's high demand for + // proxies, and so hole punches are unlikely to expire while awaiting a client match. + // + // Another possibility may be to prepare and send a full offer SDP in the + // announcement; and have the broker modify either the proxy or client + // offer SDP to produce an answer SDP. In this case, the entire + // ProxyAnswerRequest could be skipped as the WebRTC dial can begin after + // the ProxyAnnounceRequest response (and ClientOfferRequest response). + // + // Furthermore, if a port mapping can be established, instead of using + // WebRTC the proxy could run a Psiphon tunnel protocol listener at the + // mapped port and send the dial information -- including some secret to + // authenticate the client -- in its announcement. The client would then + // receive this direct dial information from the broker and connect. The + // proxy should be able to send keep alives to extend the port mapping + // lifetime. + + brokerClient, err := p.config.GetBrokerClient() + if err != nil { + return backOff, errors.Trace(err) + } + + brokerCoordinator := brokerClient.GetBrokerDialCoordinator() + + // Get the base Psiphon API parameters and additional proxy metrics, + // including performance information, which is sent to the broker in the + // proxy announcement. + // + // tacticsNetworkID is the exact network ID that corresponds to the + // tactics tag sent in the base parameters; this is passed to + // HandleTacticsPayload in order to double check that any tactics + // returned in the proxy announcement response are associated and stored + // with the original network ID. + + metrics, tacticsNetworkID, err := p.getMetrics(webRTCCoordinator) + if err != nil { + return backOff, errors.Trace(err) + } + + // Set a delay before announcing, to stagger the announce request times. + // The delay helps to avoid triggering rate limits or similar errors from + // any intermediate CDN between the proxy and the broker; and provides a + // nudge towards better load balancing across multiple large MaxClients + // proxies, as the broker primarily matches enqueued announces in FIFO + // order, since older announces expire earlier. + // + // The delay is intended to be applied after doNetworkDiscovery, which has + // no reason to be delayed; and also after any waitToShareSession delay, + // as delaying before waitToShareSession can result in the announce + // request times collapsing back together.
Delaying after + // waitToShareSession is handled by brokerClient.ProxyAnnounce, which + // will also extend the base request timeout, as required, to account for + // any deliberate delay. + + requestDelay := time.Duration(0) + announceDelay, announceDelayJitter := p.getAnnounceDelayParameters() + p.nextAnnounceMutex.Lock() + nextDelay := prng.JitterDuration(announceDelay, announceDelayJitter) + if p.nextAnnounceBrokerClient != brokerClient { + // Reset the delay when the broker client changes. + p.nextAnnounceNotBefore = time.Time{} + p.nextAnnounceBrokerClient = brokerClient + } + if p.nextAnnounceNotBefore.IsZero() { + p.nextAnnounceNotBefore = time.Now().Add(nextDelay) + // No delay for the very first announce request, so leave + // requestDelay set to 0. + } else { + requestDelay = time.Until(p.nextAnnounceNotBefore) + if requestDelay < 0 { + // This announce did not arrive until after the next delay had already + // passed, so proceed with no delay. + p.nextAnnounceNotBefore = time.Now().Add(nextDelay) + requestDelay = 0 + } else { + p.nextAnnounceNotBefore = p.nextAnnounceNotBefore.Add(nextDelay) + } + } + p.nextAnnounceMutex.Unlock() + + // A proxy ID is implicitly sent with requests; it's the proxy's session + // public key. + // + // ProxyAnnounce applies an additional request timeout to facilitate + // long-polling. + announceStartTime := time.Now() + announceResponse, err := brokerClient.ProxyAnnounce( + ctx, + requestDelay, + &ProxyAnnounceRequest{ + PersonalCompartmentIDs: brokerCoordinator.PersonalCompartmentIDs(), + Metrics: metrics, + }) + + p.config.Logger.WithTraceFields(common.LogFields{ + "delay": requestDelay.String(), + "elapsedTime": time.Since(announceStartTime).String(), + }).Info("announcement request") + + if err != nil { + return backOff, errors.Trace(err) + } + + if announceResponse.OperatorMessageJSON != "" { + p.config.OperatorMessageHandler(announceResponse.OperatorMessageJSON) + } + + if len(announceResponse.TacticsPayload) > 0 { + + // The TacticsPayload may include new tactics, or may simply signal, + // to the Psiphon client, that its tactics tag remains up-to-date and + // to extend cached tactics TTL. HandleTacticsPayload returns true + // when tactics have changed; in this case we clear cached network + // discovery but proceed with handling the proxy announcement + // response as there may still be a match. + + if p.config.HandleTacticsPayload(tacticsNetworkID, announceResponse.TacticsPayload) { + p.resetNetworkDiscovery() + } + } + + // Signal that the announce round trip is complete. At this point, the + // broker Noise session should be established and any fresh tactics + // applied. + if signalAnnounceDone != nil { + signalAnnounceDone() + } + + // Trigger back-off when rate/entry limited; no back-off for + // no-match. + + if announceResponse.Limited { + + backOff = true + return backOff, errors.TraceNew("limited") + + } else if announceResponse.NoMatch { + + return backOff, errors.TraceNew("no match") + + } + + if announceResponse.ClientProxyProtocolVersion != ProxyProtocolVersion1 { + // This case is currently unexpected, as all clients and proxies use + // ProxyProtocolVersion1. + backOff = true + return backOff, errors.Tracef( + "Unsupported proxy protocol version: %d", + announceResponse.ClientProxyProtocolVersion) + } + + // Trigger back-off if the following WebRTC operations fail to establish a + // connection.
+ // + // Limitation: the proxy answer request to the broker may fail due to the + // non-back-off reasons documented above for the proxy announcment request; + // however, these should be unlikely assuming that the broker client is + // using a persistent transport connection. + + backOff = true + + // For activity updates, indicate that a client connection is now underway. + + atomic.AddInt32(&p.connectingClients, 1) + connected := false + defer func() { + if !connected { + atomic.AddInt32(&p.connectingClients, -1) + } + }() + + // Initialize WebRTC using the client's offer SDP + + webRTCAnswerCtx, webRTCAnswerCancelFunc := context.WithTimeout( + ctx, common.ValueOrDefault(webRTCCoordinator.WebRTCAnswerTimeout(), proxyWebRTCAnswerTimeout)) + defer webRTCAnswerCancelFunc() + + webRTCConn, SDP, sdpMetrics, webRTCErr := newWebRTCConnWithAnswer( + webRTCAnswerCtx, + &webRTCConfig{ + Logger: p.config.Logger, + EnableDebugLogging: p.config.EnableWebRTCDebugLogging, + WebRTCDialCoordinator: webRTCCoordinator, + ClientRootObfuscationSecret: announceResponse.ClientRootObfuscationSecret, + DoDTLSRandomization: announceResponse.DoDTLSRandomization, + TrafficShapingParameters: announceResponse.TrafficShapingParameters, + }, + announceResponse.ClientOfferSDP) + var webRTCRequestErr string + if webRTCErr != nil { + webRTCErr = errors.Trace(webRTCErr) + webRTCRequestErr = webRTCErr.Error() + SDP = WebRTCSessionDescription{} + sdpMetrics = &webRTCSDPMetrics{} + // Continue to report the error to the broker. The broker will respond + // with failure to the client's offer request. + } else { + defer webRTCConn.Close() + } + + // Send answer request with SDP or error. + + _, err = brokerClient.ProxyAnswer( + ctx, + &ProxyAnswerRequest{ + ConnectionID: announceResponse.ConnectionID, + SelectedProxyProtocolVersion: announceResponse.ClientProxyProtocolVersion, + ProxyAnswerSDP: SDP, + ICECandidateTypes: sdpMetrics.iceCandidateTypes, + AnswerError: webRTCRequestErr, + }) + if err != nil { + if webRTCErr != nil { + // Prioritize returning any WebRTC error for logging. + return backOff, webRTCErr + } + return backOff, errors.Trace(err) + } + + // Now that an answer is sent, stop if WebRTC initialization failed. + + if webRTCErr != nil { + return backOff, webRTCErr + } + + // Await the WebRTC connection. + + // We could concurrently dial the destination, to have that network + // connection available immediately once the WebRTC channel is + // established. This would work only for TCP, not UDP, network protocols + // and could only include the TCP connection, as client traffic is + // required for all higher layers such as TLS, SSH, etc. This could also + // create wasted load on destination Psiphon servers, particularly when + // WebRTC connections fail. + + awaitDataChannelCtx, awaitDataChannelCancelFunc := context.WithTimeout( + ctx, + common.ValueOrDefault( + webRTCCoordinator.WebRTCAwaitDataChannelTimeout(), dataChannelAwaitTimeout)) + defer awaitDataChannelCancelFunc() + + err = webRTCConn.AwaitInitialDataChannel(awaitDataChannelCtx) + if err != nil { + return backOff, errors.Trace(err) + } + + p.config.Logger.WithTraceFields(common.LogFields{ + "connectionID": announceResponse.ConnectionID, + }).Info("WebRTC data channel established") + + // Dial the destination, a Psiphon server. The broker validates that the + // dial destination is a Psiphon server. 
+ + destinationDialContext, destinationDialCancelFunc := context.WithTimeout( + ctx, common.ValueOrDefault(webRTCCoordinator.ProxyDestinationDialTimeout(), proxyDestinationDialTimeout)) + defer destinationDialCancelFunc() + + // Use the custom resolver when resolving destination hostnames, such as + // those used in domain fronted protocols. + // + // - Resolving at the in-proxy should yield a more optimal CDN edge, vs. + // resolving at the client. + // + // - Sending unresolved hostnames to in-proxies can expose some domain + // fronting configuration. This can be mitigated by enabling domain + // fronting on this 2nd hop only when the in-proxy is located in a + // region that may be censored or blocked; this is to be enforced by + // the broker. + // + // - Any DNSResolverPreresolved tactics applied will be relative to the + // in-proxy location. + + destinationAddress, err := webRTCCoordinator.ResolveAddress( + ctx, "ip", announceResponse.DestinationAddress) + if err != nil { + return backOff, errors.Trace(err) + } + + destinationConn, err := webRTCCoordinator.ProxyUpstreamDial( + destinationDialContext, + announceResponse.NetworkProtocol.String(), + destinationAddress) + if err != nil { + return backOff, errors.Trace(err) + } + defer destinationConn.Close() + + // For activity updates, indicate that a client connection is established. + + connected = true + atomic.AddInt32(&p.connectingClients, -1) + atomic.AddInt32(&p.connectedClients, 1) + defer func() { + atomic.AddInt32(&p.connectedClients, -1) + }() + + // Throttle the relay connection. + // + // Here, each client gets LimitUp/DownstreamBytesPerSecond. Proxy + // operators may want to limit their bandwidth usage with a single + // up/down value, an overall limit. The ProxyConfig limits can simply be + // generated by dividing the overall limit by MaxClients. This approach favors + // performance stability: each client gets the same throttling limits + // regardless of how many other clients are connected. + + destinationConn = common.NewThrottledConn( + destinationConn, + announceResponse.NetworkProtocol.IsStream(), + common.RateLimits{ + ReadBytesPerSecond: int64(p.config.LimitUpstreamBytesPerSecond), + WriteBytesPerSecond: int64(p.config.LimitDownstreamBytesPerSecond), + }) + + // Hook up bytes transferred counting for activity updates. + + // The ActivityMonitoredConn inactivity timeout is not configured, since + // the Psiphon server will close its connection to inactive clients on + // its own schedule. + + destinationConn, err = common.NewActivityMonitoredConn( + destinationConn, 0, false, nil, p.activityUpdateWrapper) + if err != nil { + return backOff, errors.Trace(err) + } + + // Relay the client traffic to the destination. The client traffic is a + // standard Psiphon tunnel protocol destined for a Psiphon server. Any + // blocking/censorship at the 2nd hop will be mitigated by the use of + // Psiphon circumvention protocols and techniques. + + // Limitation: clients may apply fragmentation to traffic relayed over the + // data channel, and there's no guarantee that the fragmentation write + // sizes or delays will carry over to the egress side. + + // The proxy operator's ISP may be able to observe that the operator's + // host has nearly matching ingress and egress traffic. The traffic + // content won't be the same: the ingress traffic is wrapped in a WebRTC + // data channel, and the egress traffic is a Psiphon tunnel protocol.
+ // With padding and decoy packets, the ingress and egress traffic shape + // will differ beyond the basic WebRTC overhead. Even with this + // measure, over time the number of bytes in and out of the proxy may + // still indicate proxying. + + waitGroup := new(sync.WaitGroup) + relayErrors := make(chan error, 2) + var relayedUp, relayedDown int32 + + waitGroup.Add(1) + go func() { + defer waitGroup.Done() + + // WebRTC data channels are based on SCTP, which is actually + // message-based, not a stream. The (default) max message size for + // pion/sctp is 65536: + // https://github.com/pion/sctp/blob/44ed465396c880e379aae9c1bf81809a9e06b580/association.go#L52. + // + // As io.Copy uses a buffer size of 32K, each relayed message will be + // less than the maximum. Calls to ClientConn.Write are also expected + // to use io.Copy, keeping messages at most 32K in size. + + // io.Copy doesn't return an error on EOF, but we still want to signal + // that relaying is done, so in this case a nil error is sent to the + // channel. + // + // Limitation: if one io.Copy goroutine sends nil and the other + // io.Copy goroutine sends a non-nil error concurrently, the non-nil + // error isn't prioritized. + + n, err := io.Copy(webRTCConn, destinationConn) + if n > 0 { + atomic.StoreInt32(&relayedDown, 1) + } + relayErrors <- errors.Trace(err) + }() + + waitGroup.Add(1) + go func() { + defer waitGroup.Done() + n, err := io.Copy(destinationConn, webRTCConn) + if n > 0 { + atomic.StoreInt32(&relayedUp, 1) + } + relayErrors <- errors.Trace(err) + }() + + select { + case err = <-relayErrors: + case <-ctx.Done(): + } + + // Interrupt the relay goroutines by closing the connections. + webRTCConn.Close() + destinationConn.Close() + + waitGroup.Wait() + + p.config.Logger.WithTraceFields(common.LogFields{ + "connectionID": announceResponse.ConnectionID, + }).Info("connection closed") + + // Don't apply a back-off delay to the next announcement since this + // iteration successfully relayed bytes. + if atomic.LoadInt32(&relayedUp) == 1 || atomic.LoadInt32(&relayedDown) == 1 { + backOff = false + } + + return backOff, err +} + +func (p *Proxy) getMetrics(webRTCCoordinator WebRTCDialCoordinator) (*ProxyMetrics, string, error) { + + // tacticsNetworkID records the exact network ID that corresponds to the + // tactics tag sent in the base parameters, and is used when applying any + // new tactics returned by the broker.
+ baseParams, tacticsNetworkID, err := p.config.GetBaseAPIParameters() + if err != nil { + return nil, "", errors.Trace(err) + } + + packedBaseParams, err := protocol.EncodePackedAPIParameters(baseParams) + if err != nil { + return nil, "", errors.Trace(err) + } + + return &ProxyMetrics{ + BaseAPIParameters: packedBaseParams, + ProxyProtocolVersion: ProxyProtocolVersion1, + NATType: webRTCCoordinator.NATType(), + PortMappingTypes: webRTCCoordinator.PortMappingTypes(), + MaxClients: int32(p.config.MaxClients), + ConnectingClients: atomic.LoadInt32(&p.connectingClients), + ConnectedClients: atomic.LoadInt32(&p.connectedClients), + LimitUpstreamBytesPerSecond: int64(p.config.LimitUpstreamBytesPerSecond), + LimitDownstreamBytesPerSecond: int64(p.config.LimitDownstreamBytesPerSecond), + PeakUpstreamBytesPerSecond: atomic.LoadInt64(&p.peakBytesUp), + PeakDownstreamBytesPerSecond: atomic.LoadInt64(&p.peakBytesDown), + }, tacticsNetworkID, nil +} diff --git a/psiphon/common/inproxy/records.go b/psiphon/common/inproxy/records.go new file mode 100644 index 000000000..700f8f956 --- /dev/null +++ b/psiphon/common/inproxy/records.go @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "encoding/binary" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" + "github.com/fxamacker/cbor/v2" +) + +// Records are CBOR-encoded data with a preamble, or prefix, indicating the +// encoding schema version, data type, and data length. Records include +// session messages, as well as API requests and responses which are session +// message payloads. 
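As a worked example of this preamble (an illustrative, standalone snippet, not part of the change): a record of type recordTypeSessionPacket, defined below, with a 300-byte CBOR payload would be prefixed with the bytes 01 01 ac 02, that is, version 1, type 1, and the uvarint encoding of length 300.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const version, recordType, payloadLength = 1, 1, 300
	preamble := []byte{version, recordType}
	var lengthVarint [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lengthVarint[:], payloadLength)
	preamble = append(preamble, lengthVarint[:n]...)
	fmt.Printf("% x\n", preamble) // prints: 01 01 ac 02
}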
+ +const ( + recordVersion = 1 + + recordTypeFirst = 1 + recordTypeSessionPacket = 1 + recordTypeSessionRoundTrip = 2 + recordTypeAPIProxyAnnounceRequest = 3 + recordTypeAPIProxyAnnounceResponse = 4 + recordTypeAPIProxyAnswerRequest = 5 + recordTypeAPIProxyAnswerResponse = 6 + recordTypeAPIClientOfferRequest = 7 + recordTypeAPIClientOfferResponse = 8 + recordTypeAPIClientRelayedPacketRequest = 9 + recordTypeAPIClientRelayedPacketResponse = 10 + recordTypeAPIBrokerServerReport = 11 + recordTypeLast = 11 +) + +func marshalRecord(record interface{}, recordType int) ([]byte, error) { + payload, err := protocol.CBOREncoding.Marshal(record) + if err != nil { + return nil, errors.Trace(err) + } + payload, err = addRecordPreamble(recordType, payload) + if err != nil { + return nil, errors.Trace(err) + } + return payload, nil +} + +func unmarshalRecord(expectedRecordType int, payload []byte, record interface{}) error { + payload, err := readRecordPreamble(expectedRecordType, payload) + if err != nil { + return errors.Trace(err) + } + err = cbor.Unmarshal(payload, record) + if err != nil { + return errors.Trace(err) + } + return nil +} + +// addRecordPreamble prepends a record preamble to the given record data +// buffer. The input recordType specifies the type to encode; a version +// number identifying the current encoding schema is supplied automatically. +// +// To avoid allocations, addRecordPreamble modifies the input record buffer; +// use like record = append(record, ...). +func addRecordPreamble( + recordType int, record []byte) ([]byte, error) { + + if recordVersion < 0 || recordVersion > 0xff { + return nil, errors.TraceNew("invalid record preamble version") + } + + if recordType < 0 || recordType > 0xff { + return nil, errors.TraceNew("invalid record preamble type") + } + + if len(record) > 0xffff { + return nil, errors.TraceNew("invalid record length") + } + + // The preamble: + // [ 1 byte version ][ 1 byte type ][ varint record data length ][ ...record data ... ] + + var preamble [2 + binary.MaxVarintLen64]byte + preamble[0] = byte(recordVersion) + preamble[1] = byte(recordType) + preambleLen := 2 + binary.PutUvarint(preamble[2:], uint64(len(record))) + + // Attempt to use the input buffer, which will avoid an allocation if it + // has sufficient capacity. + record = append(record, preamble[:preambleLen]...) + copy(record[preambleLen:], record[:len(record)-preambleLen]) + copy(record[0:preambleLen], preamble[:preambleLen]) + + return record, nil +} + +// peekRecordPreambleType returns the record type of the record data payload, +// or an error if the preamble is invalid. +func peekRecordPreambleType(payload []byte) (int, error) { + + if len(payload) < 2 { + return -1, errors.TraceNew("invalid record preamble length") + } + + if int(payload[0]) != recordVersion { + return -1, errors.TraceNew("invalid record preamble version") + } + + recordType := int(payload[1]) + + if recordType < recordTypeFirst || recordType > recordTypeLast { + return -1, errors.Tracef("invalid record preamble type: %d %x", recordType, payload) + } + + return recordType, nil +} + +// readRecordPreamble consumes the record preamble from the given record data +// payload and returns the remaining record. The record type must match +// expectedRecordType and the version must match a known encoding schema +// version. +// +// To avoid allocations, readRecordPreamble returns a slice of the +// input record buffer; use like record = record[n:]. 
+func readRecordPreamble(expectedRecordType int, payload []byte) ([]byte, error) { + + if len(payload) < 2 { + return nil, errors.TraceNew("invalid record preamble length") + } + + if int(payload[0]) != recordVersion { + return nil, errors.TraceNew("invalid record preamble version") + } + + if int(payload[1]) != expectedRecordType { + return nil, errors.Tracef("unexpected record preamble type") + } + + recordDataLength, n := binary.Uvarint(payload[2:]) + if (recordDataLength == 0 && n <= 0) || 2+n > len(payload) { + return nil, errors.Tracef("invalid record preamble data length") + } + + record := payload[2+n:] + + // In the future, the data length field may be used to implement framing + // for a stream of records. For now, this check is simply a sanity check. + if len(record) != int(recordDataLength) { + return nil, errors.TraceNew("unexpected record preamble data length") + } + + return record, nil +} diff --git a/psiphon/common/inproxy/sdp_test.go b/psiphon/common/inproxy/sdp_test.go new file mode 100644 index 000000000..a1297f7e9 --- /dev/null +++ b/psiphon/common/inproxy/sdp_test.go @@ -0,0 +1,157 @@ +//go:build PSIPHON_ENABLE_INPROXY + +/* + * Copyright (c) 2024, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "context" + "net" + "strings" + "testing" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" +) + +func TestProcessSDP(t *testing.T) { + err := runTestProcessSDP() + if err != nil { + t.Errorf(errors.Trace(err).Error()) + } +} + +func runTestProcessSDP() error { + + config := &webRTCConfig{ + Logger: newTestLogger(), + WebRTCDialCoordinator: &testWebRTCDialCoordinator{ + disableSTUN: true, + disablePortMapping: true, + }, + } + + // Create a valid, base SDP, including private network (bogon) candidates. + + SetAllowBogonWebRTCConnections(true) + defer SetAllowBogonWebRTCConnections(false) + + conn, webRTCSDP, metrics, err := newWebRTCConnWithOffer( + context.Background(), config) + if err != nil { + return errors.Trace(err) + } + defer conn.Close() + + SDP := []byte(webRTCSDP.SDP) + + // Test disallow IPv6 + + if metrics.hasIPv6 { + preparedSDP, metrics, err := prepareSDPAddresses( + SDP, true, "", true) + if err != nil { + return errors.Trace(err) + } + + found := false + for _, reason := range metrics.filteredICECandidates { + if strings.Contains(reason, "disabled") { + found = true + break + } + } + if !found { + return errors.TraceNew("unexpected filteredICECandidates") + } + + if len(preparedSDP) >= len(SDP) { + return errors.TraceNew("unexpected SDP length") + } + } + + // Test filter unexpected GeoIP + + // This IP must not be a bogon; this address is not dialed. 
+ testIP := "1.1.1.1" + expectedGeoIP := common.GeoIPData{Country: "AA", ASN: "1"} + lookupGeoIP := func(IP string) common.GeoIPData { + if IP == testIP { + return common.GeoIPData{Country: "BB", ASN: "2"} + } + return expectedGeoIP + } + + // Add the testIP as a port mapping candidate. + preparedSDP, metrics, err := prepareSDPAddresses( + SDP, true, net.JoinHostPort(testIP, "80"), false) + if err != nil { + return errors.Trace(err) + } + + filteredSDP, metrics, err := filterSDPAddresses( + preparedSDP, true, lookupGeoIP, expectedGeoIP) + if err != nil { + return errors.Trace(err) + } + + found := false + for _, reason := range metrics.filteredICECandidates { + if strings.Contains(reason, "unexpected GeoIP") { + found = true + break + } + } + if !found { + return errors.TraceNew("unexpected filteredICECandidates") + } + + if len(filteredSDP) >= len(preparedSDP) { + return errors.TraceNew("unexpected SDP length") + } + + // Test filter bogons + + SetAllowBogonWebRTCConnections(false) + + // Allow no candidates (errorOnNoCandidates = false) + filteredSDP, metrics, err = filterSDPAddresses( + SDP, false, nil, common.GeoIPData{}) + if err != nil { + return errors.Trace(err) + } + + found = false + for _, reason := range metrics.filteredICECandidates { + if strings.Contains(reason, "bogon") { + found = true + break + } + } + if !found { + return errors.TraceNew("unexpected filteredICECandidates") + } + + if len(filteredSDP) >= len(preparedSDP) { + return errors.TraceNew("unexpected SDP length") + } + + return nil +} diff --git a/psiphon/common/inproxy/server.go b/psiphon/common/inproxy/server.go new file mode 100644 index 000000000..c3d12b28b --- /dev/null +++ b/psiphon/common/inproxy/server.go @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" +) + +// MaxRelayRoundTrips is a sanity/anti-DoS check against clients that attempt +// to relay more packets than are required for both a session handshake and +// application-level request round trip. +const MaxRelayRoundTrips = 10 + +// ServerBrokerSessions manages the secure sessions that handle +// BrokerServerReports from brokers. Each in-proxy-capable Psiphon server +// maintains a ServerBrokerSessions, with a set of established sessions for +// each broker. Session messages are relayed between the broker and the +// server by the client. +type ServerBrokerSessions struct { + sessions *ResponderSessions +} + +// NewServerBrokerSessions create a new ServerBrokerSessions, with the +// specified key material. The expected brokers are authenticated with +// brokerPublicKeys, an allow list. 
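As a rough sketch of how a Psiphon server might assemble this from its configuration, the exampleNewServerSessions helper below is hypothetical and not part of this change; the base64 key strings and the root obfuscation secret are assumed to come from the server's own config.

// exampleNewServerSessions is a hypothetical helper showing one way to
// construct ServerBrokerSessions from configured, base64-encoded key material.
func exampleNewServerSessions(
	serverPrivateKeyBase64 string,
	serverRootObfuscationSecret ObfuscationSecret,
	brokerPublicKeysBase64 []string) (*ServerBrokerSessions, error) {

	serverPrivateKey, err := SessionPrivateKeyFromString(serverPrivateKeyBase64)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Only brokers on this allow list may establish sessions with the server.
	brokerPublicKeys, err := SessionPublicKeysFromStrings(brokerPublicKeysBase64)
	if err != nil {
		return nil, errors.Trace(err)
	}

	return NewServerBrokerSessions(
		serverPrivateKey, serverRootObfuscationSecret, brokerPublicKeys)
}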
+func NewServerBrokerSessions( + serverPrivateKey SessionPrivateKey, + serverRootObfuscationSecret ObfuscationSecret, + brokerPublicKeys []SessionPublicKey) (*ServerBrokerSessions, error) { + + sessions, err := NewResponderSessionsForKnownInitiators( + serverPrivateKey, serverRootObfuscationSecret, brokerPublicKeys) + if err != nil { + return nil, errors.Trace(err) + } + + return &ServerBrokerSessions{ + sessions: sessions, + }, nil +} + +// SetKnownBrokerPublicKeys updates the set of broker public keys which are +// allowed to establish sessions with the server. Any existing sessions with +// keys not in the new list are deleted. Existing sessions with keys which +// remain in the list are retained. +func (s *ServerBrokerSessions) SetKnownBrokerPublicKeys( + brokerPublicKeys []SessionPublicKey) error { + + return errors.Trace(s.sessions.SetKnownInitiatorPublicKeys(brokerPublicKeys)) +} + +// ProxiedConnectionHandler is a callback, provided by the Psiphon server, +// that receives information from a BrokerServerReport for the client +// associated with the callback. +// +// The server must use the brokerVerifiedOriginalClientIP for all GeoIP +// operations associated with the client, including traffic rule selection +// and client-side tactics selection. +// +// Since the BrokerServerReport may be delivered later than the Psiphon +// handshake request -- in the case where the broker/server session needs to +// be established there will be additional round trips -- the server should +// delay traffic rule application, tactics responses, and allowing tunneled +// traffic until after the ProxiedConnectionHandler callback is invoked for +// the client. As a consequence, Psiphon Servers should be configured to +// require Proxies to be used for designated protocols. It's expected that +// server-side tactics such as packet manipulation will be applied based on +// the proxy's IP address. +// +// The fields in logFields should be added to server_tunnel logs. +type ProxiedConnectionHandler func( + brokerVerifiedOriginalClientIP string, + logFields common.LogFields) + +// HandlePacket handles a broker/server session packet, which are relayed by +// clients. In Psiphon, the packets may be exchanged in the Psiphon +// handshake, or in subsequent SSH requests and responses. When the +// broker/server session is already established, it's expected that the +// BrokerServerReport arrives in the packet that accompanies the Psiphon +// handshake, and so no additional round trip is required. +// +// Once the session is established and a verified BrokerServerReport arrives, +// the information from that report is sent to the ProxiedConnectionHandler +// callback. The callback should be associated with the client that is +// relaying the packets. +// +// clientConnectionID is the in-proxy connection ID specified by the client in +// its Psiphon handshake. +// +// When the retOut return value is not nil, it should be relayed back to the +// client in the handshake response or other tunneled response. When retOut +// is nil, the relay is complete. +// +// In the session reset token case, HandlePacket will return a non-nil retOut +// along with a retErr; the server should both log retErr and also relay the +// packet to the broker. 
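A minimal sketch of the relay loop a server component might run around HandlePacket follows; the receivePacketFromClient and sendPacketToClient callbacks, the exampleRelayBrokerPackets name, and the omitted GeoIP/traffic-rule handling are hypothetical placeholders for the Psiphon server's own transport and logging, not part of this change.

// exampleRelayBrokerPackets relays broker/server session packets via the
// client until the relay completes or MaxRelayRoundTrips is exceeded.
func exampleRelayBrokerPackets(
	logger common.Logger,
	sessions *ServerBrokerSessions,
	clientConnectionID ID,
	receivePacketFromClient func() ([]byte, error),
	sendPacketToClient func([]byte) error) error {

	handler := func(brokerVerifiedOriginalClientIP string, logFields common.LogFields) {
		// Here the server would select traffic rules and tactics using
		// brokerVerifiedOriginalClientIP and add logFields to server_tunnel
		// logs; omitted in this sketch.
		_ = brokerVerifiedOriginalClientIP
		_ = logFields
	}

	for i := 0; i < MaxRelayRoundTrips; i++ {

		in, err := receivePacketFromClient()
		if err != nil {
			return errors.Trace(err)
		}

		out, err := sessions.HandlePacket(logger, in, clientConnectionID, handler)
		if err != nil {
			// In the session reset token case, out may be non-nil even when
			// err is not nil: log the error and still relay the packet.
			logger.WithTraceFields(
				common.LogFields{"error": err.Error()}).Error("HandlePacket failed")
			if out == nil {
				return errors.Trace(err)
			}
		}

		if out == nil {
			// The relay is complete.
			return nil
		}

		if err := sendPacketToClient(out); err != nil {
			return errors.Trace(err)
		}
	}

	return errors.TraceNew("exceeded MaxRelayRoundTrips")
}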
+func (s *ServerBrokerSessions) HandlePacket( + logger common.Logger, + in []byte, + clientConnectionID ID, + handler ProxiedConnectionHandler) (retOut []byte, retErr error) { + + handleUnwrappedReport := func(initiatorID ID, unwrappedReportPayload []byte) ([]byte, error) { + + brokerReport, err := UnmarshalBrokerServerReport(unwrappedReportPayload) + if err != nil { + return nil, errors.Trace(err) + } + + logFields, err := brokerReport.ValidateAndGetLogFields() + if err != nil { + return nil, errors.Trace(err) + } + + // The initiatorID is the broker's public key. + logFields["inproxy_broker_id"] = initiatorID + + logFields["inproxy_connection_id"] = brokerReport.ConnectionID + logFields["inproxy_proxy_id"] = brokerReport.ProxyID + + // !matched_common_compartments implies a personal compartment ID match + logFields["inproxy_matched_common_compartments"] = brokerReport.MatchedCommonCompartments + logFields["inproxy_proxy_nat_type"] = brokerReport.ProxyNATType + logFields["inproxy_proxy_port_mapping_types"] = brokerReport.ProxyPortMappingTypes + logFields["inproxy_client_nat_type"] = brokerReport.ClientNATType + logFields["inproxy_client_port_mapping_types"] = brokerReport.ClientPortMappingTypes + + // TODO: + // - log IPv4 vs. IPv6 information + // - relay and log broker transport stats, such as meek HTTP version + + ok := true + + // The client must supply same connection ID to server that the broker + // sends to the server. + if brokerReport.ConnectionID != clientConnectionID { + + // Limitation: as the BrokerServerReport is a one-way message with + // no response, the broker will not be notified of tunnel failure + // errors including "connection ID mismatch", and cannot log this + // connection attempt outcome. + + logger.WithTraceFields(common.LogFields{ + "client_inproxy_connection_id": clientConnectionID, + "broker_inproxy_connection_id": brokerReport.ConnectionID, + }).Error("connection ID mismatch") + + ok = false + } + + if ok { + + handler(brokerReport.ClientIP, logFields) + } + + // Returns nil, as there is no response to the report, and so no + // additional packet to relay. + + return nil, nil + } + + out, err := s.sessions.HandlePacket(in, handleUnwrappedReport) + return out, errors.Trace(err) +} diff --git a/psiphon/common/inproxy/session.go b/psiphon/common/inproxy/session.go new file mode 100644 index 000000000..c70fdbbe3 --- /dev/null +++ b/psiphon/common/inproxy/session.go @@ -0,0 +1,2112 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package inproxy + +import ( + "bytes" + "context" + "crypto/ed25519" + "crypto/rand" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "math" + "sync" + "time" + + "filippo.io/edwards25519" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" + lrucache "github.com/cognusion/go-cache-lru" + "github.com/flynn/noise" + "golang.org/x/crypto/curve25519" + "golang.zx2c4.com/wireguard/replay" +) + +const ( + sessionsTTL = 24 * time.Hour + sessionsMaxSize = 1000000 + + sessionObfuscationPaddingMinSize = 0 + sessionObfuscationPaddingMaxSize = 256 + + resetSessionTokenName = "psiphon-inproxy-session-reset-session-token" + resetSessionTokenNonceSize = 32 +) + +const ( + SessionProtocolName = "psiphon-inproxy-session" + SessionProtocolVersion1 = 1 +) + +// SessionPrologue is a Noise protocol prologue, which binds the session ID to +// the session. +type SessionPrologue struct { + SessionProtocolName string `cbor:"1,keyasint,omitempty"` + SessionProtocolVersion uint32 `cbor:"2,keyasint,omitempty"` + SessionID ID `cbor:"3,keyasint,omitempty"` +} + +// SessionPacket is a Noise protocol message, which may be a session handshake +// message, or secured application data, a SessionRoundTrip. +type SessionPacket struct { + SessionID ID `cbor:"1,keyasint,omitempty"` + Nonce uint64 `cbor:"2,keyasint,omitempty"` + Payload []byte `cbor:"3,keyasint,omitempty"` + ResetSessionToken []byte `cbor:"4,keyasint,omitempty"` +} + +// SessionRoundTrip is an application data request or response, which is +// secured by the Noise protocol session. Each request is assigned a unique +// RoundTripID, and each corresponding response has the same RoundTripID. +type SessionRoundTrip struct { + RoundTripID ID `cbor:"1,keyasint,omitempty"` + Payload []byte `cbor:"2,keyasint,omitempty"` +} + +// SessionPrivateKey is a Noise protocol private key. +type SessionPrivateKey [ed25519.PrivateKeySize]byte + +// GenerateSessionPrivateKey creates a new session private key using +// crypto/rand. +// +// GenerateSessionPrivateKey generates an Ed25519 private key, which is used +// directly for digital signatures and, when converted to Curve25519, as the +// Noise protocol ECDH private key. +// +// The Ed25519 representation is the canonical representation since there's a +// 1:1 conversion from Ed25519 to Curve25519, but not the other way. +// +// Digital signing use cases include signing a reset session token. In +// addition, externally, digital signing can be used in a challenge/response +// protocol that demonstrates ownership of a proxy private key corresponding +// to a claimed proxy public key. +func GenerateSessionPrivateKey() (SessionPrivateKey, error) { + + var k SessionPrivateKey + + _, privateKey, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + return k, errors.Trace(err) + } + + if len(privateKey) != len(k) { + return k, errors.TraceNew("unexpected private key length") + } + copy(k[:], privateKey) + + return k, nil +} + +// SessionPrivateKeyFromString returns a SessionPrivateKey given its base64 +// string encoding. +func SessionPrivateKeyFromString(s string) (SessionPrivateKey, error) { + var k SessionPrivateKey + return k, errors.Trace(fromBase64String(s, k[:])) +} + +// String emits SessionPrivateKey as base64. 
+func (k SessionPrivateKey) String() string { + return base64.RawStdEncoding.EncodeToString([]byte(k[:])) +} + +// IsZero indicates if the private key is zero-value. +func (k SessionPrivateKey) IsZero() bool { + var zero SessionPrivateKey + return bytes.Equal(k[:], zero[:]) +} + +// GetPublicKey returns the public key corresponding to the private key. +func (k SessionPrivateKey) GetPublicKey() (SessionPublicKey, error) { + + var sessionPublicKey SessionPublicKey + + // See ed25519.PrivateKey.Public. + copy(sessionPublicKey[:], k[32:]) + + return sessionPublicKey, nil +} + +// ToCurve25519 converts the Ed25519 SessionPrivateKey to the unique +// corresponding Curve25519 private key for use in the Noise protocol. +func (k SessionPrivateKey) ToCurve25519() []byte { + h := sha512.New() + h.Write(ed25519.PrivateKey(k[:]).Seed()) + return h.Sum(nil)[:curve25519.ScalarSize] +} + +// SessionPublicKey is a Noise protocol public key. +type SessionPublicKey [ed25519.PublicKeySize]byte + +// SessionPublicKeyFromString returns a SessionPublicKey given its base64 +// string encoding. +func SessionPublicKeyFromString(s string) (SessionPublicKey, error) { + var k SessionPublicKey + return k, errors.Trace(fromBase64String(s, k[:])) +} + +// SessionPublicKeysFromStrings returns a list of SessionPublicKeys given the +// base64 string encodings. +func SessionPublicKeysFromStrings(strs []string) ([]SessionPublicKey, error) { + keys := make([]SessionPublicKey, len(strs)) + for i, s := range strs { + err := fromBase64String(s, keys[i][:]) + if err != nil { + return nil, errors.Trace(err) + } + } + return keys, nil +} + +// String emits SessionPublicKey as base64. +func (k SessionPublicKey) String() string { + return base64.RawStdEncoding.EncodeToString([]byte(k[:])) +} + +// ToCurve25519 converts the Ed25519 SessionPublicKey to the unique +// corresponding Curve25519 public key for use in the Noise protocol. +func (k SessionPublicKey) ToCurve25519() (SessionPublicKeyCurve25519, error) { + + var c SessionPublicKeyCurve25519 + + // Copyright 2019 The age Authors. All rights reserved. + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + // + // See https://blog.filippo.io/using-ed25519-keys-for-encryption and + // https://pkg.go.dev/filippo.io/edwards25519#Point.BytesMontgomery. + p, err := new(edwards25519.Point).SetBytes(k[:]) + if err != nil { + return c, err + } + + copy(c[:], p.BytesMontgomery()) + + return c, nil +} + +// SessionPublicKeyCurve25519 is a representation of a Curve25519 public key +// as a fixed-size array that may be used as a map key. +type SessionPublicKeyCurve25519 [curve25519.PointSize]byte + +// String emits SessionPublicKeyCurve25519 as base64. +func (k SessionPublicKeyCurve25519) String() string { + return base64.RawStdEncoding.EncodeToString([]byte(k[:])) +} + +// InitiatorSessions is a set of secure Noise protocol sessions for an +// initiator. For in-proxy, clients and proxies will initiate sessions with +// one or more brokers and brokers will initiate sessions with multiple Psiphon +// servers. +// +// Secure sessions provide encryption, authentication of the responder, +// identity hiding for the initiator, forward secrecy, and anti-replay for +// application data. +// +// Maintaining a set of established sessions minimizes round trips and +// overhead, as established sessions can be shared and reused for many client +// requests to one broker or many broker requests to one server.
+// +// Currently, InitiatorSessions doesn't cap the number of sessions or use +// an LRU cache since the number of peers is bounded in the in-proxy +// architecture; clients will typically use one or no more than a handful of +// brokers and brokers will exchange requests with a subset of Psiphon +// servers bounded by the in-proxy capability. +// +// InitiatorSessions are used via the RoundTrip function or InitiatorRoundTrip +// type. RoundTrip is a synchronous function which performs any necessary +// session establishment handshake along with the request/response exchange. +// InitiatorRoundTrip offers an iterator interface, with stepwise invocations +// for each step of the handshake and round trip. +// +// All round trips attempt to share and reuse any existing, established +// session to a given peer. For a given peer, the waitToShareSession option +// determines whether round trips will block and wait if a session handshake +// is already in progress, or proceed with a concurrent handshake. For +// in-proxy, clients and proxies use waitToShareSession; as broker/server +// round trips are relayed through clients, brokers do not use +// waitToShareSession so as to not rely on any single client. +// +// Round trips can be performed concurrently and requests can arrive out-of- +// order. The higher level transport for sessions is responsible for +// multiplexing round trips and maintaining the association between a request +// and its corresponding response. +type InitiatorSessions struct { + privateKey SessionPrivateKey + + mutex sync.Mutex + sessions map[SessionPublicKey]*session +} + +// NewInitiatorSessions creates a new InitiatorSessions with the specified +// initiator private key. +func NewInitiatorSessions( + initiatorPrivateKey SessionPrivateKey) *InitiatorSessions { + + return &InitiatorSessions{ + privateKey: initiatorPrivateKey, + sessions: make(map[SessionPublicKey]*session), + } +} + +// RoundTrip sends the request to the specified responder and returns the +// response. +// +// RoundTrip will establish a session when required, or reuse an existing +// session when available. +// +// When waitToShareSession is true, RoundTrip will block until an existing, +// non-established session is available to be shared. +// +// When making initial network round trips to establish a session, +// sessionHandshakeTimeout is applied as the round trip timeout. +// +// When making the application-level request round trip, requestDelay, when > +// 0, is applied before the request network round trip begins; requestDelay +// may be used to spread out many concurrent requests, such as batch proxy +// announcements, to avoid CDN rate limits. +// +// requestTimeout is applied to the application-level request network round +// trip, and excludes any requestDelay; the distinct requestTimeout may be +// used to set a longer timeout for long-polling requests, such as proxy +// announcements. +// +// Any time spent blocking on waitToShareSession is not included in +// requestDelay or requestTimeout. +// +// RoundTrip returns immediately when ctx becomes done.
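As a brief, hypothetical usage sketch (the exampleBrokerRoundTrip helper, the timeout values, and the assumption that the caller supplies a RoundTripper transport and the broker's key material are illustrative only, not part of this change):

// exampleBrokerRoundTrip performs a single request round trip to a broker.
func exampleBrokerRoundTrip(
	ctx context.Context,
	roundTripper RoundTripper,
	brokerPublicKey SessionPublicKey,
	brokerRootObfuscationSecret ObfuscationSecret,
	request []byte) ([]byte, error) {

	clientPrivateKey, err := GenerateSessionPrivateKey()
	if err != nil {
		return nil, errors.Trace(err)
	}

	sessions := NewInitiatorSessions(clientPrivateKey)

	// waitToShareSession is true: concurrent round trips to the same broker
	// share one established Noise session.
	response, err := sessions.RoundTrip(
		ctx,
		roundTripper,
		brokerPublicKey,
		brokerRootObfuscationSecret,
		true,           // waitToShareSession
		30*time.Second, // sessionHandshakeTimeout (illustrative)
		0,              // requestDelay
		2*time.Minute,  // requestTimeout (illustrative, long-polling)
		request)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return response, nil
}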
+func (s *InitiatorSessions) RoundTrip( + ctx context.Context, + roundTripper RoundTripper, + responderPublicKey SessionPublicKey, + responderRootObfuscationSecret ObfuscationSecret, + waitToShareSession bool, + sessionHandshakeTimeout time.Duration, + requestDelay time.Duration, + requestTimeout time.Duration, + request []byte) ([]byte, error) { + + rt, err := s.NewRoundTrip( + responderPublicKey, + responderRootObfuscationSecret, + waitToShareSession, + request) + if err != nil { + return nil, errors.Trace(err) + } + + var in []byte + for { + out, isRequestPacket, err := rt.Next(ctx, in) + if err != nil { + return nil, errors.Trace(err) + } + if out == nil { + response, err := rt.Response() + if err != nil { + return nil, errors.Trace(err) + } + return response, nil + } + + // At this point, if sharing a session, any blocking on + // waitToShareSession is complete, and time elapsed in that blocking + // will not collapse delays or reduce timeouts. If not sharing, and + // establishing a new session, Noise session handshake round trips + // are required before the request payload round trip. + // + // Select the delay and timeout. For Noise session handshake round + // trips, use sessionHandshakeTimeout, which should be appropriate + // for a fast turn-around from the broker, and no delay. When sending + // the application-level request packet, use requestDelay and + // requestTimeout, which allows for applying a delay -- to spread out + // requests -- and a potentially longer timeout appropriate for a + // long-polling, slower turn-around from the broker. + // + // Delays and timeouts are passed down into the round tripper + // provider. Having the round tripper perform the delay sleep allows + // all delays to be interrupted by any round tripper close, due to an + // overall broker client reset. Passing the timeout separately, as + // opposed to adding to ctx, explicitly ensures that the timeout is + // applied only right before the network round trip and no sooner. + + var delay, timeout time.Duration + if isRequestPacket { + delay = requestDelay + timeout = requestTimeout + } else { + // No delay for session handshake packet round trips. + timeout = sessionHandshakeTimeout + } + + in, err = roundTripper.RoundTrip(ctx, delay, timeout, out) + if err != nil { + + // There are no explicit retries here. Retrying in the case where + // the initiator attempts to use an expired session is covered by + // the reset session token logic in InitiatorRoundTrip. Higher + // levels implicitly provide additional retries to cover other + // cases; Psiphon client tunnel establishment will retry in-proxy + // dials; the proxy will retry its announce requests if they + // fail. + + // If this round trip owns its session and there are any + // waitToShareSession initiators awaiting the session, signal them + // that the session will not become ready. + + rt.TransportFailed() + + return nil, errors.Trace(err) + } + } +} + +// NewRoundTrip creates a new InitiatorRoundTrip which will perform a +// request/response round trip with the specified responder, sending the +// input request. The InitiatorRoundTrip will establish a session when +// required, or reuse an existing session when available. +// +// When waitToShareSession is true, InitiatorRoundTrip.Next will block until +// an existing, non-established session is available to be shared.
+// +// Limitation with waitToShareSession: currently, any new session must +// complete an _application-level_ round trip (e.g., ProxyAnnounce/ClientOffer +// request _and_ response) before the session becomes ready to share since +// the first application-level request is sent in the same packet as the last +// handshake message and ready-to-share is only signalled after a subsequent +// packet is received. This means that, for example, a long-polling +// ProxyAnnounce will block any additional ProxyAnnounce requests attempting +// to share the same InitiatorSessions. In practice, an initial +// ProxyAnnounce/ClientOffer request is expected to block only as long as +// there is no match, so the impact of blocking other concurrent requests is +// limited. See comment in InitiatorRoundTrip.Next for a related future +// enhancement. +// +// NewRoundTrip does not block or perform any session operations; the +// operations begin on the first InitiatorRoundTrip.Next call. The content of +// request should not be modified after calling NewRoundTrip. +func (s *InitiatorSessions) NewRoundTrip( + responderPublicKey SessionPublicKey, + responderRootObfuscationSecret ObfuscationSecret, + waitToShareSession bool, + request []byte) (*InitiatorRoundTrip, error) { + + s.mutex.Lock() + defer s.mutex.Unlock() + + // Generate a new round trip ID for the session round trip. The response + // is expected to echo back the same round trip ID. This check detects + // any potential misrouting of multiplexed round trip exchanges. + + roundTripID, err := MakeID() + if err != nil { + return nil, errors.Trace(err) + } + + requestPayload, err := marshalRecord( + SessionRoundTrip{RoundTripID: roundTripID, Payload: request}, + recordTypeSessionRoundTrip) + if err != nil { + return nil, errors.Trace(err) + } + + return &InitiatorRoundTrip{ + initiatorSessions: s, + responderPublicKey: responderPublicKey, + responderRootObfuscationSecret: responderRootObfuscationSecret, + waitToShareSession: waitToShareSession, + roundTripID: roundTripID, + requestPayload: requestPayload, + }, nil +} + +// getSession looks for an existing session for the peer specified by public +// key. When none is found, newSession is called to create a new session, and +// this is stored, associated with the key. If an existing session is found, +// indicate if it is ready to be shared or not. +func (s *InitiatorSessions) getSession( + publicKey SessionPublicKey, + newSession func() (*session, error)) ( + retSession *session, retIsNew bool, retIsReady bool, retErr error) { + + s.mutex.Lock() + defer s.mutex.Unlock() + + session, ok := s.sessions[publicKey] + if ok { + return session, false, session.isReadyToShare(nil), nil + } + + session, err := newSession() + if err != nil { + return nil, false, false, errors.Trace(err) + } + + s.sessions[publicKey] = session + + return session, true, session.isReadyToShare(nil), nil +} + +// setSession sets the session associated with the peer's public key. +func (s *InitiatorSessions) setSession(publicKey SessionPublicKey, session *session) { + + s.mutex.Lock() + defer s.mutex.Unlock() + + s.sessions[publicKey] = session +} + +// removeIfSession removes the session associated with the peer's public key, +// if it's the specified session. 
+func (s *InitiatorSessions) removeIfSession(publicKey SessionPublicKey, session *session) { + + s.mutex.Lock() + defer s.mutex.Unlock() + + currentSession, ok := s.sessions[publicKey] + if !ok || session != currentSession { + return + } + + delete(s.sessions, publicKey) +} + +// InitiatorRoundTrip represents the state of a session round trip, including +// a session handshake if required. The session handshake and round trip is +// advanced by calling InitiatorRoundTrip.Next. +type InitiatorRoundTrip struct { + initiatorSessions *InitiatorSessions + responderPublicKey SessionPublicKey + responderRootObfuscationSecret ObfuscationSecret + waitToShareSession bool + roundTripID ID + requestPayload []byte + + mutex sync.Mutex + sharingSession bool + session *session + lastSentPacket bytes.Buffer + response []byte +} + +// Next advances a round trip, as well as any session handshake that may be +// first required. Next takes the next packet received from the responder and +// returns the next packet to send to the responder. To begin, pass a nil +// receivedPacket. The round trip is complete when Next returns nil for the +// next packet to send; the response can be fetched from +// InitiatorRoundTrip.Response. +// +// When waitToShareSession is set, Next will block until an existing, +// non-established session is available to be shared. +// +// Multiple concurrent round trips are supported and requests from different +// round trips can arrive at the responder out-of-order. The provided +// transport is responsible for multiplexing round trips and maintaining an +// association between sent and received packets for a given round trip. +// +// Next returns immediately when ctx becomes done. +func (r *InitiatorRoundTrip) Next( + ctx context.Context, + receivedPacket []byte) (retSendPacket []byte, retIsRequestPacket bool, retErr error) { + + // Note: don't clear or reset a session in the event of a bad/rejected + // packet as that would allow a malicious relay client to interrupt a + // valid broker/server session with a malformed packet. Just drop the + // packet and return an error. + + // beginOrShareSession returns the next packet to send. + beginOrShareSession := func() ([]byte, bool, error) { + + // Check for an existing session, or create a new one if there's no + // existing session. + // + // To ensure the concurrent waitToShareSession cases don't start + // multiple handshakes, getSession populates the initiatorSessions + // session map with a new, unestablished session. + + newSession := func() (*session, error) { + + sendObfuscationSecret, receiveObfuscationSecret, err := + deriveSessionPacketObfuscationSecrets(r.responderRootObfuscationSecret, false) + if err != nil { + return nil, errors.Trace(err) + } + + session, err := newSession( + true, // isInitiator + r.initiatorSessions.privateKey, + sendObfuscationSecret, + receiveObfuscationSecret, + nil, // No obfuscation replay history + &r.responderPublicKey, + r.requestPayload, + nil, + nil) + if err != nil { + return nil, errors.Trace(err) + } + return session, nil + } + + session, isNew, isReady, err := r.initiatorSessions.getSession( + r.responderPublicKey, newSession) + if err != nil { + return nil, false, errors.Trace(err) + } + + if isNew { + + // When isNew is true, this InitiatorRoundTrip owns the session + // and will perform the handshake. + + r.session = session + r.sharingSession = false + + } else { + + if isReady { + + // When isReady is true, this shared session is fully + // established and ready for immediate use. 
+ + r.session = session + r.sharingSession = true + + } else { + + // The existing session is not yet ready for use. + + if r.waitToShareSession { + + // Wait for the owning InitiatorRoundTrip to complete the + // session handshake and then share the session. + + // Limitation with waitToShareSession: isReadyToShare + // becomes true only once the session completes + // an _application-level_ round trip + // (e.g., ProxyAnnounce/ClientOffer request _and_ + // response) since the first application-level request is + // bundled with the last handshake message and + // ready-to-share is true only after a subsequent packet + // is received, guaranteeing that the handshake is completed. + // + // Future enhancement: for shared sessions, don't bundle + // the request payload with the handshake. This implies + // one extra round trip for the initial requester, but + // allows all sharers to proceed at once. + + signal := make(chan struct{}) + if !session.isReadyToShare(signal) { + select { + case <-signal: + if !session.isReadyToShare(nil) { + + // The session failed to become ready to share due to a transport + // failure during the handshake. Fail this round trip. Don't + // create a new, unshared session since waitToShareSession was + // specified. It's expected that there will be retries by the + // RoundTrip caller. + + return nil, false, errors.TraceNew("waitToShareSession failed") + } + // else, use the session + case <-ctx.Done(): + return nil, false, errors.Trace(ctx.Err()) + } + } + r.session = session + r.sharingSession = true + + } else { + + // Don't wait: create a new, unshared session. + + r.session, err = newSession() + if err != nil { + return nil, false, errors.Trace(err) + } + r.sharingSession = false + } + } + } + + if r.sharingSession { + + // The shared session was either ready for immediate use, or we + // waited. Send the round trip request payload. + + sendPacket, err := r.session.sendPacket(r.requestPayload) + if err != nil { + return nil, false, errors.Trace(err) + } + + return sendPacket, true, nil + } + + // Begin the handshake for a new session. + + _, sendPacket, _, err := r.session.nextHandshakePacket(nil) + if err != nil { + return nil, false, errors.Trace(err) + } + + return sendPacket, false, nil + + } + + // Return immediately if the context is already done. + if ctx != nil { + err := ctx.Err() + if err != nil { + return nil, false, errors.Trace(err) + } + } + + r.mutex.Lock() + defer r.mutex.Unlock() + + // Store the output send packet, which is used to verify that any + // subsequent ResetSessionToken isn't replayed. + defer func() { + if retSendPacket != nil { + r.lastSentPacket.Reset() + r.lastSentPacket.Write(retSendPacket) + } + }() + + if r.session == nil { + + // If the session is nil, this is the first call to Next, and no + // packet from the peer is expected. + + if receivedPacket != nil { + return nil, false, errors.TraceNew("unexpected received packet") + } + + sendPacket, isRequestPacket, err := beginOrShareSession() + + if err != nil { + return nil, false, errors.Trace(err) + } + return sendPacket, isRequestPacket, nil + + } + + // Not the first Next call, so a packet from the peer is expected. + + if receivedPacket == nil { + return nil, false, errors.TraceNew("missing received packet") + } + + if r.sharingSession || r.session.isEstablished() { + + // When sharing an established and ready session, or once an owned + // session is established, the next packet is post-handshake and + // should be the round trip request response. 
+ + // Pre-unwrap here to check for a ResetSessionToken packet. + + sessionPacket, err := unwrapSessionPacket( + r.session.receiveObfuscationSecret, true, nil, receivedPacket) + if err != nil { + return nil, false, errors.Trace(err) + } + + // Reset the session when the packet is a valid ResetSessionToken. The + // responder sends a ResetSessionToken when this initiator attempts + // to use an expired session. A ResetSessionToken is valid when it's + // signed by the responder's public key and is bound to the last + // packet sent from this initiator (which protects against replay). + + if sessionPacket.ResetSessionToken != nil && + isValidResetSessionToken( + r.responderPublicKey, + r.lastSentPacket.Bytes(), + sessionPacket.ResetSessionToken) { + + // removeIfSession won't clobber any other, concurrently + // established session for the same responder. + r.initiatorSessions.removeIfSession(r.responderPublicKey, r.session) + r.session = nil + + sendPacket, isRequestPacket, err := beginOrShareSession() + if err != nil { + return nil, false, errors.Trace(err) + } + return sendPacket, isRequestPacket, nil + } + + responsePayload, err := r.session.receiveUnmarshaledPacket(sessionPacket) + if err != nil { + return nil, false, errors.Trace(err) + } + + var sessionRoundTrip SessionRoundTrip + err = unmarshalRecord(recordTypeSessionRoundTrip, responsePayload, &sessionRoundTrip) + if err != nil { + return nil, false, errors.Trace(err) + } + + // Check that the response RoundTripID matches the request RoundTripID. + + if sessionRoundTrip.RoundTripID != r.roundTripID { + return nil, false, errors.TraceNew("unexpected round trip ID") + } + + // Store the response so it can be retrieved later. + + r.response = sessionRoundTrip.Payload + return nil, false, nil + } + + // Continue the handshake. Since the first payload is sent to the + // responder along with the initiator's last handshake message, there's + // no sendPacket call in the owned session case. The last + // nextHandshakePacket will bundle it. Also, the payload output of + // nextHandshakePacket is ignored, as only a responder will receive a + // payload in a handshake message. + + isEstablished, sendPacket, _, err := r.session.nextHandshakePacket(receivedPacket) + if err != nil { + return nil, false, errors.Trace(err) + } + + if isEstablished { + + // Retain the most recently established session as the cached session + // for reuse. This should be a no-op in the isNew case and only have + // an effect for !inNew and !waitToShareSession. Modifying the + // initiatorSessions map entry should not impact any concurrent + // handshakes, as each InitiatorRoundTrip maintains its own reference + // to its session. + + r.initiatorSessions.setSession(r.responderPublicKey, r.session) + } + + return sendPacket, isEstablished, nil +} + +// TransportFailed marks any owned, not yet ready-to-share session as failed +// and signals any other initiators waiting to share the session. +// +// TransportFailed should be called when using waitToShareSession and when +// there is a transport level failure to relay a session packet. +func (r *InitiatorRoundTrip) TransportFailed() { + + r.mutex.Lock() + defer r.mutex.Unlock() + + if !r.sharingSession && !r.session.isReadyToShare(nil) { + r.session.transportFailed() + r.initiatorSessions.removeIfSession(r.responderPublicKey, r.session) + } +} + +// Response returns the round trip response. Call Response after Next returns +// nil for the next packet to send, indicating that the round trip is +// complete. 
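+//
+// As a rough, illustrative sketch only (not part of this change), a caller
+// might drive a round trip over its transport as follows, where relayPacket
+// is an assumed helper that delivers a packet to the responder and returns
+// the responder's reply:
+//
+//	roundTrip, err := initiatorSessions.NewRoundTrip(
+//	    responderPublicKey, responderRootObfuscationSecret, true, request)
+//	if err != nil {
+//	    return nil, err
+//	}
+//	sendPacket, _, err := roundTrip.Next(ctx, nil)
+//	for err == nil && sendPacket != nil {
+//	    var receivedPacket []byte
+//	    receivedPacket, err = relayPacket(ctx, sendPacket)
+//	    if err != nil {
+//	        roundTrip.TransportFailed()
+//	        break
+//	    }
+//	    sendPacket, _, err = roundTrip.Next(ctx, receivedPacket)
+//	}
+//	if err != nil {
+//	    return nil, err
+//	}
+//	return roundTrip.Response()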
+func (r *InitiatorRoundTrip) Response() ([]byte, error) {
+
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+
+	if r.response == nil {
+		return nil, errors.TraceNew("no response")
+	}
+
+	return r.response, nil
+}
+
+// ResponderSessions is a set of secure Noise protocol sessions for a
+// responder. For in-proxy, brokers respond to clients and proxies and
+// servers respond to brokers.
+//
+// Secure sessions provide encryption, authentication of the responder,
+// identity hiding for the initiator, forward secrecy, and anti-replay for
+// application data.
+//
+// ResponderSessions maintains a cache of established sessions to minimize
+// round trips and overhead as initiators are expected to make multiple round
+// trips. The cache has a TTL and maximum size with LRU to cap overall memory
+// usage. A broker may receive requests from millions of clients and proxies
+// and so only more recent sessions will be retained. Servers will receive
+// requests from only a handful of brokers, and so the TTL is not applied.
+//
+// Multiple, concurrent sessions for a single initiator public key are
+// supported.
+type ResponderSessions struct {
+	privateKey                  SessionPrivateKey
+	sendObfuscationSecret       ObfuscationSecret
+	receiveObfuscationSecret    ObfuscationSecret
+	applyTTL                    bool
+	obfuscationReplayHistory    *obfuscationReplayHistory
+	expectedInitiatorPublicKeys *sessionPublicKeyLookup
+
+	mutex    sync.Mutex
+	sessions *lrucache.Cache
+}
+
+// NewResponderSessions creates a new ResponderSessions which allows any
+// initiator to establish a session. A TTL is applied to cached sessions.
+func NewResponderSessions(
+	responderPrivateKey SessionPrivateKey,
+	responderRootObfuscationSecret ObfuscationSecret) (*ResponderSessions, error) {
+
+	sendObfuscationSecret, receiveObfuscationSecret, err :=
+		deriveSessionPacketObfuscationSecrets(responderRootObfuscationSecret, true)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return &ResponderSessions{
+		privateKey:               responderPrivateKey,
+		sendObfuscationSecret:    sendObfuscationSecret,
+		receiveObfuscationSecret: receiveObfuscationSecret,
+		applyTTL:                 true,
+		obfuscationReplayHistory: newObfuscationReplayHistory(),
+		sessions:                 lrucache.NewWithLRU(sessionsTTL, 1*time.Minute, sessionsMaxSize),
+	}, nil
+}
+
+// NewResponderSessionsForKnownInitiators creates a new ResponderSessions
+// which allows only allow-listed initiators to establish a session. No TTL
+// is applied to cached sessions.
+//
+// The NewResponderSessionsForKnownInitiators configuration is for Psiphon
+// servers responding to brokers. Only a handful of brokers are expected to
+// be deployed. A relatively small allow list of expected broker public keys
+// is easy to manage, deploy, and update. No TTL is applied to keep the
+// sessions established as much as possible and avoid extra client-relayed
+// round trips for BrokerServerRequests.
+func NewResponderSessionsForKnownInitiators(
+	responderPrivateKey SessionPrivateKey,
+	responderRootObfuscationKey ObfuscationSecret,
+	initiatorPublicKeys []SessionPublicKey) (*ResponderSessions, error) {
+
+	s, err := NewResponderSessions(responderPrivateKey, responderRootObfuscationKey)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	s.expectedInitiatorPublicKeys, err = newSessionPublicKeyLookup(initiatorPublicKeys)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return s, nil
+}
+
+// SetKnownInitiatorPublicKeys updates the set of initiator public keys which
+// are allowed to establish sessions with the responder.
Any existing +// sessions with keys not in the new list are deleted. Existing sessions with +// keys which remain in the list are retained. +func (s *ResponderSessions) SetKnownInitiatorPublicKeys( + initiatorPublicKeys []SessionPublicKey) error { + + s.mutex.Lock() + defer s.mutex.Unlock() + + changed, err := s.expectedInitiatorPublicKeys.set(initiatorPublicKeys) + if err != nil { + return errors.Trace(err) + } + + if !changed { + // With an identical public key set there are no sessions to be reset + return nil + } + + // Delete sessions for removed keys; retain established sessions for + // still-valid keys. + // + // Limitations: + // - Doesn't interrupt a concurrent request in progress which has already + // called getSession + // - lrucache doesn't have iterator; Items creates a full copy of the + // cache state + + for sessionIDStr, entry := range s.sessions.Items() { + + // Each session.hasUnexpectedInitiatorPublicKey indirectly references + // s.expectedInitiatorPublicKeys, which was updated above with the + // new set of valid public keys. + if entry.Object.(*session).hasUnexpectedInitiatorPublicKey() { + s.sessions.Delete(sessionIDStr) + } + } + + return nil +} + +// RequestHandler is an application-level handler that receives the decrypted +// request payload and returns a response payload to be encrypted and sent to +// the initiator. The initiatorID is the authenticated identifier of the +// initiator: client, proxy, or broker. +// +// In cases where a request is a one-way message, with no response, such as a +// BrokerServerReport, RequestHandler should return a nil packet. +type RequestHandler func(initiatorID ID, request []byte) ([]byte, error) + +// HandlePacket takes a session packet, as received at the transport level, +// and handles session handshake and request decryption. While a session +// handshakes, HandlePacket returns the next handshake message to be relayed +// back to the initiator over the transport. +// +// Once a session is fully established and a request is decrypted, the inner +// request payload is passed to the RequestHandler for application-level +// processing. The response received from the RequestHandler will be +// encrypted with the session and returned from HandlePacket as the next +// packet to send back over the transport. If there is no response to +// be returned, HandlePacket returns a nil packet. +// +// The session packet contains a session ID that is used to route packets from +// many initiators to the correct session state. +// +// Above the Noise protocol security layer, session packets have an +// obfuscation layer. If a packet doesn't authenticate with the expected +// obfuscation secret, or if a packet is replayed, HandlePacket returns an +// error. The obfuscation anti-replay layer covers replays of Noise handshake +// messages which aren't covered by the Noise nonce anti-replay. When +// HandlePacket returns an error, the caller should invoke anti-probing +// behavior, such as returning a generic 404 error from an HTTP server for +// HTTPS transports. +// +// There is one expected error case with legitimate initiators: when an +// initiator reuses a session that is expired or no longer in the responder +// cache. In this case HandlePacket will return a reset session token in +// outPacket along with an error, and the caller should log the error and +// also send the packet to the initiator. +// +// The HandlePacket caller should implement initiator rate limiting in its +// transport level. 
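+//
+// As a rough, illustrative sketch only (not part of this change), an
+// HTTPS-based responder transport might wire HandlePacket into its handler
+// as follows; responderSessions, handleRequest, maxPacketSize, and the HTTP
+// framing are assumptions made for the example:
+//
+//	func handle(w http.ResponseWriter, r *http.Request) {
+//	    inPacket, err := io.ReadAll(http.MaxBytesReader(w, r.Body, maxPacketSize))
+//	    if err != nil {
+//	        http.NotFound(w, r)
+//	        return
+//	    }
+//	    outPacket, err := responderSessions.HandlePacket(inPacket, handleRequest)
+//	    if err != nil {
+//	        // Log the error; if there is no reset session token to relay,
+//	        // respond with a generic 404 as anti-probing behavior.
+//	        if outPacket == nil {
+//	            http.NotFound(w, r)
+//	            return
+//	        }
+//	    }
+//	    w.Write(outPacket)
+//	}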
+func (s *ResponderSessions) HandlePacket( + inPacket []byte, + requestHandler RequestHandler) (retOutPacket []byte, retErr error) { + + // Concurrency: no locks are held for this function, only in specific + // helper functions. + + // unwrapSessionPacket deobfuscates the session packet, and unmarshals a + // SessionPacket. The SessionPacket.SessionID is used to route the + // session packet to an existing session or to create a new one. The + // SessionPacket.Payload is a Noise handshake message or an encrypted + // request and that will be handled below. + + sessionPacket, err := unwrapSessionPacket( + s.receiveObfuscationSecret, false, s.obfuscationReplayHistory, inPacket) + if err != nil { + return nil, errors.Trace(err) + } + + sessionID := sessionPacket.SessionID + + // Check for an existing session with this session ID, or create a new one + // if not found. If the session _was_ in the cache but is now expired, a + // new session is created, but subsequent Noise operations will fail. + + session, err := s.getSession(sessionID) + if err != nil { + return nil, errors.Trace(err) + } + + retainSession := false + + defer func() { + if retErr != nil && !retainSession { + + // If an error is returned, the session has failed, so don't + // retain it in the cache as it could be more recently used than + // an older but still valid session. + // + // TODO: should we retain the session if it has completed the + // handshake? As with initiator error signals, and depending on + // the transport security level, a SessionPacket with a + // legitimate session ID but corrupt Noise payload could be + // forged, terminating a legitimate session. + + s.removeSession(sessionID) + } + }() + + var requestPayload []byte + + if session.isEstablished() { + + // When the session is already established, decrypt the packet to get + // the request. + + payload, err := session.receiveUnmarshaledPacket(sessionPacket) + if err != nil { + return nil, errors.Trace(err) + } + requestPayload = payload + + } else { + + // When the session is not established, the packet is the next + // handshake message. The initiator appends the request payload to + // the end of its last XK handshake message, and in that case payload + // will contain the request. + + isEstablished, outPacket, payload, err := + session.nextUnmarshaledHandshakePacket(sessionPacket) + if err != nil { + + if _, ok := err.(potentialExpiredSessionError); !ok { + return nil, errors.Trace(err) + } + + // The initiator may be trying to use a previously valid session + // which is now expired or flushed, due to a full cache or a + // server reboot. Craft and send a secure reset session token, + // signed with the responder public key (the Ed25519 + // representation), bound to the packet just received from the + // initiator (to defend against replay). + + outPacket, wrapErr := wrapSessionPacket( + s.sendObfuscationSecret, + false, + &SessionPacket{ + SessionID: sessionPacket.SessionID, + ResetSessionToken: makeResetSessionToken(s.privateKey, inPacket), + }) + if wrapErr != nil { + return nil, errors.Trace(wrapErr) + } + + return outPacket, errors.Trace(err) + } + + if outPacket != nil { + + // The handshake is not complete until outPacket is nil; send the + // next handshake packet. + + if payload != nil { + + // A payload is not expected unless the handshake is complete. + return nil, errors.TraceNew("unexpected handshake payload") + } + + // The session TTL is not extended here. 
Initiators, including
+			// clients and proxies, are given sessionsTTL to complete the
+			// entire handshake.
+
+			return outPacket, nil
+		}
+
+		if !isEstablished || payload == nil {
+
+			// When outPacket is nil, the handshake should be complete --
+			// isEstablished -- and, by convention, the first request payload
+			// should be available.
+
+			return nil, errors.TraceNew("unexpected established state")
+		}
+
+		requestPayload = payload
+	}
+
+	// Extend the session TTL.
+	s.touchSession(sessionID, session)
+
+	initiatorID, err := session.getPeerID()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	var sessionRoundTrip SessionRoundTrip
+	err = unmarshalRecord(recordTypeSessionRoundTrip, requestPayload, &sessionRoundTrip)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	request := sessionRoundTrip.Payload
+
+	response, err := requestHandler(initiatorID, request)
+	if err != nil {
+
+		// Don't delete the session if the application-level request handler
+		// returns an error, as there is no problem with the Noise session.
+		// Non-failure application-level errors can include cases like a
+		// fronting CDN aborting a request due to timeout misalignment.
+		retainSession = true
+
+		return nil, errors.Trace(err)
+	}
+
+	if response == nil {
+		// There is no response.
+		return nil, nil
+	}
+
+	// The response is assigned the same RoundTripID as the request.
+	sessionRoundTrip = SessionRoundTrip{
+		RoundTripID: sessionRoundTrip.RoundTripID,
+		Payload:     response,
+	}
+
+	responsePayload, err := marshalRecord(
+		sessionRoundTrip, recordTypeSessionRoundTrip)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	responsePacket, err := session.sendPacket(responsePayload)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return responsePacket, nil
+}
+
+// touchSession sets a cached session for the specified session ID; if the
+// session is already in the cache, its TTL is extended. The LRU session
+// cache entry may be discarded once the cache is full.
+func (s *ResponderSessions) touchSession(sessionID ID, session *session) {
+
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+
+	if session.hasUnexpectedInitiatorPublicKey() {
+
+		// In this case, SetKnownInitiatorPublicKeys was called concurrent to
+		// HandlePacket, after HandlePacket's getSession, and now the known
+		// initiator public key for this session is no longer valid; don't
+		// cache or extend the session, as that could revert a session flush
+		// performed in SetKnownInitiatorPublicKeys.
+		//
+		// Limitation: this won't interrupt a handshake in progress, which may
+		// complete, but then ultimately fail.
+		return
+	}
+
+	TTL := lrucache.DefaultExpiration
+	if !s.applyTTL {
+		TTL = lrucache.NoExpiration
+	}
+	s.sessions.Set(string(sessionID[:]), session, TTL)
+}
+
+// getSession returns an existing session for the specified session ID, or
+// creates a new session, and places it in the cache, if not found.
+func (s *ResponderSessions) getSession(sessionID ID) (*session, error) { + + s.mutex.Lock() + defer s.mutex.Unlock() + + strSessionID := string(sessionID[:]) + + entry, ok := s.sessions.Get(strSessionID) + if ok { + return entry.(*session), nil + } + + session, err := newSession( + false, // !isInitiator + s.privateKey, + s.sendObfuscationSecret, + s.receiveObfuscationSecret, + s.obfuscationReplayHistory, + nil, + nil, + &sessionID, + s.expectedInitiatorPublicKeys) + if err != nil { + return nil, errors.Trace(err) + } + + s.sessions.Set( + strSessionID, session, lrucache.DefaultExpiration) + + return session, nil +} + +// removeSession removes any existing session for the specified session ID. +func (s *ResponderSessions) removeSession(sessionID ID) { + + s.mutex.Lock() + defer s.mutex.Unlock() + + s.sessions.Delete(string(sessionID[:])) +} + +// makeResetSessionToken creates a secure reset session token. +// +// This token is used for a responder to signal to an initiator that a session +// has expired, or is no longer valid and that a new session should be +// established. Securing this signal is particularly important for the +// broker/server sessions relayed by untrusted clients, as it prevents a +// malicious client from injecting invalid reset tokens and +// interrupting/degrading session performance. +// +// A reset token is signed by the responder's Ed25519 public key. The signature covers: +// - The last packet received from the initiator, mitigating replay attacks +// - A context name, resetSessionTokenName, and nonce which mitigates against +// directly signing arbitrary data in the untrusted last packet received +// from the initiator +// +// Reset session tokens are not part of the Noise protocol, but are sent as +// session packets. +func makeResetSessionToken( + privateKey SessionPrivateKey, + receivedPacket []byte) []byte { + + var token bytes.Buffer + token.Write(prng.Bytes(resetSessionTokenNonceSize)) + + h := sha256.New() + h.Write([]byte(resetSessionTokenName)) + h.Write(token.Bytes()[:resetSessionTokenNonceSize]) + h.Write(receivedPacket) + + token.Write(ed25519.Sign(privateKey[:], h.Sum(nil))) + + return token.Bytes() +} + +// isValidResetSessionToken checks if a reset session token is valid, given +// the specified responder public key and last packet sent to the responder. +func isValidResetSessionToken( + publicKey SessionPublicKey, + lastSentPacket []byte, + token []byte) bool { + + if len(token) <= resetSessionTokenNonceSize { + return false + } + + h := sha256.New() + h.Write([]byte(resetSessionTokenName)) + h.Write(token[:resetSessionTokenNonceSize]) + h.Write(lastSentPacket) + + return ed25519.Verify(publicKey[:], h.Sum(nil), token[resetSessionTokenNonceSize:]) +} + +// sessionPublicKeyLookup implements set membership lookup for session public +// keys, and is used to lookup expected public keys for optional responder +// access control. The sessionPublicKeyLookup is initialized with a list of +// Ed25519 session public keys, the canonical representation, while the +// lookup is done with Curve25519 public keys, the representation that is +// received via the Noise protocol. 
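+//
+// For illustration only, a membership check reduces to converting the peer's
+// static key to the fixed-size Curve25519 array form and testing the map:
+//
+//	var k SessionPublicKeyCurve25519
+//	copy(k[:], peerStaticKey)
+//	allowed := lookup.lookup(k)
+//
+// where peerStaticKey is the Curve25519 public key obtained from the Noise
+// handshake.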
+type sessionPublicKeyLookup struct { + mutex sync.Mutex + lookupMap map[SessionPublicKeyCurve25519]struct{} +} + +func newSessionPublicKeyLookup(publicKeys []SessionPublicKey) (*sessionPublicKeyLookup, error) { + s := &sessionPublicKeyLookup{ + lookupMap: make(map[SessionPublicKeyCurve25519]struct{}), + } + _, err := s.set(publicKeys) + if err != nil { + return nil, errors.Trace(err) + } + return s, nil +} + +// set modifies the lookup set of session public keys and returns true if the +// set has changed. +func (s *sessionPublicKeyLookup) set(publicKeys []SessionPublicKey) (bool, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + + // Convert the Ed25519 public key to its Curve25519 representation, which + // is what's looked up. SessionPublicKeyCurve25519 is a fixed-size array + // which can be used as a map key. + var curve25519PublicKeys []SessionPublicKeyCurve25519 + for _, publicKey := range publicKeys { + k, err := publicKey.ToCurve25519() + if err != nil { + return false, errors.Trace(err) + } + curve25519PublicKeys = append(curve25519PublicKeys, k) + } + + // Check if the set of public keys has changed. This check and return + // value is used by ResponderSessions.SetKnownInitiatorPublicKeys to skip + // checking for sessions to be revoked in the case of an overall tactics + // reload in which configured expected public keys did not change. + if len(curve25519PublicKeys) == len(s.lookupMap) { + allFound := true + for _, k := range curve25519PublicKeys { + if _, ok := s.lookupMap[k]; !ok { + allFound = false + break + } + } + if allFound { + return false, nil + } + } + + lookupMap := make(map[SessionPublicKeyCurve25519]struct{}) + for _, k := range curve25519PublicKeys { + + lookupMap[k] = struct{}{} + } + + s.lookupMap = lookupMap + + return true, nil +} + +func (s *sessionPublicKeyLookup) lookup(k SessionPublicKeyCurve25519) bool { + s.mutex.Lock() + defer s.mutex.Unlock() + + _, ok := s.lookupMap[k] + return ok +} + +type sessionState int + +const ( + + /* + + XK: + <- s + ... + -> e, es + <- e, ee + -> s, se [+ first payload] + + */ + + sessionStateInitiator_XK_send_e_es = iota + sessionStateInitiator_XK_recv_e_ee_send_s_se_payload + sessionStateInitiator_XK_established + sessionStateInitiator_failed + + sessionStateResponder_XK_recv_e_es_send_e_ee + sessionStateResponder_XK_recv_s_se_payload + sessionStateResponder_XK_established +) + +// session represents a Noise protocol session, including its initial +// handshake state. +// +// The XK pattern is used: +// - Initiators may have short-lived static keys (clients), or long-lived +// static keys (proxies and brokers). The initiator key is securely +// transmitted to the responder while hiding its value. +// - The responder static key is always known (K) and exchanged out of +// band. +// - Provides forward secrecy. +// - The round trip request can be appended to the initiators final +// handshake message, eliminating an extra round trip. +// +// For in-proxy, any client or proxy can connect to a broker. Only allowed +// brokers can connect to a server. +// +// To limit access to allowed brokers, expectedInitiatorPublicKeys is an allow +// list of broker public keys. XK is still used for this case, instead of +// KK: +// - With KK, the broker identity would have to be known before the Noise +// handshake begins +// - With XK, the broker proves possession of a private key corresponding to +// a broker public key on the allow list. 
+// - While KK will abort sooner than XK when an invalid broker key is used, +// completing the handshake and decrypting the first payload does not +// leak any information. +// +// The is no "close" operation for sessions. Responders will maintain a cache +// of established sessions and discard the state for expired sessions or in +// an LRU fashion. Initiators will reuse sessions until they are rejected by +// a responder. +// +// There is no state for the obfuscation layer; each packet is obfuscated +// independently since session packets may arrive at a peer out-of-order. +// +// There are independent replay defenses at both the obfuscation layer +// (to mitigate active probing replays) and at the Noise protocol layer +// (to defend against replay of Noise protocol packets). The obfuscation +// anti-replay covers all obfuscated packet nonce values, and the Noise +// anti-replay filter covers post-handshake packet message sequence number +// nonces. The Noise layer anti-replay filter uses a sliding window of size +// ~8000, allowing for approximately that degree of out-of-order packets as +// could happen with concurrent requests in a shared session. +// +// Future enhancement: use a single anti-replay mechanism for both use cases? +type session struct { + isInitiator bool + sessionID ID + sendObfuscationSecret ObfuscationSecret + receiveObfuscationSecret ObfuscationSecret + replayHistory *obfuscationReplayHistory + expectedInitiatorPublicKeys *sessionPublicKeyLookup + + mutex sync.Mutex + state sessionState + signalAwaitingReady []chan struct{} + handshake *noise.HandshakeState + firstPayload []byte + peerPublicKey []byte + send *noise.CipherState + receive *noise.CipherState + nonceReplay replay.Filter +} + +func newSession( + isInitiator bool, + privateKey SessionPrivateKey, + sendObfuscationSecret ObfuscationSecret, + receiveObfuscationSecret ObfuscationSecret, + replayHistory *obfuscationReplayHistory, + + // Initiator + expectedResponderPublicKey *SessionPublicKey, + firstPayload []byte, + + // Responder + peerSessionID *ID, + expectedInitiatorPublicKeys *sessionPublicKeyLookup) (*session, error) { + + if isInitiator { + if peerSessionID != nil || + expectedResponderPublicKey == nil || + expectedInitiatorPublicKeys != nil || + firstPayload == nil { + return nil, errors.TraceNew("unexpected initiator parameters") + } + } else { + if peerSessionID == nil || + expectedResponderPublicKey != nil || + firstPayload != nil { + return nil, errors.TraceNew("unexpected responder parameters") + } + } + + sessionID := peerSessionID + if sessionID == nil { + ID, err := MakeID() + if err != nil { + return nil, errors.Trace(err) + } + sessionID = &ID + } + + // The prologue binds the session ID and other meta data to the session. 
+ + prologue, err := protocol.CBOREncoding.Marshal(SessionPrologue{ + SessionProtocolName: SessionProtocolName, + SessionProtocolVersion: SessionProtocolVersion1, + SessionID: *sessionID, + }) + if err != nil { + return nil, errors.Trace(err) + } + + publicKey, err := privateKey.GetPublicKey() + if err != nil { + return nil, errors.Trace(err) + } + + privateKeyCurve25519 := privateKey.ToCurve25519() + publicKeyCurve25519, err := publicKey.ToCurve25519() + if err != nil { + return nil, errors.Trace(err) + } + + // SessionProtocolVersion1 implies this ciphersuite + + config := noise.Config{ + CipherSuite: noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashBLAKE2b), + Pattern: noise.HandshakeXK, + Initiator: isInitiator, + Prologue: prologue, + StaticKeypair: noise.DHKey{ + Public: publicKeyCurve25519[:], + Private: privateKeyCurve25519}, + } + + if expectedResponderPublicKey != nil { + k, err := (*expectedResponderPublicKey).ToCurve25519() + if err != nil { + return nil, errors.Trace(err) + } + config.PeerStatic = k[:] + } + + handshake, err := noise.NewHandshakeState(config) + if err != nil { + return nil, errors.Trace(err) + } + + var state sessionState + if isInitiator { + state = sessionStateInitiator_XK_send_e_es + } else { + state = sessionStateResponder_XK_recv_e_es_send_e_ee + } + + return &session{ + isInitiator: isInitiator, + sessionID: *sessionID, + sendObfuscationSecret: sendObfuscationSecret, + receiveObfuscationSecret: receiveObfuscationSecret, + replayHistory: replayHistory, + expectedInitiatorPublicKeys: expectedInitiatorPublicKeys, + state: state, + signalAwaitingReady: make([]chan struct{}, 0), // must be non-nil + handshake: handshake, + firstPayload: firstPayload, + }, nil +} + +// isEstablished indicates that the session handshake is complete. +// +// A session may not be ready to share when isEstablished is true. +func (s *session) isEstablished() bool { + + s.mutex.Lock() + defer s.mutex.Unlock() + + return s.handshake == nil +} + +// isReadyToShare indicates that the session handshake is complete _and_ that +// the peer is known to have received and processed the final handshake +// message. +// +// When isReadyToShare is true, multiple round trips can use a session +// concurrently. Requests from different round trips can arrive at the peer +// out-of-order. +// +// Session sharing is performed by initiators, and in the XK handshake the +// last step is the initiator sends a final message to the responder. While +// the initiator session becomes "established" after that last message is +// output, we need to delay other round trips from sharing the session and +// sending session-encrypted packets to the responder before the responder +// actually receives that final handshake message. +// +// isReadyToShare becomes true once the round trip performing the handshake +// receives its round trip response, which demonstrates that the responder +// received the final message. +// +// When a signal channel is specified, it is registered and signaled once the +// session becomes ready to share _or_ the session fails to become ready due +// to a transport failure. When signaled, the caller must call isReadyToShare +// once again to distinguish between these two outcomes. +func (s *session) isReadyToShare(signal chan struct{}) bool { + + s.mutex.Lock() + defer s.mutex.Unlock() + + if !s.isInitiator || s.state == sessionStateInitiator_failed { + // Signal immediately if transportFailed was already called. 
+ if signal != nil { + close(signal) + } + return false + } + + if s.handshake == nil && s.signalAwaitingReady == nil { + return true + } + + if signal != nil { + s.signalAwaitingReady = append( + s.signalAwaitingReady, signal) + } + + return false +} + +// transportFailed marks the session as failed and signals any initiators +// waiting to share the session. +// +// transportFailed is ignored if the session is already ready to share, as any +// transport failures past that point affect only one application-level round +// trip and not the session. +func (s *session) transportFailed() { + + s.mutex.Lock() + defer s.mutex.Unlock() + + if !s.isInitiator { + return + } + + // Already ready to share, so ignore the transport failure. + if s.handshake == nil && s.signalAwaitingReady == nil { + return + } + + if s.state == sessionStateInitiator_failed { + return + } + + // In the sessionStateInitiator_failed state, nextHandshakePacket will + // always fail. + s.state = sessionStateInitiator_failed + + for _, signal := range s.signalAwaitingReady { + close(signal) + } + s.signalAwaitingReady = nil +} + +// getPeerID returns the peer's public key, in the form of an ID. A given peer +// identifier can only be provided by the peer with the corresponding private +// key. +func (s *session) getPeerID() (ID, error) { + + s.mutex.Lock() + defer s.mutex.Unlock() + + var peerID ID + + if s.handshake != nil { + return peerID, errors.TraceNew("not established") + } + + if len(s.peerPublicKey) != len(peerID) { + return peerID, errors.TraceNew("invalid peer public key") + } + + copy(peerID[:], s.peerPublicKey) + + return peerID, nil +} + +// hasUnexpectedInitiatorPublicKey indicates whether the session is +// established (and so has obtained a peer public key), +// expectedInitiatorPublicKeys is configured, and the session initiator's +// public key is not in/no longer in expectedInitiatorPublicKeys. +func (s *session) hasUnexpectedInitiatorPublicKey() bool { + + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.expectedInitiatorPublicKeys == nil { + // Not expecting specific initiator public keys + return false + } + + if s.handshake != nil { + // Peer public key not known yet + return false + } + + var k SessionPublicKeyCurve25519 + copy(k[:], s.peerPublicKey) + + return !s.expectedInitiatorPublicKeys.lookup(k) +} + +// sendPacket prepares a session packet to be sent to the peer, containing the +// specified round trip payload. The packet is secured by the established +// session. +func (s *session) sendPacket(payload []byte) ([]byte, error) { + + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.handshake != nil { + return nil, errors.TraceNew("not established") + } + + if s.send == nil { + return nil, errors.Trace(s.unexpectedStateError()) + } + + nonce := s.send.Nonce() + + // Unlike tunnels, for example, sessions are not for bulk data transfer + // and we don't aim for zero allocation or extensive buffer reuse. + + encryptedPayload, err := s.send.Encrypt(nil, nil, payload) + if err != nil { + return nil, errors.Trace(err) + } + + sessionPacket, err := s.wrapPacket( + &SessionPacket{ + SessionID: s.sessionID, + Nonce: nonce, + Payload: encryptedPayload, + }) + if err != nil { + return nil, errors.Trace(err) + } + + return sessionPacket, nil + +} + +// receivePacket opens a session packet received from the peer, using the +// established session, and returns the round trip payload. 
+// +// As responders need to inspect the packet and use its session ID to route +// packets to the correct session, responders will call +// receiveUnmarshaledPacket instead. +func (s *session) receivePacket(packet []byte) ([]byte, error) { + + sessionPacket, err := s.unwrapPacket(packet) + if err != nil { + return nil, errors.Trace(err) + } + + payload, err := s.receiveUnmarshaledPacket(sessionPacket) + if err != nil { + return nil, errors.Trace(err) + } + + return payload, nil +} + +func (s *session) receiveUnmarshaledPacket( + sessionPacket *SessionPacket) ([]byte, error) { + + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.receive == nil { + return nil, errors.Trace(s.unexpectedStateError()) + } + + if sessionPacket.SessionID != s.sessionID { + return nil, errors.Tracef("unexpected sessionID") + } + + s.receive.SetNonce(sessionPacket.Nonce) + + payload, err := s.receive.Decrypt(nil, nil, sessionPacket.Payload) + if err != nil { + return nil, errors.Trace(err) + } + + if !s.nonceReplay.ValidateCounter(sessionPacket.Nonce, math.MaxUint64) { + return nil, errors.TraceNew("replay detected") + } + + // The session is ready to share once it's received a post-handshake + // response from the peer. + + s.readyToShare() + + return payload, nil +} + +// nextHandshakePacket advances the session handshake. nextHandshakePacket +// takes the next handshake packet received from the peer and returns the +// next handshake packet to send to the peer. Start by passing nil for +// inPacket. The handshake is complete when outPacket is nil. +// +// XK bundles the first initiator request payload along with a handshake +// message, and nextHandshakePacket output that payload to the responder when +// the handshake is complete. +// +// Once the handshake is complete, further round trips are exchanged using +// sendPacket and receivePacket. +// +// As responders need to inspect the packet and use its session ID to route +// packets to the correct session, responders will call +// nextUnmarshaledHandshakePacket instead. +func (s *session) nextHandshakePacket(inPacket []byte) ( + isEstablished bool, outPacket []byte, payload []byte, err error) { + + var sessionPacket *SessionPacket + if inPacket != nil { + sessionPacket, err = s.unwrapPacket(inPacket) + if err != nil { + return false, nil, nil, errors.Trace(err) + } + } + + isEstablished, outPacket, payload, err = + s.nextUnmarshaledHandshakePacket(sessionPacket) + if err != nil { + return false, nil, nil, errors.Trace(err) + } + + return isEstablished, outPacket, payload, nil +} + +// potentialExpiredSessionError is packet error that indicates a potential +// expired session condition which should be handled with a reset session +// token. This includes the responder expecting a handshake packet for a new +// session, but receiving a non-handshake packet. +// Non-potentialExpiredSessionError errors include +// "unexpected initiator public key". +type potentialExpiredSessionError struct { + error +} + +func (s *session) nextUnmarshaledHandshakePacket(sessionPacket *SessionPacket) ( + isEstablished bool, outPacket []byte, payload []byte, err error) { + + s.mutex.Lock() + defer s.mutex.Unlock() + + var in []byte + if sessionPacket != nil { + if sessionPacket.SessionID != s.sessionID { + return false, nil, nil, errors.Tracef("unexpected sessionID") + } + if sessionPacket.Nonce != 0 { + + // A handshake message was expected, but this packet contains a + // post-handshake nonce, Flag this as a potential expired session + // case. See comment below for limitation. 
+			return false, nil, nil,
+				potentialExpiredSessionError{errors.TraceNew("unexpected nonce")}
+		}
+		in = sessionPacket.Payload
+	}
+
+	// Handle handshake state transitions.
+
+	switch s.state {
+
+	// Initiator
+
+	case sessionStateInitiator_XK_send_e_es:
+		out, _, _, err := s.handshake.WriteMessage(nil, nil)
+		if err != nil {
+			return false, nil, nil, errors.Trace(err)
+		}
+		outPacket, err := s.wrapPacket(
+			&SessionPacket{SessionID: s.sessionID, Payload: out})
+		if err != nil {
+			return false, nil, nil, errors.Trace(err)
+		}
+		s.state = sessionStateInitiator_XK_recv_e_ee_send_s_se_payload
+		return false, outPacket, nil, nil
+
+	case sessionStateInitiator_XK_recv_e_ee_send_s_se_payload:
+		_, _, _, err := s.handshake.ReadMessage(nil, in)
+		if err != nil {
+			return false, nil, nil, errors.Trace(err)
+		}
+		out, send, receive, err := s.handshake.WriteMessage(nil, s.firstPayload)
+		if err != nil {
+			return false, nil, nil, errors.Trace(err)
+		}
+		outPacket, err := s.wrapPacket(
+			&SessionPacket{SessionID: s.sessionID, Payload: out})
+		if err != nil {
+			return false, nil, nil, errors.Trace(err)
+		}
+		s.state = sessionStateInitiator_XK_established
+		s.established(send, receive)
+		return true, outPacket, nil, nil
+
+	// Responder
+
+	case sessionStateResponder_XK_recv_e_es_send_e_ee:
+		_, _, _, err := s.handshake.ReadMessage(nil, in)
+		if err != nil {
+
+			// A handshake message was expected, but an invalid message type
+			// was received. Flag this as a potential expired session case, a
+			// candidate for a reset session token. Limitation: there's no
+			// check that the invalid message was, in fact, a valid message
+			// for an expired session; this may not be possible given the
+			// established-session Noise protocol message is encrypted/random.
+			return false, nil, nil, potentialExpiredSessionError{errors.Trace(err)}
+		}
+		out, _, _, err := s.handshake.WriteMessage(nil, nil)
+		if err != nil {
+			return false, nil, nil, errors.Trace(err)
+		}
+		outPacket, err := s.wrapPacket(
+			&SessionPacket{SessionID: s.sessionID, Payload: out})
+		if err != nil {
+			return false, nil, nil, errors.Trace(err)
+		}
+		s.state = sessionStateResponder_XK_recv_s_se_payload
+		return false, outPacket, nil, nil
+
+	case sessionStateResponder_XK_recv_s_se_payload:
+		firstPayload, receive, send, err := s.handshake.ReadMessage(nil, in)
+		if err != nil {
+			return false, nil, nil, errors.Trace(err)
+		}
+
+		// Check if the initiator's public key is on the allow list.
+		//
+		// Limitation: unlike with the KK pattern, the handshake completes and
+		// the initial payload is decrypted even when the initiator public
+		// key is not on the allow list.
+
+		err = s.checkExpectedInitiatorPublicKeys(s.handshake.PeerStatic())
+		if err != nil {
+			return false, nil, nil, errors.Trace(err)
+		}
+		s.state = sessionStateResponder_XK_established
+		s.established(send, receive)
+		return true, nil, firstPayload, nil
+	}
+
+	return false, nil, nil, errors.Trace(s.unexpectedStateError())
+}
+
+func (s *session) checkExpectedInitiatorPublicKeys(peerPublicKey []byte) error {
+
+	if s.expectedInitiatorPublicKeys == nil {
+		return nil
+	}
+
+	var k SessionPublicKeyCurve25519
+	copy(k[:], peerPublicKey)
+
+	ok := s.expectedInitiatorPublicKeys.lookup(k)
+
+	if !ok {
+		return errors.TraceNew("unexpected initiator public key")
+	}
+
+	return nil
+}
+
+// Set the session as established.
+func (s *session) established(
+	send *noise.CipherState,
+	receive *noise.CipherState) {
+
+	// Assumes s.mutex lock is held.
+
+	s.peerPublicKey = s.handshake.PeerStatic()
+	s.handshake = nil
+	s.firstPayload = nil
+	s.send = send
+	s.receive = receive
+}
+
+// Set the session as ready to share.
+func (s *session) readyToShare() {
+
+	// Assumes s.mutex lock is held.
+
+	if !s.isInitiator {
+		return
+	}
+
+	if s.signalAwaitingReady == nil {
+		return
+	}
+
+	for _, signal := range s.signalAwaitingReady {
+		close(signal)
+	}
+	s.signalAwaitingReady = nil
+}
+
+// Marshal and obfuscate a SessionPacket.
+func (s *session) wrapPacket(sessionPacket *SessionPacket) ([]byte, error) {
+
+	// No lock. References only static session fields.
+
+	obfuscatedPacket, err := wrapSessionPacket(
+		s.sendObfuscationSecret,
+		s.isInitiator,
+		sessionPacket)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return obfuscatedPacket, nil
+
+}
+
+// Marshal and obfuscate a SessionPacket. wrapSessionPacket is used by
+// responders to wrap reset session token packets.
+func wrapSessionPacket(
+	sendObfuscationSecret ObfuscationSecret,
+	isInitiator bool,
+	sessionPacket *SessionPacket) ([]byte, error) {
+
+	marshaledPacket, err := marshalRecord(
+		sessionPacket, recordTypeSessionPacket)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	obfuscatedPacket, err := obfuscateSessionPacket(
+		sendObfuscationSecret,
+		isInitiator,
+		marshaledPacket,
+		sessionObfuscationPaddingMinSize,
+		sessionObfuscationPaddingMaxSize)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return obfuscatedPacket, nil
+}
+
+// Deobfuscate and unmarshal a SessionPacket.
+func (s *session) unwrapPacket(obfuscatedPacket []byte) (*SessionPacket, error) {
+
+	// No lock. References only static session fields.
+
+	sessionPacket, err := unwrapSessionPacket(
+		s.receiveObfuscationSecret,
+		s.isInitiator,
+		s.replayHistory,
+		obfuscatedPacket)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return sessionPacket, nil
+
+}
+
+// Deobfuscate and unmarshal a SessionPacket. unwrapSessionPacket is used by
+// responders, which must peek at the SessionPacket and get the session ID to
+// route packets to the correct session.
+func unwrapSessionPacket(
+	receiveObfuscationSecret ObfuscationSecret,
+	isInitiator bool,
+	replayHistory *obfuscationReplayHistory,
+	obfuscatedPacket []byte) (*SessionPacket, error) {
+
+	packet, err := deobfuscateSessionPacket(
+		receiveObfuscationSecret,
+		isInitiator,
+		replayHistory,
+		obfuscatedPacket)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	var sessionPacket *SessionPacket
+	err = unmarshalRecord(recordTypeSessionPacket, packet, &sessionPacket)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return sessionPacket, nil
+}
+
+// Create an error that includes the current handshake state.
+func (s *session) unexpectedStateError() error {
+
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+
+	return errors.Tracef("unexpected state: %v", s.state)
+}
diff --git a/psiphon/common/inproxy/session_test.go b/psiphon/common/inproxy/session_test.go
new file mode 100644
index 000000000..8e238b5e1
--- /dev/null
+++ b/psiphon/common/inproxy/session_test.go
@@ -0,0 +1,796 @@
+/*
+ * Copyright (c) 2023, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "bytes" + "context" + "fmt" + "math" + "strings" + "testing" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" + "github.com/flynn/noise" + "golang.zx2c4.com/wireguard/replay" +) + +func TestSessions(t *testing.T) { + err := runTestSessions() + if err != nil { + t.Errorf(errors.Trace(err).Error()) + } +} + +func runTestSessions() error { + + // Test: basic round trip succeeds + + responderPrivateKey, err := GenerateSessionPrivateKey() + if err != nil { + return errors.Trace(err) + } + + responderPublicKey, err := responderPrivateKey.GetPublicKey() + if err != nil { + return errors.Trace(err) + } + + responderRootObfuscationSecret, err := GenerateRootObfuscationSecret() + if err != nil { + return errors.Trace(err) + } + + responderSessions, err := NewResponderSessions( + responderPrivateKey, responderRootObfuscationSecret) + if err != nil { + return errors.Trace(err) + } + + initiatorPrivateKey, err := GenerateSessionPrivateKey() + if err != nil { + return errors.Trace(err) + } + + initiatorPublicKey, err := initiatorPrivateKey.GetPublicKey() + if err != nil { + return errors.Trace(err) + } + + initiatorSessions := NewInitiatorSessions(initiatorPrivateKey) + + waitToShareSession := true + + sessionHandshakeTimeout := 100 * time.Millisecond + requestDelay := 1 * time.Microsecond + requestTimeout := 200 * time.Millisecond + + roundTripper := newTestSessionRoundTripper( + responderSessions, + &initiatorPublicKey, + sessionHandshakeTimeout, + requestDelay, + requestTimeout) + + request := roundTripper.MakeRequest() + + response, err := initiatorSessions.RoundTrip( + context.Background(), + roundTripper, + responderPublicKey, + responderRootObfuscationSecret, + waitToShareSession, + sessionHandshakeTimeout, + requestDelay, + requestTimeout, + request) + if err != nil { + return errors.Trace(err) + } + + if !bytes.Equal(response, roundTripper.ExpectedResponse(request)) { + return errors.TraceNew("unexpected response") + } + + // Test: session expires; new one negotiated + // + // sessionStateResponder_XK_recv_e_es_send_e_ee case, when Nonce = 0 + + responderSessions.sessions.Flush() + + request = roundTripper.MakeRequest() + + response, err = initiatorSessions.RoundTrip( + context.Background(), + roundTripper, + responderPublicKey, + responderRootObfuscationSecret, + waitToShareSession, + sessionHandshakeTimeout, + requestDelay, + requestTimeout, + request) + if err != nil { + return errors.Trace(err) + } + + if !bytes.Equal(response, roundTripper.ExpectedResponse(request)) { + return errors.TraceNew("unexpected response") + } + + // Test: session expires; new one negotiated + // + // "unexpected nonce" case, when Nonce > 0 + + for i := 0; i < 10; i++ { + _, err = initiatorSessions.RoundTrip( + context.Background(), + roundTripper, + responderPublicKey, + responderRootObfuscationSecret, + waitToShareSession, + sessionHandshakeTimeout, + requestDelay, + requestTimeout, + roundTripper.MakeRequest()) + if err != nil { + 
return errors.Trace(err) + } + } + + responderSessions.sessions.Flush() + + request = roundTripper.MakeRequest() + + response, err = initiatorSessions.RoundTrip( + context.Background(), + roundTripper, + responderPublicKey, + responderRootObfuscationSecret, + waitToShareSession, + sessionHandshakeTimeout, + requestDelay, + requestTimeout, + request) + if err != nil { + return errors.Trace(err) + } + + if !bytes.Equal(response, roundTripper.ExpectedResponse(request)) { + return errors.TraceNew("unexpected response") + } + + // Test: RoundTrips with waitToShareSession are interrupted when session + // fails + + responderSessions.sessions.Flush() + + initiatorSessions = NewInitiatorSessions(initiatorPrivateKey) + + failingRoundTripper := newTestSessionRoundTripper( + nil, + &initiatorPublicKey, + sessionHandshakeTimeout, + requestDelay, + requestTimeout) + + roundTripCount := 100 + + results := make(chan error, roundTripCount) + + for i := 0; i < roundTripCount; i++ { + go func() { + time.Sleep(prng.DefaultPRNG().Period(0, 10*time.Millisecond)) + waitToShareSession := true + _, err := initiatorSessions.RoundTrip( + context.Background(), + failingRoundTripper, + responderPublicKey, + responderRootObfuscationSecret, + waitToShareSession, + sessionHandshakeTimeout, + requestDelay, + requestTimeout, + roundTripper.MakeRequest()) + results <- err + }() + } + + waitToShareSessionFailed := false + for i := 0; i < roundTripCount; i++ { + err := <-results + if err == nil { + return errors.TraceNew("unexpected success") + } + if strings.HasSuffix(err.Error(), "waitToShareSession failed") { + waitToShareSessionFailed = true + } + } + if !waitToShareSessionFailed { + return errors.TraceNew("missing waitToShareSession failed error") + } + + // Test: expected known initiator public key + + initiatorSessions = NewInitiatorSessions(initiatorPrivateKey) + + responderSessions, err = NewResponderSessionsForKnownInitiators( + responderPrivateKey, + responderRootObfuscationSecret, + []SessionPublicKey{initiatorPublicKey}) + if err != nil { + return errors.Trace(err) + } + + roundTripper = newTestSessionRoundTripper( + responderSessions, + &initiatorPublicKey, + sessionHandshakeTimeout, + requestDelay, + requestTimeout) + + request = roundTripper.MakeRequest() + + response, err = initiatorSessions.RoundTrip( + context.Background(), + roundTripper, + responderPublicKey, + responderRootObfuscationSecret, + waitToShareSession, + sessionHandshakeTimeout, + requestDelay, + requestTimeout, + request) + if err != nil { + return errors.Trace(err) + } + + if !bytes.Equal(response, roundTripper.ExpectedResponse(request)) { + return errors.TraceNew("unexpected response") + } + + // Test: expected known initiator public key using SetKnownInitiatorPublicKeys + + initiatorSessions = NewInitiatorSessions(initiatorPrivateKey) + + responderSessions, err = NewResponderSessionsForKnownInitiators( + responderPrivateKey, + responderRootObfuscationSecret, + []SessionPublicKey{}) + if err != nil { + return errors.Trace(err) + } + + responderSessions.SetKnownInitiatorPublicKeys([]SessionPublicKey{initiatorPublicKey}) + + roundTripper = newTestSessionRoundTripper( + responderSessions, + &initiatorPublicKey, + sessionHandshakeTimeout, + requestDelay, + requestTimeout) + + request = roundTripper.MakeRequest() + + response, err = initiatorSessions.RoundTrip( + context.Background(), + roundTripper, + responderPublicKey, + responderRootObfuscationSecret, + waitToShareSession, + sessionHandshakeTimeout, + requestDelay, + requestTimeout, + 
request) + if err != nil { + return errors.Trace(err) + } + + if !bytes.Equal(response, roundTripper.ExpectedResponse(request)) { + return errors.TraceNew("unexpected response") + } + + // The existing session should not be dropped as the original key remains valid. + responderSessions.SetKnownInitiatorPublicKeys([]SessionPublicKey{initiatorPublicKey}) + + if responderSessions.sessions.ItemCount() != 1 { + return errors.TraceNew("unexpected session cache state") + } + + otherKnownInitiatorPrivateKey, err := GenerateSessionPrivateKey() + if err != nil { + return errors.Trace(err) + } + otherKnownInitiatorPublicKey, err := otherKnownInitiatorPrivateKey.GetPublicKey() + if err != nil { + return errors.Trace(err) + } + + // The existing session should be dropped as the original key is not longer valid. + responderSessions.SetKnownInitiatorPublicKeys([]SessionPublicKey{otherKnownInitiatorPublicKey}) + + if responderSessions.sessions.ItemCount() != 0 { + return errors.TraceNew("unexpected session cache state") + } + + // Test: wrong known initiator public key + + unknownInitiatorPrivateKey, err := GenerateSessionPrivateKey() + if err != nil { + return errors.Trace(err) + } + + unknownInitiatorSessions := NewInitiatorSessions(unknownInitiatorPrivateKey) + + ctx, cancelFunc := context.WithTimeout(context.Background(), 1*time.Second) + defer cancelFunc() + + request = roundTripper.MakeRequest() + + response, err = unknownInitiatorSessions.RoundTrip( + ctx, + roundTripper, + responderPublicKey, + responderRootObfuscationSecret, + waitToShareSession, + sessionHandshakeTimeout, + requestDelay, + requestTimeout, + request) + if err == nil || !strings.HasSuffix(err.Error(), "unexpected initiator public key") { + return errors.Tracef("unexpected result: %v", err) + } + + // Test: many concurrent sessions + + responderSessions, err = NewResponderSessions( + responderPrivateKey, responderRootObfuscationSecret) + if err != nil { + return errors.Trace(err) + } + + roundTripper = newTestSessionRoundTripper( + responderSessions, + nil, + sessionHandshakeTimeout, + requestDelay, + requestTimeout) + + clientCount := 10000 + requestCount := 100 + concurrentRequestCount := 5 + + if common.IsRaceDetectorEnabled { + // Workaround for very high memory usage and OOM that occurs only with + // the race detector enabled. 
+ clientCount = 100 + } + + resultChan := make(chan error, clientCount) + + for i := 0; i < clientCount; i++ { + + // Run clients concurrently + + go func() { + + initiatorPrivateKey, err := GenerateSessionPrivateKey() + if err != nil { + resultChan <- errors.Trace(err) + return + } + + initiatorSessions := NewInitiatorSessions(initiatorPrivateKey) + + for i := 0; i < requestCount; i += concurrentRequestCount { + + requestResultChan := make(chan error, concurrentRequestCount) + + for j := 0; j < concurrentRequestCount; j++ { + + // Run some of each client's requests concurrently, to + // exercise waitToShareSession + + go func(waitToShareSession bool) { + + request := roundTripper.MakeRequest() + + response, err := initiatorSessions.RoundTrip( + context.Background(), + roundTripper, + responderPublicKey, + responderRootObfuscationSecret, + waitToShareSession, + sessionHandshakeTimeout, + requestDelay, + requestTimeout, + request) + if err != nil { + requestResultChan <- errors.Trace(err) + return + } + + if !bytes.Equal(response, roundTripper.ExpectedResponse(request)) { + requestResultChan <- errors.TraceNew("unexpected response") + return + } + + requestResultChan <- nil + }(i%2 == 0) + } + + for i := 0; i < concurrentRequestCount; i++ { + err = <-requestResultChan + if err != nil { + resultChan <- errors.Trace(err) + return + } + } + } + + resultChan <- nil + }() + } + + for i := 0; i < clientCount; i++ { + err = <-resultChan + if err != nil { + return errors.Trace(err) + } + } + + return nil +} + +type testSessionRoundTripper struct { + sessions *ResponderSessions + expectedPeerPublicKey *SessionPublicKey + expectedSessionHandshakeTimeout time.Duration + expectedRequestDelay time.Duration + expectedRequestTimeout time.Duration +} + +func newTestSessionRoundTripper( + sessions *ResponderSessions, + expectedPeerPublicKey *SessionPublicKey, + expectedSessionHandshakeTimeout time.Duration, + expectedRequestDelay time.Duration, + expectedRequestTimeout time.Duration) *testSessionRoundTripper { + + return &testSessionRoundTripper{ + sessions: sessions, + expectedPeerPublicKey: expectedPeerPublicKey, + expectedSessionHandshakeTimeout: expectedSessionHandshakeTimeout, + expectedRequestDelay: expectedRequestDelay, + expectedRequestTimeout: expectedRequestTimeout, + } +} + +func (t *testSessionRoundTripper) MakeRequest() []byte { + return prng.Bytes(prng.Range(100, 1000)) +} + +func (t *testSessionRoundTripper) ExpectedResponse(requestPayload []byte) []byte { + l := len(requestPayload) + responsePayload := make([]byte, l) + for i, b := range requestPayload { + responsePayload[l-i-1] = b + } + return responsePayload +} + +func (t *testSessionRoundTripper) RoundTrip( + ctx context.Context, + roundTripDelay time.Duration, + roundTripTimeout time.Duration, + requestPayload []byte) ([]byte, error) { + + err := ctx.Err() + if err != nil { + return nil, errors.Trace(err) + } + + if t.sessions == nil { + return nil, errors.TraceNew("closed") + } + + if roundTripDelay > 0 { + common.SleepWithContext(ctx, roundTripDelay) + } + + _, requestCancelFunc := context.WithTimeout(ctx, roundTripTimeout) + defer requestCancelFunc() + + isRequestRoundTrip := false + + unwrappedRequestHandler := func(initiatorID ID, unwrappedRequest []byte) ([]byte, error) { + + if t.expectedPeerPublicKey != nil { + + curve25519, err := (*t.expectedPeerPublicKey).ToCurve25519() + if err != nil { + return nil, errors.Trace(err) + } + + if !bytes.Equal(initiatorID[:], curve25519[:]) { + return nil, errors.TraceNew("unexpected initiator 
ID") + } + } + + isRequestRoundTrip = true + + return t.ExpectedResponse(unwrappedRequest), nil + } + + responsePayload, err := t.sessions.HandlePacket(requestPayload, unwrappedRequestHandler) + if err != nil { + if responsePayload == nil { + return nil, errors.Trace(err) + } else { + fmt.Printf("HandlePacket returned packet and error: %v\n", err) + // Continue to relay packets + } + } else { + + // Handshake round trips and request payload round trips should have the + // appropriate delays/timeouts. + if isRequestRoundTrip { + if roundTripDelay != t.expectedRequestDelay { + return nil, errors.TraceNew("unexpected round trip delay") + } + if roundTripTimeout != t.expectedRequestTimeout { + return nil, errors.TraceNew("unexpected round trip timeout") + } + } else { + if roundTripDelay != time.Duration(0) { + return nil, errors.TraceNew("unexpected round trip delay") + } + if roundTripTimeout != t.expectedSessionHandshakeTimeout { + return nil, errors.TraceNew("unexpected round trip timeout") + } + } + } + + return responsePayload, nil +} + +func (t *testSessionRoundTripper) Close() error { + t.sessions = nil + return nil +} + +func TestNoise(t *testing.T) { + err := runTestNoise() + if err != nil { + t.Errorf(errors.Trace(err).Error()) + } +} + +func runTestNoise() error { + + prologue := []byte("psiphon-inproxy-session") + + initiatorPrivateKey, err := GenerateSessionPrivateKey() + if err != nil { + return errors.Trace(err) + } + initiatorPublicKey, err := initiatorPrivateKey.GetPublicKey() + if err != nil { + return errors.Trace(err) + } + curve25519InitiatorPublicKey, err := initiatorPublicKey.ToCurve25519() + if err != nil { + return errors.Trace(err) + } + initiatorKeys := noise.DHKey{ + Public: curve25519InitiatorPublicKey[:], + Private: initiatorPrivateKey.ToCurve25519()[:], + } + + responderPrivateKey, err := GenerateSessionPrivateKey() + if err != nil { + return errors.Trace(err) + } + responderPublicKey, err := responderPrivateKey.GetPublicKey() + if err != nil { + return errors.Trace(err) + } + curve25519ResponderPublicKey, err := responderPublicKey.ToCurve25519() + if err != nil { + return errors.Trace(err) + } + responderKeys := noise.DHKey{ + Public: curve25519ResponderPublicKey[:], + Private: responderPrivateKey.ToCurve25519()[:], + } + + initiatorHandshake, err := noise.NewHandshakeState( + noise.Config{ + CipherSuite: noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashBLAKE2b), + Pattern: noise.HandshakeXK, + Initiator: true, + Prologue: prologue, + StaticKeypair: initiatorKeys, + PeerStatic: responderKeys.Public, + }) + if err != nil { + return errors.Trace(err) + } + + responderHandshake, err := noise.NewHandshakeState( + noise.Config{ + CipherSuite: noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashBLAKE2b), + Pattern: noise.HandshakeXK, + Initiator: false, + Prologue: prologue, + StaticKeypair: responderKeys, + }) + if err != nil { + return errors.Trace(err) + } + + // Noise XK: -> e, es + + var initiatorMsg []byte + initiatorMsg, _, _, err = initiatorHandshake.WriteMessage(initiatorMsg, nil) + if err != nil { + return errors.Trace(err) + } + + var receivedPayload []byte + receivedPayload, _, _, err = responderHandshake.ReadMessage(nil, initiatorMsg) + if err != nil { + return errors.Trace(err) + } + if len(receivedPayload) > 0 { + return errors.TraceNew("unexpected payload") + } + + // Noise XK: <- e, ee + + var responderMsg []byte + responderMsg, _, _, err = responderHandshake.WriteMessage(responderMsg, nil) + if err != nil { + 
return errors.Trace(err) + } + + receivedPayload = nil + receivedPayload, _, _, err = initiatorHandshake.ReadMessage(nil, responderMsg) + if err != nil { + return errors.Trace(err) + } + if len(receivedPayload) > 0 { + return errors.TraceNew("unexpected payload") + } + + // Noise XK: -> s, se + payload + + sendPayload := prng.Bytes(1000) + + var initiatorSend, initiatorReceive *noise.CipherState + var initiatorReplay replay.Filter + + initiatorMsg = nil + initiatorMsg, initiatorSend, initiatorReceive, err = initiatorHandshake.WriteMessage(initiatorMsg, sendPayload) + if err != nil { + return errors.Trace(err) + } + if initiatorSend == nil || initiatorReceive == nil { + return errors.Tracef("unexpected incomplete handshake") + } + + var responderSend, responderReceive *noise.CipherState + var responderReplay replay.Filter + + receivedPayload = nil + receivedPayload, responderReceive, responderSend, err = responderHandshake.ReadMessage(receivedPayload, initiatorMsg) + if err != nil { + return errors.Trace(err) + } + if responderReceive == nil || responderSend == nil { + return errors.TraceNew("unexpected incomplete handshake") + } + if receivedPayload == nil { + return errors.TraceNew("missing payload") + } + if bytes.Compare(sendPayload, receivedPayload) != 0 { + return errors.TraceNew("incorrect payload") + } + + if bytes.Compare(responderHandshake.PeerStatic(), initiatorKeys.Public) != 0 { + return errors.TraceNew("unexpected initiator static public key") + } + + // post-handshake initiator <- responder + + nonce := responderSend.Nonce() + responderMsg = nil + responderMsg, err = responderSend.Encrypt(responderMsg, nil, receivedPayload) + if err != nil { + return errors.Trace(err) + } + + initiatorReceive.SetNonce(nonce) + receivedPayload = nil + receivedPayload, err = initiatorReceive.Decrypt(receivedPayload, nil, responderMsg) + if err != nil { + return errors.Trace(err) + } + if !initiatorReplay.ValidateCounter(nonce, math.MaxUint64) { + return errors.TraceNew("replay detected") + } + if bytes.Compare(sendPayload, receivedPayload) != 0 { + return errors.TraceNew("incorrect payload") + } + + for i := 0; i < 100; i++ { + + // post-handshake initiator -> responder + + sendPayload = prng.Bytes(1000) + + nonce = initiatorSend.Nonce() + initiatorMsg = nil + initiatorMsg, err = initiatorSend.Encrypt(initiatorMsg, nil, sendPayload) + if err != nil { + return errors.Trace(err) + } + + responderReceive.SetNonce(nonce) + receivedPayload = nil + receivedPayload, err = responderReceive.Decrypt(receivedPayload, nil, initiatorMsg) + if err != nil { + return errors.Trace(err) + } + if !responderReplay.ValidateCounter(nonce, math.MaxUint64) { + return errors.TraceNew("replay detected") + } + if bytes.Compare(sendPayload, receivedPayload) != 0 { + return errors.TraceNew("incorrect payload") + } + + // post-handshake initiator <- responder + + nonce = responderSend.Nonce() + responderMsg = nil + responderMsg, err = responderSend.Encrypt(responderMsg, nil, receivedPayload) + if err != nil { + return errors.Trace(err) + } + + responderReceive.SetNonce(nonce) + receivedPayload = nil + receivedPayload, err = initiatorReceive.Decrypt(receivedPayload, nil, responderMsg) + if err != nil { + return errors.Trace(err) + } + if !initiatorReplay.ValidateCounter(nonce, math.MaxUint64) { + return errors.TraceNew("replay detected") + } + if bytes.Compare(sendPayload, receivedPayload) != 0 { + return errors.TraceNew("incorrect payload") + } + } + + return nil +} diff --git a/psiphon/common/inproxy/webrtc.go 
b/psiphon/common/inproxy/webrtc.go new file mode 100644 index 000000000..c5a2abfcb --- /dev/null +++ b/psiphon/common/inproxy/webrtc.go @@ -0,0 +1,2136 @@ +//go:build PSIPHON_ENABLE_INPROXY + +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package inproxy + +import ( + "bytes" + "context" + "encoding/binary" + std_errors "errors" + "fmt" + "net" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + inproxy_dtls "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy/dtls" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/stacktrace" + "github.com/pion/datachannel" + "github.com/pion/dtls/v2" + "github.com/pion/ice/v2" + pion_logging "github.com/pion/logging" + "github.com/pion/sdp/v3" + "github.com/pion/stun" + "github.com/pion/transport/v2" + "github.com/pion/webrtc/v3" + "github.com/wlynxg/anet" +) + +const ( + dataChannelAwaitTimeout = 20 * time.Second + dataChannelBufferedAmountLowThreshold uint64 = 512 * 1024 + dataChannelMaxBufferedAmount uint64 = 1024 * 1024 + dataChannelMaxMessageSize = 65536 + dataChannelMaxLabelLength = 512 + + // Psiphon uses a fork of github.com/pion/dtls/v2, selected with go mod + // replace, which has an idential API apart from dtls.IsPsiphon. If + // dtls.IsPsiphon is undefined, the build is not using the fork. + // + // Limitation: this doesn't check that the vendored code is exactly the + // same code as the fork. + assertDTLSFork = dtls.IsPsiphon + + // Similarly, check for the fork of github.com/pion/ice/v2. + assertICEFork = ice.IsPsiphon + + // Note that Psiphon also uses a fork of github.com/pion/webrtc/v3, but it + // has an API change which will cause builds to fail when not present. +) + +// webRTCConn is a WebRTC connection between two peers, with a data channel +// used to relay streams or packets between them. WebRTCConn implements the +// net.Conn interface. 
+type webRTCConn struct {
+ config *webRTCConfig
+ trafficShapingParameters *DataChannelTrafficShapingParameters
+
+ mutex sync.Mutex
+ udpConn net.PacketConn
+ portMapper *portMapper
+ isClosed bool
+ closedSignal chan struct{}
+ peerConnection *webrtc.PeerConnection
+ dataChannel *webrtc.DataChannel
+ dataChannelConn datachannel.ReadWriteCloser
+ dataChannelOpenedSignal chan struct{}
+ dataChannelOpenedOnce sync.Once
+ dataChannelWriteBufferSignal chan struct{}
+ decoyDone bool
+ iceCandidatePairMetrics common.LogFields
+
+ readMutex sync.Mutex
+ readBuffer []byte
+ readOffset int
+ readLength int
+ readError error
+ peerPaddingDone bool
+
+ writeMutex sync.Mutex
+ trafficShapingPRNG *prng.PRNG
+ trafficShapingBuffer *bytes.Buffer
+ paddedMessageCount int
+ decoyMessageCount int
+ trafficShapingDone bool
+
+ paddedMessagesSent int32
+ paddedMessagesReceived int32
+ decoyMessagesSent int32
+ decoyMessagesReceived int32
+}
+
+// webRTCConfig specifies the configuration for a WebRTC dial.
+type webRTCConfig struct {
+
+ // Logger is used to log events.
+ Logger common.Logger
+
+ // EnableDebugLogging indicates whether to log pion/webrtc debug and trace
+ // events. When enabled, these events will be logged to the specified
+ // Logger at a Debug log level.
+ EnableDebugLogging bool
+
+ // WebRTCDialCoordinator specifies specific WebRTC dial strategies and
+ // settings; WebRTCDialCoordinator also facilitates dial replay by
+ // receiving callbacks when individual dial steps succeed or fail.
+ WebRTCDialCoordinator WebRTCDialCoordinator
+
+ // ClientRootObfuscationSecret is generated (or replayed) by the client
+ // and sent to the proxy and used to drive obfuscation operations.
+ ClientRootObfuscationSecret ObfuscationSecret
+
+ // DoDTLSRandomization indicates whether to perform DTLS randomization.
+ DoDTLSRandomization bool
+
+ // TrafficShapingParameters indicates whether and how to perform data channel traffic shaping.
+ TrafficShapingParameters *DataChannelTrafficShapingParameters
+
+ // ReliableTransport indicates whether to configure the WebRTC data
+ // channel to use reliable transport. Set ReliableTransport when proxying
+ // a TCP stream, and unset it when proxying a UDP packet flow with its
+ // own reliability layer, such as QUIC.
+ ReliableTransport bool
+}
+
+// newWebRTCConnWithOffer initiates a new WebRTC connection. An offer SDP is
+// returned, to be sent to the peer. After the offer SDP is forwarded and an
+// answer SDP received in response, call SetRemoteSDP with the answer SDP and
+// then call AwaitInitialDataChannel to await the eventual WebRTC connection
+// establishment.
+func newWebRTCConnWithOffer(
+ ctx context.Context,
+ config *webRTCConfig) (
+ *webRTCConn, WebRTCSessionDescription, *webRTCSDPMetrics, error) {
+
+ conn, SDP, metrics, err := newWebRTCConn(ctx, config, nil)
+ if err != nil {
+ return nil, WebRTCSessionDescription{}, nil, errors.Trace(err)
+ }
+ return conn, *SDP, metrics, nil
+}
+
+// newWebRTCConnWithAnswer creates a new WebRTC connection initiated by a peer
+// that provided an offer SDP. An answer SDP is returned to be sent to the
+// peer. After the answer SDP is forwarded, call AwaitInitialDataChannel to
+// await the eventual WebRTC connection establishment.
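Taken together, the offer/answer flow described in these doc comments looks roughly like the sketch below; error handling and the broker signaling exchange are elided, and clientConfig/proxyConfig are placeholders for fully populated webRTCConfig values:

	// Offer side (client): create the conn and obtain the offer SDP.
	clientConn, offerSDP, _, err := newWebRTCConnWithOffer(ctx, clientConfig)

	// ... forward offerSDP to the answer side via the broker ...

	// Answer side (proxy): consume the offer and produce the answer SDP.
	proxyConn, answerSDP, _, err := newWebRTCConnWithAnswer(ctx, proxyConfig, offerSDP)

	// ... forward answerSDP back to the offer side via the broker ...

	// The offer side applies the answer; both sides then await the data channel.
	err = clientConn.SetRemoteSDP(answerSDP)
	err = clientConn.AwaitInitialDataChannel(ctx)
	err = proxyConn.AwaitInitialDataChannel(ctx)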
+func newWebRTCConnWithAnswer( + ctx context.Context, + config *webRTCConfig, + peerSDP WebRTCSessionDescription) ( + *webRTCConn, WebRTCSessionDescription, *webRTCSDPMetrics, error) { + + conn, SDP, metrics, err := newWebRTCConn(ctx, config, &peerSDP) + if err != nil { + return nil, WebRTCSessionDescription{}, nil, errors.Trace(err) + } + return conn, *SDP, metrics, nil +} + +func newWebRTCConn( + ctx context.Context, + config *webRTCConfig, + peerSDP *WebRTCSessionDescription) ( + retconn *webRTCConn, + retSDP *WebRTCSessionDescription, + retMetrics *webRTCSDPMetrics, + retErr error) { + + isOffer := peerSDP == nil + + udpConn, err := config.WebRTCDialCoordinator.UDPListen(ctx) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + // Initialize WebRTC + + // There is no explicit anti-probing measures for the proxy side of the + // WebRTC connection, since each proxy "listener" is ephemeral, and since + // the WebRTC data channel protocol authenticates peers with + // certificates, so even if a probe were to find an ephemeral proxy + // listener, the listener can respond the same as a normal WebRTC end + // point would respond to a peer that doesn't have the correct credentials. + // + // pion's Mux API is used, as it enables providing a pre-created UDP + // socket which is configured with necessary BindToDevice settings. We do + // not actually multiplex multiple client connections on a single proxy + // connection. As a proxy creates a new UDP socket and Mux for each + // client, this currently open issue should not impact our + // implementation: "Listener doesn't process parallel handshakes", + // https://github.com/pion/dtls/issues/279. + // + // We detach data channels in order to use the standard Read/Write APIs. + // As detaching avoids using the pion DataChannel read loop, this + // currently open issue should not impact our + // implementation: "DataChannel.readLoop goroutine leak", + // https://github.com/pion/webrtc/issues/2098. + + // UDPMux Limitations: + // + // For Psiphon, WebRTCDialCoordinator.UDPListen will call + // https://pkg.go.dev/net#ListenUDP with an unspecified IP address, in + // order to listen on all available interfaces, both IPv4 and IPv6. + // However, using webrtc.NewICEUDPMux and a UDP conn with an unspecified + // IP address results in this log warning: "UDPMuxDefault should not + // listening on unspecified address, use NewMultiUDPMuxFromPort instead". + // + // With NewICEUDPMux and an unspecified IP address, pion currently + // enumerates local, active interfaces and derives a list of listening + // addresses, combining each interface's IP addresses with the assigned + // port: + // https://github.com/pion/ice/blob/8c5b0991ef3bb070e47afda96faf090e8bf94be6/net.go#L35. + // While this works ok in many cases, this PR, + // https://github.com/pion/ice/pull/475, indicates the nature of the + // issue with UDPMuxDefault: + // + // > When we have multiple host candidates and been mux to a single port, + // > if these candidates share a same conn (either tcp or udp), they + // > might read other's [messages causing failure]. + // + // This PR, https://github.com/pion/ice/pull/473, also describes the issue: + // + // > When using UDPMux and UniversalUDPMux, it is possible that a + // > registerConnForAddress() could be called twice or more for the same + // > remote candidate (endpoint) by different candidates. E.g., when + // > different HOST candidates ping the same remote candidate, the + // > udpMuxedConn gets stored once. 
The second candidate will never
+ // > receive a response. This is also the case when a single socket is
+ // > used for gathering SRFLX and HOST candidates.
+ //
+ // PR 475 introduced MultiUDPMuxDefault to address the issue. However, at
+ // this time, https://github.com/pion/ice/releases/tag/v2.3.6, there's an
+ // open bug with MultiUDPMuxDefault
+ // https://github.com/pion/ice/issues/507: "Multi UDP Mux can't works
+ // when remote also enables Multi UDP Mux". Running the test program
+ // attached to the bug confirms that no data channel is established,
+ // while switching the test code to use NewICEUDPMux results in a
+ // successful data channel connection. Since we need to use a Mux API on
+ // both clients and proxies, we can't yet use MultiUDPMux.
+ //
+ // We patch pion/webrtc to add the SetICEUDPMuxSrflx functionality from
+ // the currently unmerged https://github.com/pion/webrtc/pull/2298.
+ // Without SetICEUDPMuxSrflx, STUN operations don't use the mux.
+ //
+ // We patch pion/ice's gatherCandidatesSrflxUDPMux, via a vendor patch, to
+ // include only the correct network type (IPv4 or IPv6) address candidates.
+ // Without this patch, we observed up to 2x duplicate/redundant STUN
+ // candidates.
+ //
+ // TODO: implement and try using transport.Net UDP dial functions in place
+ // of NewICEUDPMux and pre-dialed UDP conn; track all dialed UDP
+ // connections to close on WebRTCConn.Close; this approach would require
+ // an alternative approach to injecting port mapping candidates, which
+ // currently depends on the mux UDP socket being available outside of pion.
+
+ // Another limitation and issue with NewICEUDPMux is that its enumeration
+ // of all local interfaces and IPs includes many IPv6 addresses for
+ // certain interfaces. For example, on macOS,
+ // https://apple.stackexchange.com/a/371661, there are "secured" IPv6
+ // addresses and many "temporary" IPv6 addresses, with all but one
+ // temporary address being "deprecated". Instead of a full enumeration,
+ // we should select only the non-deprecated temporary IPv6 address --
+ // both for performance (avoid excess STUN requests) and privacy.
+ //
+ // Go has a proposal to expose the necessary IPv6 address information:
+ // https://github.com/golang/go/issues/42694. However, as of Android SDK
+ // 30, Go's net.InterfaceAddrs doesn't work at all:
+ // https://github.com/pion/transport/issues/228,
+ // https://github.com/golang/go/issues/40569.
+ //
+ // Note that it's not currently possible to use
+ // webrtc.SettingEngine.SetIPFilter to limit IPv6 selection to a single
+ // candidate; that IP filter is not passed through to localInterfaces in
+ // the NewUDPMuxDefault case. And even if it were, there's no guarantee
+ // that the first IPv6 address passed to the filter would be the
+ // non-deprecated temporary address.
+ //
+ // To work around net.Interface issues, we use SettingEngine.SetNet to plug
+ // in an alternative implementation of net.Interface which selects only
+ // one IPv4 and one IPv6 active interface and IP address and uses the
+ // anet package for Android. See pionNetwork for more details.
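As a rough illustration of the single-address selection described above (a sketch only, not the actual pionNetwork implementation, which must also use the anet package on Android and consider interface state), picking at most one global unicast IPv4 and one IPv6 address could look like:

	// selectCandidateIPs is an illustrative sketch: it returns at most one
	// global unicast IPv4 and one IPv6 address from the local interfaces.
	func selectCandidateIPs() (ipv4, ipv6 net.IP, err error) {
		addrs, err := net.InterfaceAddrs()
		if err != nil {
			return nil, nil, err
		}
		for _, addr := range addrs {
			ipNet, ok := addr.(*net.IPNet)
			if !ok || !ipNet.IP.IsGlobalUnicast() {
				continue
			}
			if ip4 := ipNet.IP.To4(); ip4 != nil {
				if ipv4 == nil {
					ipv4 = ip4
				}
			} else if ipv6 == nil {
				ipv6 = ipNet.IP
			}
		}
		return ipv4, ipv6, nil
	}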
+
+ deadline, _ := ctx.Deadline()
+ TTL := time.Until(deadline)
+
+ pionLoggerFactory := newPionLoggerFactory(
+ config.Logger,
+ config.EnableDebugLogging)
+
+ pionNetwork := newPionNetwork(
+ ctx, pionLoggerFactory.NewLogger("net"), config.WebRTCDialCoordinator)
+
+ udpMux := webrtc.NewICEUniversalUDPMux(
+ pionLoggerFactory.NewLogger("mux"), udpConn, TTL, pionNetwork)
+
+ settingEngine := webrtc.SettingEngine{
+ LoggerFactory: pionLoggerFactory,
+ }
+ settingEngine.SetNet(pionNetwork)
+ settingEngine.DetachDataChannels()
+ settingEngine.SetICEMulticastDNSMode(ice.MulticastDNSModeDisabled)
+ settingEngine.SetICEUDPMux(udpMux)
+ settingEngine.SetICEUDPMuxSrflx(udpMux)
+
+ // Set this behavior to look like common web browser WebRTC stacks.
+ settingEngine.SetDTLSInsecureSkipHelloVerify(true)
+
+ settingEngine.EnableSCTPZeroChecksum(true)
+
+ // Timeout, retry, and delay adjustments
+ //
+ // - Add some jitter to timed operations to avoid a trivial pion timing
+ // fingerprint.
+ //
+ // - Reduce the wait time for STUN and peer reflexive candidates from the
+ // default 500ms and 1s.
+ //
+ // - Reduce keepalive frequency, from the default period of 2s to +/-15s,
+ // and increase the disconnect timeout from the default 5s to 3x15s.
+ //
+ // TODO:
+ //
+ // - Configuration via tactics.
+ //
+ // - While the RFC,
+ // https://datatracker.ietf.org/doc/html/rfc5245#section-10, calls for
+ // keep alives no less than 15s, implementations such as Chrome send
+ // keep alives much more frequently,
+ // https://issues.webrtc.org/issues/42221718.
+ //
+ // - Varying the period between each keepalive, as is done with SSH via
+ // SSHKeepAlivePeriodMin/Max, requires changes to pion/dtls.
+ //
+ // - Some traffic-related timeouts are not yet exposed via settingEngine,
+ // including ice.defaultSTUNGatherTimeout, ice.maxBindingRequestTimeout.
+
+ settingEngine.SetDTLSRetransmissionInterval(prng.JitterDuration(100*time.Millisecond, 0.1))
+ settingEngine.SetHostAcceptanceMinWait(0)
+ settingEngine.SetSrflxAcceptanceMinWait(prng.JitterDuration(100*time.Millisecond, 0.1))
+ settingEngine.SetPrflxAcceptanceMinWait(prng.JitterDuration(200*time.Millisecond, 0.1))
+ settingEngine.SetICETimeouts(45*time.Second, 0, prng.JitterDuration(15*time.Second, 0.2))
+ settingEngine.SetICEMaxBindingRequests(10)
+
+ // Initialize data channel obfuscation
+
+ config.Logger.WithTraceFields(common.LogFields{
+ "dtls_randomization": config.DoDTLSRandomization,
+ "data_channel_traffic_shaping": config.TrafficShapingParameters != nil,
+ }).Info("webrtc_data_channel_obfuscation")
+
+ // Facilitate DTLS Client/ServerHello randomization. The client decides
+ // whether to do DTLS randomization and generates ClientRootObfuscationSecret,
+ // which is sent to the proxy, so the client can orchestrate replay on
+ // both ends of the connection by reusing an obfuscation secret. Derive a
+ // secret specific to DTLS. SetDTLSSeed will further derive a secure PRNG
+ // seed specific to either the client or proxy end of the connection
+ // (so each peer's randomization will be distinct).
+ //
+ // To avoid forking many pion repos in order to pass the seed through to
+ // the DTLS implementation, SetDTLSSeed attaches the seed to the DTLS
+ // dial context.
+ //
+ // Either SetDTLSSeed or SetNoDTLSSeed should be set for each conn, as the
+ // pion/dtls fork treats no-seed as an error, as a check against the
+ // context value mechanism.
+ + var dtlsCtx context.Context + if config.DoDTLSRandomization { + + dtlsObfuscationSecret, err := deriveObfuscationSecret( + config.ClientRootObfuscationSecret, "in-proxy-DTLS-seed") + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + baseSeed := prng.Seed(dtlsObfuscationSecret) + + dtlsCtx, err = inproxy_dtls.SetDTLSSeed(ctx, &baseSeed, isOffer) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + } else { + + dtlsCtx = inproxy_dtls.SetNoDTLSSeed(ctx) + } + settingEngine.SetDTLSConnectContextMaker(func() (context.Context, func()) { + return context.WithCancel(dtlsCtx) + }) + + // Configure traffic shaping, which adds random padding and decoy messages + // to data channel message flows. + + var trafficShapingPRNG *prng.PRNG + trafficShapingBuffer := new(bytes.Buffer) + paddedMessageCount := 0 + decoyMessageCount := 0 + + if config.TrafficShapingParameters != nil { + + // TODO: also use pion/dtls.Config.PaddingLengthGenerator? + + trafficShapingContext := "in-proxy-data-channel-traffic-shaping-offer" + if !isOffer { + trafficShapingContext = "in-proxy-data-channel-traffic-shaping-answer" + } + + trafficShapingObfuscationSecret, err := deriveObfuscationSecret( + config.ClientRootObfuscationSecret, trafficShapingContext) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + seed := prng.Seed(trafficShapingObfuscationSecret) + trafficShapingPRNG = prng.NewPRNGWithSeed(&seed) + + paddedMessageCount = trafficShapingPRNG.Range( + config.TrafficShapingParameters.MinPaddedMessages, + config.TrafficShapingParameters.MaxPaddedMessages) + + decoyMessageCount = trafficShapingPRNG.Range( + config.TrafficShapingParameters.MinDecoyMessages, + config.TrafficShapingParameters.MaxDecoyMessages) + } + + // NAT traversal setup + + // When DisableInboundForMobileNetworks is set, skip both STUN and port + // mapping for mobile networks. Most mobile networks use CGNAT and + // neither STUN nor port mapping will be effective. It's faster to not + // wait for something that ultimately won't work. + + disableInbound := config.WebRTCDialCoordinator.DisableInboundForMobileNetworks() && + config.WebRTCDialCoordinator.NetworkType() == NetworkTypeMobile + + // Try to establish a port mapping (UPnP-IGD, PCP, or NAT-PMP). The port + // mapper will attempt to identify the local gateway and query various + // port mapping protocols. portMapper.start launches this process and + // does not block. Port mappings are not part of the WebRTC standard, or + // supported by pion/webrtc. Instead, if a port mapping is established, + // it's edited into the SDP as a new host-type ICE candidate. + + localPort := udpConn.LocalAddr().(*net.UDPAddr).Port + portMapper := newPortMapper(config.Logger, localPort) + + doPortMapping := !disableInbound && !config.WebRTCDialCoordinator.DisablePortMapping() + + if doPortMapping { + portMapper.start() + } + + // Select a STUN server for ICE hole punching. The STUN server to be used + // needs only support bind and not full RFC5780 NAT discovery. + // + // Each dial trys only one STUN server; in Psiphon tunnel establishment, + // other, concurrent in-proxy dials may select alternative STUN servers + // via WebRTCDialCoordinator. When the STUN server operation is successful, + // WebRTCDialCoordinator will be signaled so that it may configure the STUN + // server selection for replay. + // + // The STUN server will observe proxy IP addresses. 
Enumeration is + // mitigated by using various public STUN servers, including Psiphon STUN + // servers for proxies in non-censored regions. Proxies are also more + // ephemeral than Psiphon servers. + + RFC5780 := false + stunServerAddress := config.WebRTCDialCoordinator.STUNServerAddress(RFC5780) + + // Proceed even when stunServerAddress is "" and !DisableSTUN, as ICE may + // find other host candidates. + + doSTUN := stunServerAddress != "" && !disableInbound && !config.WebRTCDialCoordinator.DisableSTUN() + + var ICEServers []webrtc.ICEServer + + if doSTUN { + // stunServerAddress domain names are resolved with the Psiphon custom + // resolver via pionNetwork.ResolveUDPAddr + ICEServers = []webrtc.ICEServer{{URLs: []string{"stun:" + stunServerAddress}}} + } + + conn := &webRTCConn{ + config: config, + + udpConn: udpConn, + portMapper: portMapper, + closedSignal: make(chan struct{}), + dataChannelOpenedSignal: make(chan struct{}), + dataChannelWriteBufferSignal: make(chan struct{}, 1), + + // A data channel uses SCTP and is message oriented. The maximum + // message size supported by pion/webrtc is 65536: + // https://github.com/pion/webrtc/blob/dce970438344727af9c9965f88d958c55d32e64d/datachannel.go#L19. + // This read buffer must be as large as the maximum message size or + // else a read may fail with io.ErrShortBuffer. + readBuffer: make([]byte, dataChannelMaxMessageSize), + + trafficShapingPRNG: trafficShapingPRNG, + trafficShapingBuffer: trafficShapingBuffer, + paddedMessageCount: paddedMessageCount, + decoyMessageCount: decoyMessageCount, + } + defer func() { + if retErr != nil { + // Cleanup on early return + conn.Close() + + // Notify the WebRTCDialCoordinator that the operation failed so + // that it can clear replay for that STUN server selection. + // + // Limitation: the error here may be due to failures unrelated to + // the STUN server. + + if ctx.Err() == nil && doSTUN { + config.WebRTCDialCoordinator.STUNServerAddressFailed(RFC5780, stunServerAddress) + } + } + }() + + settingEngine.SetICEBindingRequestHandler(conn.onICEBindingRequest) + + // All settingEngine configuration must be done before calling NewAPI. + webRTCAPI := webrtc.NewAPI(webrtc.WithSettingEngine(settingEngine)) + + conn.peerConnection, err = webRTCAPI.NewPeerConnection( + webrtc.Configuration{ + ICEServers: ICEServers, + }) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + conn.peerConnection.OnConnectionStateChange(conn.onConnectionStateChange) + conn.peerConnection.OnICECandidate(conn.onICECandidate) + conn.peerConnection.OnICEConnectionStateChange(conn.onICEConnectionStateChange) + conn.peerConnection.OnICEGatheringStateChange(conn.onICEGatheringStateChange) + conn.peerConnection.OnNegotiationNeeded(conn.onNegotiationNeeded) + conn.peerConnection.OnSignalingStateChange(conn.onSignalingStateChange) + conn.peerConnection.OnDataChannel(conn.onDataChannel) + + // As a future enhancement, consider using media channels instead of data + // channels, as media channels may be more common. Proxied QUIC would + // work over an unreliable media channel. Note that a media channel is + // still prefixed with STUN and DTLS exchanges before SRTP begins, so the + // first few packets are the same as a data channel. + + // The offer sets the data channel configuration. 
+ if isOffer { + + dataChannelInit := &webrtc.DataChannelInit{} + if !config.ReliableTransport { + ordered := false + dataChannelInit.Ordered = &ordered + maxRetransmits := uint16(0) + dataChannelInit.MaxRetransmits = &maxRetransmits + } + + // Generate a random length label, to vary the DATA_CHANNEL_OPEN + // message length. The label is derived from and replayed via + // ClientRootObfuscationSecret. + labelObfuscationSecret, err := deriveObfuscationSecret( + config.ClientRootObfuscationSecret, "in-proxy-data-channel-label") + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + seed := prng.Seed(labelObfuscationSecret) + labelPRNG := prng.NewPRNGWithSeed(&seed) + dataChannelLabel := labelPRNG.HexString( + labelPRNG.Range(1, dataChannelMaxLabelLength/2)) + + dataChannel, err := conn.peerConnection.CreateDataChannel( + dataChannelLabel, dataChannelInit) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + conn.setDataChannel(dataChannel) + } + + // Prepare to await full ICE completion, including STUN candidates. + // Trickle ICE is not used, simplifying the broker API. It's expected + // that most clients and proxies will be behind a NAT, and not have + // publicly addressable host candidates. TURN is not used. So most + // candidates will be STUN, or server-reflexive, candidates. + // + // Later, the first to complete out of ICE or port mapping is used. + // + // TODO: stop waiting if an IPv6 host candidate is found? + + iceComplete := webrtc.GatheringCompletePromise(conn.peerConnection) + + // Create an offer, or input a peer's offer to create an answer. + + if isOffer { + + offer, err := conn.peerConnection.CreateOffer(nil) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + err = conn.peerConnection.SetLocalDescription(offer) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + } else { + + pionSessionDescription := webrtc.SessionDescription{ + Type: webrtc.SDPType(peerSDP.Type), + SDP: peerSDP.SDP, + } + + err = conn.peerConnection.SetRemoteDescription(pionSessionDescription) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + answer, err := conn.peerConnection.CreateAnswer(nil) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + err = conn.peerConnection.SetLocalDescription(answer) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + } + + // Await either ICE or port mapping completion. + + // As a future enhancement, track which of ICE or port mapping succeeds + // and is then followed by a failed WebRTC dial; stop trying the method + // that often fails. + + iceCompleted := false + portMappingExternalAddr := "" + + select { + case <-iceComplete: + iceCompleted = true + + case portMappingExternalAddr = <-portMapper.portMappingExternalAddress(): + + // Set responding port mapping types for metrics. + // + // Limitation: if there are multiple responding protocol types, it's + // not known here which was used for this dial. + config.WebRTCDialCoordinator.SetPortMappingTypes( + getRespondingPortMappingTypes(config.WebRTCDialCoordinator.NetworkID())) + + case <-ctx.Done(): + return nil, nil, nil, errors.Trace(ctx.Err()) + } + + // Release any port mapping resources when not using it. 
+ if portMapper != nil && portMappingExternalAddr == "" { + portMapper.close() + conn.portMapper = nil + } + + config.Logger.WithTraceFields(common.LogFields{ + "ice_completed": iceCompleted, + "port_mapping": portMappingExternalAddr != "", + }).Info("webrtc_candidates_gathered") + + // Get the offer or answer, now populated with any ICE candidates. + + localDescription := conn.peerConnection.LocalDescription() + + // Adjust the SDP, removing local network addresses and adding any + // port mapping candidate. Clients (offer) are permitted to have + // no ICE candidates but proxies (answer) must have at least one + //candidate. + errorOnNoCandidates := !isOffer + + adjustedSDP, metrics, err := prepareSDPAddresses( + []byte(localDescription.SDP), + errorOnNoCandidates, + portMappingExternalAddr, + config.WebRTCDialCoordinator.DisableIPv6ICECandidates()) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + + // When STUN was attempted, ICE completed, and a STUN server-reflexive + // candidate is present, notify the WebRTCDialCoordinator so that it can + // set replay for that STUN server selection. + + if iceCompleted && doSTUN { + hasServerReflexive := false + for _, candidateType := range metrics.iceCandidateTypes { + if candidateType == ICECandidateServerReflexive { + hasServerReflexive = true + } + } + if hasServerReflexive { + config.WebRTCDialCoordinator.STUNServerAddressSucceeded(RFC5780, stunServerAddress) + } else { + config.WebRTCDialCoordinator.STUNServerAddressFailed(RFC5780, stunServerAddress) + } + } + + // The WebRTCConn is prepared, but the data channel is not yet connected. + // On the offer end, the peer's following answer must be input to + // SetRemoteSDP. And both ends must call AwaitInitialDataChannel to await + // the data channel establishment. + + return conn, + &WebRTCSessionDescription{ + Type: int(localDescription.Type), + SDP: string(adjustedSDP), + }, + metrics, + nil +} + +func (conn *webRTCConn) setDataChannel(dataChannel *webrtc.DataChannel) { + + // Assumes the caller holds conn.mutex, or is newWebRTCConn, creating the + // conn. + + conn.dataChannel = dataChannel + conn.dataChannel.OnOpen(conn.onDataChannelOpen) + conn.dataChannel.OnClose(conn.onDataChannelClose) + + // Set up flow control (see comment in conn.Write) + conn.dataChannel.SetBufferedAmountLowThreshold(dataChannelBufferedAmountLowThreshold) + conn.dataChannel.OnBufferedAmountLow(func() { + select { + case conn.dataChannelWriteBufferSignal <- struct{}{}: + default: + } + }) +} + +// SetRemoteSDP takes the answer SDP that is received in response to an offer +// SDP. SetRemoteSDP initiates the WebRTC connection establishment on the +// offer end. +func (conn *webRTCConn) SetRemoteSDP(peerSDP WebRTCSessionDescription) error { + conn.mutex.Lock() + defer conn.mutex.Unlock() + + pionSessionDescription := webrtc.SessionDescription{ + Type: webrtc.SDPType(peerSDP.Type), + SDP: peerSDP.SDP, + } + + err := conn.peerConnection.SetRemoteDescription(pionSessionDescription) + if err != nil { + return errors.Trace(err) + } + return nil +} + +// AwaitInitialDataChannel returns when the data channel is established, or +// when an error has occured. +func (conn *webRTCConn) AwaitInitialDataChannel(ctx context.Context) error { + + // Don't lock the mutex, or else necessary operations will deadlock. + + select { + case <-conn.dataChannelOpenedSignal: + + // The data channel is connected. 
+ + err := conn.recordSelectedICECandidateStats() + if err != nil { + conn.config.Logger.WithTraceFields(common.LogFields{ + "error": err.Error()}).Warning("recordCandidateStats failed") + // Continue without log + } + + case <-ctx.Done(): + return errors.Tracef("with ICE candidate pairs %s: %w", + conn.getICECandidatePairsSummary(), + ctx.Err()) + + case <-conn.closedSignal: + return errors.TraceNew("connection has closed") + } + + return nil +} + +func (conn *webRTCConn) getICECandidatePairsSummary() string { + conn.mutex.Lock() + defer conn.mutex.Unlock() + + stateCounts := map[webrtc.StatsICECandidatePairState]int{} + + statsReport := conn.peerConnection.GetStats() + for key, stats := range statsReport { + + // Uses the pion StatsReport key formats "candidate:" + // and "candidate:-candidate:" + + key, found := strings.CutPrefix(key, "candidate:") + if !found { + continue + } + candidateIDs := strings.Split(key, "-candidate:") + if len(candidateIDs) != 2 { + continue + } + + candidatePairStats, ok := stats.(webrtc.ICECandidatePairStats) + if !ok { + continue + } + + stateCounts[candidatePairStats.State] += 1 + } + + if len(stateCounts) == 0 { + return "(none)" + } + + var strs []string + for state, count := range stateCounts { + strs = append(strs, fmt.Sprintf("%s(%d)", state, count)) + } + return strings.Join(strs, ", ") +} + +func (conn *webRTCConn) recordSelectedICECandidateStats() error { + conn.mutex.Lock() + defer conn.mutex.Unlock() + + statsReport := conn.peerConnection.GetStats() + foundNominatedPair := false + for key, stats := range statsReport { + + // Uses the pion StatsReport key formats "candidate:" + // and "candidate:-candidate:" + + key, found := strings.CutPrefix(key, "candidate:") + if !found { + continue + } + candidateIDs := strings.Split(key, "-candidate:") + if len(candidateIDs) != 2 { + continue + } + + candidatePairStats, ok := stats.(webrtc.ICECandidatePairStats) + if !ok || + candidatePairStats.State != webrtc.StatsICECandidatePairStateSucceeded || + !candidatePairStats.Nominated { + continue + } + + localKey := fmt.Sprintf("candidate:%s", candidateIDs[0]) + stats, ok := statsReport[localKey] + if !ok { + return errors.TraceNew("missing local ICECandidateStats") + } + localCandidateStats, ok := stats.(webrtc.ICECandidateStats) + if !ok { + return errors.TraceNew("unexpected local ICECandidateStats") + } + + remoteKey := fmt.Sprintf("candidate:%s", candidateIDs[1]) + stats, ok = statsReport[remoteKey] + if !ok { + return errors.TraceNew("missing remote ICECandidateStats") + } + remoteCandidateStats, ok := stats.(webrtc.ICECandidateStats) + if !ok { + return errors.TraceNew("unexpected remote ICECandidateStats") + } + + // Use the same ICE candidate type names as logged in broker logs. 
+ logCandidateType := func( + iceCandidateType webrtc.ICECandidateType) string { + logType := ICECandidateUnknown + switch iceCandidateType { + case webrtc.ICECandidateTypeHost: + logType = ICECandidateHost + case webrtc.ICECandidateTypeSrflx: + logType = ICECandidateServerReflexive + case webrtc.ICECandidateTypePrflx: + logType = ICECandidatePeerReflexive + } + return logType.String() + } + + conn.iceCandidatePairMetrics = common.LogFields{} + + // TODO: log which of local/remote candidate is initiator + + conn.iceCandidatePairMetrics["inproxy_webrtc_local_ice_candidate_type"] = + logCandidateType(localCandidateStats.CandidateType) + localIP := net.ParseIP(localCandidateStats.IP) + isIPv6 := "0" + if localIP != nil && localIP.To4() == nil { + isIPv6 = "1" + } + conn.iceCandidatePairMetrics["inproxy_webrtc_local_ice_candidate_is_IPv6"] = + isIPv6 + conn.iceCandidatePairMetrics["inproxy_webrtc_local_ice_candidate_port"] = + localCandidateStats.Port + + conn.iceCandidatePairMetrics["inproxy_webrtc_remote_ice_candidate_type"] = + logCandidateType(remoteCandidateStats.CandidateType) + remoteIP := net.ParseIP(remoteCandidateStats.IP) + isIPv6 = "0" + if remoteIP != nil && remoteIP.To4() == nil { + isIPv6 = "1" + } + conn.iceCandidatePairMetrics["inproxy_webrtc_remote_ice_candidate_is_IPv6"] = + isIPv6 + conn.iceCandidatePairMetrics["inproxy_webrtc_remote_ice_candidate_port"] = + remoteCandidateStats.Port + + foundNominatedPair = true + break + } + if !foundNominatedPair { + return errors.TraceNew("missing nominated ICECandidateStatsPair") + } + + return nil +} + +func (conn *webRTCConn) Close() error { + conn.mutex.Lock() + defer conn.mutex.Unlock() + + if conn.isClosed { + return nil + } + + if conn.portMapper != nil { + conn.portMapper.close() + } + + if conn.dataChannelConn != nil { + conn.dataChannelConn.Close() + } + if conn.dataChannel != nil { + conn.dataChannel.Close() + } + if conn.peerConnection != nil { + conn.peerConnection.Close() + } + + // Close the udpConn to interrupt any blocking DTLS handshake: + // https://github.com/pion/webrtc/blob/c1467e4871c78ee3f463b50d858d13dc6f2874a4/dtlstransport.go#L334-L340 + // + // Limitation: there is no guarantee that pion sends any closing packets + // before the UDP socket is closed here. + + if conn.udpConn != nil { + conn.udpConn.Close() + } + + close(conn.closedSignal) + + conn.isClosed = true + + return nil +} + +func (conn *webRTCConn) IsClosed() bool { + conn.mutex.Lock() + defer conn.mutex.Unlock() + + return conn.isClosed +} + +func (conn *webRTCConn) Read(p []byte) (int, error) { + + for { + + n, err := conn.readMessage(p) + if err != nil || n > 0 { + return n, err + } + + // A decoy message was read; discard and read again. + } +} + +func (conn *webRTCConn) readMessage(p []byte) (int, error) { + + // Don't hold this lock, or else concurrent Writes will be blocked. + conn.mutex.Lock() + isClosed := conn.isClosed + dataChannelConn := conn.dataChannelConn + decoyDone := conn.decoyDone + conn.mutex.Unlock() + + if isClosed { + return 0, errors.TraceNew("closed") + } + + if dataChannelConn == nil { + return 0, errors.TraceNew("no data channel") + } + + // The input read buffer, p, may not be the same length as the message + // read from the data channel. Buffer the read message if another Read + // call is necessary to consume it. 
As per https://pkg.go.dev/io#Reader,
+ // dataChannelConn bytes read are processed even when
+ // dataChannelConn.Read returns an error; the error value is stored and
+ // returned with the Read call that consumes the end of the message buffer.
+
+ conn.readMutex.Lock()
+ defer conn.readMutex.Unlock()
+
+ if conn.readOffset == conn.readLength {
+ n, err := dataChannelConn.Read(conn.readBuffer)
+ conn.readOffset = 0
+ conn.readLength = n
+ conn.readError = err
+
+ // Skip over padding.
+
+ if n > 0 && !conn.peerPaddingDone {
+
+ paddingSize, n := binary.Varint(conn.readBuffer[0:conn.readLength])
+ if (paddingSize == 0 && n <= 0) || paddingSize >= int64(conn.readLength) {
+ return 0, errors.TraceNew("invalid padding")
+ }
+
+ if paddingSize < 0 {
+
+ // When the padding header indicates a padding size of -1, the
+ // peer is indicating that padding is done. Subsequent
+ // messages will have no padding header or padding bytes.
+
+ conn.peerPaddingDone = true
+ conn.readOffset += n
+
+ } else {
+
+ conn.readOffset += n + int(paddingSize)
+
+ atomic.AddInt32(&conn.paddedMessagesReceived, 1)
+ if conn.readOffset == conn.readLength {
+ atomic.AddInt32(&conn.decoyMessagesReceived, 1)
+ }
+ }
+ }
+ }
+
+ n := copy(p, conn.readBuffer[conn.readOffset:conn.readLength])
+ conn.readOffset += n
+
+ var err error
+ if conn.readOffset == conn.readLength {
+ err = conn.readError
+ }
+
+ // When decoy messages are enabled, periodically respond to an incoming
+ // message with an immediate outbound decoy message. This is similar to
+ // the design here:
+ // https://github.com/Psiphon-Labs/psiphon-tunnel-core/blob/c4f6a593a645db4479a7032a9e97d3c0b905cdfc/psiphon/common/quic/obfuscator.go#L361-L409
+ //
+ // writeMessage handles conn.decoyMessageCount, which is synchronized with
+ // conn.writeMutex, as well as other specific logic. Here we just signal
+ // writeMessage based on the read event.
+ //
+ // When the data channel already has buffered writes in excess of a decoy
+ // message size, writeMessage skips the decoy message and returns
+ // without blocking, so Read calls will not block.
+
+ if !decoyDone {
+ _, _ = conn.writeMessage(nil, true)
+ }
+
+ return n, errors.Trace(err)
+}
+
+func (conn *webRTCConn) Write(p []byte) (int, error) {
+ return conn.writeMessage(p, false)
+}
+
+func (conn *webRTCConn) writeMessage(p []byte, decoy bool) (int, error) {
+
+ if p != nil && decoy {
+ return 0, errors.TraceNew("invalid write parameters")
+ }
+
+ // pion/sctp doesn't handle 0-byte writes correctly, so drop/skip at this level.
+ //
+ // Testing shows that the SCTP connection stalls after a 0-byte write. In
+ // the pion/sctp implementation,
+ // https://github.com/pion/sctp/blob/v1.8.8/stream.go#L254-L278 and
+ // https://github.com/pion/sctp/blob/v1.8.8/stream.go#L280-L336, it
+ // appears that a zero-byte write won't send an SCTP message but does
+ // increment a sequence number.
+
+ if len(p) == 0 && !decoy {
+ return 0, nil
+ }
+
+ // Don't hold this lock, or else concurrent Reads will be blocked.
+ conn.mutex.Lock()
+ isClosed := conn.isClosed
+ bufferedAmount := conn.dataChannel.BufferedAmount()
+ dataChannelConn := conn.dataChannelConn
+ conn.mutex.Unlock()
+
+ if isClosed {
+ return 0, errors.TraceNew("closed")
+ }
+
+ if dataChannelConn == nil {
+ return 0, errors.TraceNew("no data channel")
+ }
+
+ // Only proceed with a decoy message when no pending writes are buffered.
+ // + // This check is made before acquiring conn.writeMutex so that, in most + // cases, writeMessage won't block Read calls when a concurrent Write is + // holding conn.writeMutex and potentially blocking on flow control. + // There's still a chance that this test passes, and a concurrent Write + // arrives at the same time. + + if decoy && bufferedAmount > 0 { + return 0, nil + } + + conn.writeMutex.Lock() + defer conn.writeMutex.Unlock() + + writeSize := len(p) + + // Determine padding size and padding header size. + + doPadding := false + paddingSize := 0 + var paddingHeader [binary.MaxVarintLen32]byte + paddingHeaderSize := 0 + + if decoy { + + if conn.decoyMessageCount < 1 { + return 0, nil + } + + if !conn.trafficShapingPRNG.FlipWeightedCoin( + conn.config.TrafficShapingParameters.DecoyMessageProbability) { + return 0, nil + } + + conn.decoyMessageCount -= 1 + + decoySize := conn.trafficShapingPRNG.Range( + conn.config.TrafficShapingParameters.MinDecoySize, + conn.config.TrafficShapingParameters.MaxDecoySize) + + // When sending a decoy message, the entire message is padding. + + doPadding = true + paddingSize = decoySize + + if conn.decoyMessageCount == 0 { + + // Set the shared flag that readMessage uses to stop invoking + // writeMessage for decoy events. + + conn.mutex.Lock() + conn.decoyDone = true + conn.mutex.Unlock() + } + + } else if conn.paddedMessageCount > 0 { + + // Add padding to a normal write. + + conn.paddedMessageCount -= 1 + + doPadding = true + paddingSize = prng.Range( + conn.config.TrafficShapingParameters.MinPaddingSize, + conn.config.TrafficShapingParameters.MaxPaddingSize) + + } else if conn.decoyMessageCount > 0 { + + // Padding normal messages is done, but there are still outstanding + // decoy messages, so add a padding header indicating padding size 0 + // to this normal message. + + doPadding = true + paddingSize = 0 + + } else if !conn.trafficShapingDone { + + // Padding normal messages is done and all decoy messages are sent, so + // send a special padding header with padding size -1, signaling the + // peer that no additional padding will be performed and no + // subsequent messages will contain a padding header. + + doPadding = true + paddingSize = -1 + + } + + if doPadding { + + if paddingSize > 0 { + + // Reduce, if necessary, to stay within the maximum data channel + // message size. This is not expected to happen for the io.Copy use + // case, with 32K message size, plus reasonable padding sizes. + + if writeSize+binary.MaxVarintLen32+paddingSize > dataChannelMaxMessageSize { + paddingSize -= (writeSize + binary.MaxVarintLen32 + paddingSize) - dataChannelMaxMessageSize + if paddingSize < 0 { + paddingSize = 0 + } + } + + // Add padding overhead to total writeSize before the flow control check. + + writeSize += paddingSize + } + + paddingHeaderSize = binary.PutVarint(paddingHeader[:], int64(paddingSize)) + writeSize += paddingHeaderSize + } + + if writeSize > dataChannelMaxMessageSize { + return 0, errors.TraceNew("write too large") + } + + // Flow control is required to ensure that Write calls don't result in + // unbounded buffering in pion/webrtc. Use similar logic and the same + // buffer size thresholds as the pion sample code. + // + // https://github.com/pion/webrtc/tree/master/examples/data-channels-flow-control#when-do-we-need-it: + // > Send or SendText methods are called on DataChannel to send data to + // > the connected peer. The methods return immediately, but it does not + // > mean the data was actually sent onto the wire. 
Instead, it is + // > queued in a buffer until it actually gets sent out to the wire. + // > + // > When you have a large amount of data to send, it is an application's + // > responsibility to control the buffered amount in order not to + // > indefinitely grow the buffer size to eventually exhaust the memory. + + // If the pion write buffer is too full, wait for a signal that sufficient + // write data has been consumed before writing more. + if !isClosed && bufferedAmount+uint64(writeSize) > dataChannelMaxBufferedAmount { + select { + case <-conn.dataChannelWriteBufferSignal: + case <-conn.closedSignal: + return 0, errors.TraceNew("connection has closed") + } + } + + if conn.trafficShapingDone { + + // When traffic shaping is done, p is written directly without the + // additional trafficShapingBuffer copy. + + // Limitation: if len(p) > 65536, the dataChannelConn.Write will fail. In + // practise, this is not expected to happen with typical use cases such + // as io.Copy, which uses a 32K buffer. + n, err := dataChannelConn.Write(p) + + return n, errors.Trace(err) + } + + conn.trafficShapingBuffer.Reset() + conn.trafficShapingBuffer.Write(paddingHeader[:paddingHeaderSize]) + if paddingSize > 0 { + conn.trafficShapingBuffer.Write(prng.Bytes(paddingSize)) + } + conn.trafficShapingBuffer.Write(p) + + // Limitation: see above; len(p) + padding must be <= 65536. + _, err := dataChannelConn.Write(conn.trafficShapingBuffer.Bytes()) + + if decoy { + atomic.AddInt32(&conn.decoyMessagesSent, 1) + } else if doPadding && paddingSize > 0 { + atomic.AddInt32(&conn.paddedMessagesSent, 1) + } + + if conn.paddedMessageCount == 0 && conn.decoyMessageCount == 0 && paddingSize == -1 { + + // Set flag indicating -1 padding size was sent and release traffic + // shaping resources. + + conn.trafficShapingDone = true + conn.trafficShapingPRNG = nil + conn.trafficShapingBuffer = nil + } + + return len(p), errors.Trace(err) +} + +func (conn *webRTCConn) LocalAddr() net.Addr { + conn.mutex.Lock() + defer conn.mutex.Unlock() + + // This is the local UDP socket address, not the external, public address. + return conn.udpConn.LocalAddr() +} + +func (conn *webRTCConn) RemoteAddr() net.Addr { + conn.mutex.Lock() + defer conn.mutex.Unlock() + + // Not supported. + return nil +} + +func (conn *webRTCConn) SetDeadline(t time.Time) error { + conn.mutex.Lock() + defer conn.mutex.Unlock() + + return errors.TraceNew("not supported") +} + +func (conn *webRTCConn) SetReadDeadline(t time.Time) error { + conn.mutex.Lock() + defer conn.mutex.Unlock() + + if conn.isClosed { + return errors.TraceNew("closed") + } + + readDeadliner, ok := conn.dataChannelConn.(datachannel.ReadDeadliner) + if !ok { + return errors.TraceNew("no data channel") + } + + return readDeadliner.SetReadDeadline(t) +} + +func (conn *webRTCConn) SetWriteDeadline(t time.Time) error { + conn.mutex.Lock() + defer conn.mutex.Unlock() + + return errors.TraceNew("not supported") +} + +// GetMetrics implements the common.MetricsSource interface and returns log +// fields detailing the WebRTC dial parameters. 
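The traffic shaping performed in writeMessage, and undone in readMessage, frames each shaped message as a signed varint padding-size header, then that many random padding bytes, then the caller's payload; a header value of -1 marks the last shaped message, and a decoy message is all padding with an empty payload. A minimal sketch of the framing for a non-negative paddingSize, reusing the binary and prng helpers already used in this file:

	header := make([]byte, binary.MaxVarintLen32)
	headerSize := binary.PutVarint(header, int64(paddingSize)) // paddingSize >= 0 in this sketch
	framed := append(header[:headerSize], append(prng.Bytes(paddingSize), payload...)...)
	// readMessage reverses this: binary.Varint decodes the header, then the
	// read offset is advanced past the header and paddingSize padding bytes.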
+func (conn *webRTCConn) GetMetrics() common.LogFields {
+ conn.mutex.Lock()
+ defer conn.mutex.Unlock()
+
+ logFields := make(common.LogFields)
+
+ logFields.Add(conn.iceCandidatePairMetrics)
+
+ randomizeDTLS := "0"
+ if conn.config.DoDTLSRandomization {
+ randomizeDTLS = "1"
+ }
+ logFields["inproxy_webrtc_randomize_dtls"] = randomizeDTLS
+
+ logFields["inproxy_webrtc_padded_messages_sent"] = atomic.LoadInt32(&conn.paddedMessagesSent)
+ logFields["inproxy_webrtc_padded_messages_received"] = atomic.LoadInt32(&conn.paddedMessagesReceived)
+ logFields["inproxy_webrtc_decoy_messages_sent"] = atomic.LoadInt32(&conn.decoyMessagesSent)
+ logFields["inproxy_webrtc_decoy_messages_received"] = atomic.LoadInt32(&conn.decoyMessagesReceived)
+
+ return logFields
+}
+
+func (conn *webRTCConn) onConnectionStateChange(state webrtc.PeerConnectionState) {
+
+ // Close the WebRTCConn when the connection is no longer connected. Close
+ // will lock conn.mutex, so do not acquire the lock here.
+ //
+ // Currently, ICE Restart is not used, and there is no transition from
+ // Disconnected back to Connected.
+
+ switch state {
+ case webrtc.PeerConnectionStateDisconnected,
+ webrtc.PeerConnectionStateFailed,
+ webrtc.PeerConnectionStateClosed:
+ conn.Close()
+ }
+
+ conn.config.Logger.WithTraceFields(common.LogFields{
+ "state": state.String(),
+ }).Info("peer connection state changed")
+}
+
+func (conn *webRTCConn) onICECandidate(candidate *webrtc.ICECandidate) {
+ if candidate == nil {
+ return
+ }
+
+ conn.config.Logger.WithTraceFields(common.LogFields{
+ "candidate": candidate.String(),
+ }).Info("new ICE candidate")
+}
+
+func (conn *webRTCConn) onICEBindingRequest(m *stun.Message, local, remote ice.Candidate, pair *ice.CandidatePair) bool {
+
+ // SetICEBindingRequestHandler is used to hook onICEBindingRequest into
+ // STUN bind events for logging. The return value is always false, as
+ // this callback makes no adjustments to ICE candidate selection. When
+ // the data channel has already opened, skip logging events, as
+ // this callback appears to be invoked for keepalive pings.
+
+ if local == nil || remote == nil {
+ return false
+ }
+
+ select {
+ case <-conn.dataChannelOpenedSignal:
+ return false
+ default:
+ }
+
+ conn.config.Logger.WithTraceFields(common.LogFields{
+ "local_candidate": local.String(),
+ "remote_candidate": remote.String(),
+ }).Info("new ICE STUN binding request")
+
+ return false
+}
+
+func (conn *webRTCConn) onICEConnectionStateChange(state webrtc.ICEConnectionState) {
+
+ conn.config.Logger.WithTraceFields(common.LogFields{
+ "state": state.String(),
+ }).Info("ICE connection state changed")
+}
+
+func (conn *webRTCConn) onICEGatheringStateChange(state webrtc.ICEGathererState) {
+
+ conn.config.Logger.WithTraceFields(common.LogFields{
+ "state": state.String(),
+ }).Info("ICE gathering state changed")
+}
+
+func (conn *webRTCConn) onNegotiationNeeded() {
+
+ conn.config.Logger.WithTrace().Info("negotiation needed")
+}
+
+func (conn *webRTCConn) onSignalingStateChange(state webrtc.SignalingState) {
+
+ conn.config.Logger.WithTraceFields(common.LogFields{
+ "state": state.String(),
+ }).Info("signaling state changed")
+}
+
+func (conn *webRTCConn) onDataChannel(dataChannel *webrtc.DataChannel) {
+
+ conn.mutex.Lock()
+ defer conn.mutex.Unlock()
+
+ conn.setDataChannel(dataChannel)
+
+ conn.config.Logger.WithTraceFields(common.LogFields{
+ "label": dataChannel.Label(),
+ "ID": dataChannel.ID(),
+ }).Info("new data channel")
+}
+
+func (conn *webRTCConn) onDataChannelOpen() {
+
+ conn.mutex.Lock()
+ defer conn.mutex.Unlock()
+
+ dataChannelConn, err := conn.dataChannel.Detach()
+ if err == nil {
+ conn.dataChannelConn = dataChannelConn
+
+ // TODO: can a data channel be connected, disconnected, and then
+ // reestablished in one session?
+
+ conn.dataChannelOpenedOnce.Do(func() { close(conn.dataChannelOpenedSignal) })
+ }
+
+ conn.config.Logger.WithTraceFields(common.LogFields{
+ "detachError": err,
+ }).Info("data channel open")
+}
+
+func (conn *webRTCConn) onDataChannelClose() {
+
+ // Close the WebRTCConn when the data channel is closed. Close will lock
+ // conn.mutex, so do not acquire the lock here.
+ conn.Close()
+
+ conn.config.Logger.WithTrace().Info("data channel closed")
+}
+
+// prepareSDPAddresses adjusts the SDP, pruning local network addresses and
+// adding any port mapping as a host candidate.
+func prepareSDPAddresses(
+ encodedSDP []byte,
+ errorOnNoCandidates bool,
+ portMappingExternalAddr string,
+ disableIPv6Candidates bool) ([]byte, *webRTCSDPMetrics, error) {
+
+ modifiedSDP, metrics, err := processSDPAddresses(
+ encodedSDP,
+ portMappingExternalAddr,
+ disableIPv6Candidates,
+ errorOnNoCandidates,
+ nil,
+ common.GeoIPData{})
+ return modifiedSDP, metrics, errors.Trace(err)
+}
+
+// filterSDPAddresses checks that the SDP does not contain an empty list of
+// candidates, bogon candidates, or candidates outside of the country and ASN
+// for the specified expectedGeoIPData. Invalid candidates are stripped and a
+// filtered SDP is returned.
+func filterSDPAddresses(
+ encodedSDP []byte,
+ errorOnNoCandidates bool,
+ lookupGeoIP LookupGeoIP,
+ expectedGeoIPData common.GeoIPData) ([]byte, *webRTCSDPMetrics, error) {
+
+ filteredSDP, metrics, err := processSDPAddresses(
+ encodedSDP,
+ "",
+ false,
+ errorOnNoCandidates,
+ lookupGeoIP,
+ expectedGeoIPData)
+ return filteredSDP, metrics, errors.Trace(err)
+}
+
+// webRTCSDPMetrics are network capability metrics values for an SDP.
+type webRTCSDPMetrics struct {
+	iceCandidateTypes     []ICECandidateType
+	hasIPv6               bool
+	filteredICECandidates []string
+}
+
+// processSDPAddresses is based on snowflake/common/util.StripLocalAddresses
+// https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/blob/v2.5.1/common/util/util.go#L70-99
+/*
+	This file contains the license for "Snowflake"
+	a free software project which provides a WebRTC pluggable transport.
+
+================================================================================
+Copyright (c) 2016, Serene Han, Arlo Breault
+Copyright (c) 2019-2020, The Tor Project, Inc
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+  * Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+  * Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+  * Neither the names of the copyright owners nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+================================================================================
+
+*/
+func processSDPAddresses(
+	encodedSDP []byte,
+	portMappingExternalAddr string,
+	disableIPv6Candidates bool,
+	errorOnNoCandidates bool,
+	lookupGeoIP LookupGeoIP,
+	expectedGeoIPData common.GeoIPData) ([]byte, *webRTCSDPMetrics, error) {
+
+	var sessionDescription sdp.SessionDescription
+	err := sessionDescription.Unmarshal(encodedSDP)
+	if err != nil {
+		return nil, nil, errors.Trace(err)
+	}
+
+	candidateTypes := map[ICECandidateType]bool{}
+	hasIPv6 := false
+	filteredCandidateReasons := make(map[string]int)
+
+	var portMappingICECandidates []sdp.Attribute
+	if portMappingExternalAddr != "" {
+
+		// Prepare ICE candidate attribute pair for the port mapping, modeled
+		// after the definition of host candidates.
+
+		host, portStr, err := net.SplitHostPort(portMappingExternalAddr)
+		if err != nil {
+			return nil, nil, errors.Trace(err)
+		}
+		port, err := strconv.Atoi(portStr)
+		if err != nil {
+			return nil, nil, errors.Trace(err)
+		}
+
+		// Only IPv4 port mapping addresses are supported due to the
+		// NewCandidateHost limitation noted below. It is expected that port
+		// mappings will be IPv4, as NAT and IPv6 is not a typical combination.
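+
+		// For illustration, each generated attribute pair marshals into the
+		// SDP roughly as follows, with a hypothetical external address and
+		// placeholder fields:
+		//
+		//	a=candidate:<foundation> <component> udp <priority> 203.0.113.7 54321 typ host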
+ + hostIP := net.ParseIP(host) + if hostIP != nil && hostIP.To4() != nil { + + for _, component := range []webrtc.ICEComponent{webrtc.ICEComponentRTP, webrtc.ICEComponentRTCP} { + + // The candidate ID is generated and the priority and foundation + // use the default for hosts. + // + // Limitation: NewCandidateHost initializes the networkType to + // NetworkTypeUDP4, and this field is not-exported. + // https://github.com/pion/ice/blob/6d301287654b05a36248842c278d58d501454bff/candidate_host.go#L27-L64 + + iceCandidate, err := ice.NewCandidateHost(&ice.CandidateHostConfig{ + Network: "udp", + Address: host, + Port: port, + Component: uint16(component), + }) + if err != nil { + return nil, nil, errors.Trace(err) + } + + portMappingICECandidates = append( + portMappingICECandidates, + sdp.Attribute{Key: "candidate", Value: iceCandidate.Marshal()}) + } + + candidateTypes[ICECandidatePortMapping] = true + } + } + + candidateCount := len(portMappingICECandidates) + + for _, mediaDescription := range sessionDescription.MediaDescriptions { + + addPortMappingCandidates := len(portMappingICECandidates) > 0 + var attributes []sdp.Attribute + for _, attribute := range mediaDescription.Attributes { + + // Insert the port mapping candidate either before the + // first "a=candidate", or before "a=end-of-candidates"(there may + // be no "a=candidate" attributes). + + if addPortMappingCandidates && + (attribute.IsICECandidate() || attribute.Key == sdp.AttrKeyEndOfCandidates) { + + attributes = append(attributes, portMappingICECandidates...) + addPortMappingCandidates = false + } + + if attribute.IsICECandidate() { + + candidate, err := ice.UnmarshalCandidate(attribute.Value) + if err != nil { + return nil, nil, errors.Trace(err) + } + + candidateIP := net.ParseIP(candidate.Address()) + + if candidateIP == nil { + return nil, nil, errors.TraceNew("unexpected non-IP") + } + + candidateIsIPv6 := false + if candidateIP.To4() == nil { + if disableIPv6Candidates { + reason := fmt.Sprintf("disabled %s IPv6", + candidate.Type().String()) + filteredCandidateReasons[reason] += 1 + continue + } + candidateIsIPv6 = true + } + + // Strip non-routable bogons, including LAN addresses. + // Same-LAN client/proxy hops are not expected to be useful, + // and this also avoids unnecessary local network traffic. + // + // Well-behaved clients and proxies should strip these values; + // the broker enforces this with filtering. + + if !GetAllowBogonWebRTCConnections() && + common.IsBogon(candidateIP) { + + version := "IPv4" + if candidateIsIPv6 { + version = "IPv6" + } + reason := fmt.Sprintf("bogon %s %s", + candidate.Type().String(), version) + filteredCandidateReasons[reason] += 1 + continue + } + + // The broker will check that clients and proxies specify only + // candidates that map to the same GeoIP country and ASN as + // the client/proxy connection to the broker. This limits + // misuse of candidates to connect to other locations. + // Legitimate candidates will not all have the exact same IP + // address, as there could be a mix of IPv4 and IPv6, as well + // as potentially different NAT paths. + // + // In some cases, legitimate clients and proxies may + // unintentionally submit candidates with mismatching GeoIP. + // This can occur, for example, when a STUN candidate is only + // a partial hole punch through double NAT, and when internal + // network addresses misuse non-private IP ranges (so are + // technically not bogons). 
Instead of outright rejecting + // SDPs containing unexpected GeoIP candidates, they are + // instead stripped out and the resulting filtered SDP is + // used. + + if lookupGeoIP != nil { + candidateGeoIPData := lookupGeoIP(candidate.Address()) + + if candidateGeoIPData.Country != expectedGeoIPData.Country || + candidateGeoIPData.ASN != expectedGeoIPData.ASN { + + version := "IPv4" + if candidateIsIPv6 { + version = "IPv6" + } + reason := fmt.Sprintf( + "unexpected GeoIP %s %s: %s/%s", + candidate.Type().String(), + version, + candidateGeoIPData.Country, + candidateGeoIPData.ASN) + filteredCandidateReasons[reason] += 1 + continue + } + } + + if candidateIsIPv6 { + hasIPv6 = true + } + + // These types are not reported: + // - CandidateTypeRelay: TURN servers are not used. + // - CandidateTypePeerReflexive: this candidate type only + // emerges later in the connection process. + + switch candidate.Type() { + case ice.CandidateTypeHost: + candidateTypes[ICECandidateHost] = true + case ice.CandidateTypeServerReflexive: + candidateTypes[ICECandidateServerReflexive] = true + } + + candidateCount += 1 + } + + attributes = append(attributes, attribute) + } + + mediaDescription.Attributes = attributes + } + + if errorOnNoCandidates && candidateCount == 0 { + return nil, nil, errors.TraceNew("no candidates") + } + + encodedSDP, err = sessionDescription.Marshal() + if err != nil { + return nil, nil, errors.Trace(err) + } + + metrics := &webRTCSDPMetrics{ + hasIPv6: hasIPv6, + } + for candidateType := range candidateTypes { + metrics.iceCandidateTypes = append(metrics.iceCandidateTypes, candidateType) + } + for reason, count := range filteredCandidateReasons { + metrics.filteredICECandidates = append(metrics.filteredICECandidates, + fmt.Sprintf("%s: %d", reason, count)) + } + + return encodedSDP, metrics, nil +} + +type pionLoggerFactory struct { + logger common.Logger + debugLogging bool +} + +func newPionLoggerFactory(logger common.Logger, debugLogging bool) *pionLoggerFactory { + return &pionLoggerFactory{ + logger: logger, + debugLogging: debugLogging, + } +} + +func (f *pionLoggerFactory) NewLogger(scope string) pion_logging.LeveledLogger { + return newPionLogger(scope, f.logger, f.debugLogging) +} + +// pionLogger wraps common.Logger and implements +// https://pkg.go.dev/github.com/pion/logging#LeveledLogger for passing into +// pion. 
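+//
+// A pionLoggerFactory hands these loggers to pion; it is presumably installed
+// when the WebRTC stack is configured, along the lines of:
+//
+//	settingEngine.LoggerFactory = newPionLoggerFactory(logger, debugLogging)
+//
+// where settingEngine, logger, and debugLogging are illustrative names. Each
+// resulting log line is prefixed with "webRTC: <scope>: ".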
+type pionLogger struct {
+	scope        string
+	logger       common.Logger
+	debugLogging bool
+}
+
+func newPionLogger(scope string, logger common.Logger, debugLogging bool) *pionLogger {
+	return &pionLogger{
+		scope:        scope,
+		logger:       logger,
+		debugLogging: debugLogging,
+	}
+}
+
+func (l *pionLogger) Trace(msg string) {
+	if !l.debugLogging {
+		return
+	}
+	l.logger.WithTrace().Debug(fmt.Sprintf("webRTC: %s: %s", l.scope, msg))
+}
+
+func (l *pionLogger) Tracef(format string, args ...interface{}) {
+	if !l.debugLogging {
+		return
+	}
+	l.logger.WithTrace().Debug(fmt.Sprintf("webRTC: %s: %s", l.scope, fmt.Sprintf(format, args...)))
+}
+
+func (l *pionLogger) Debug(msg string) {
+	if !l.debugLogging {
+		return
+	}
+	l.logger.WithTrace().Debug(fmt.Sprintf("webRTC: %s: %s", l.scope, msg))
+}
+
+func (l *pionLogger) Debugf(format string, args ...interface{}) {
+	if !l.debugLogging {
+		return
+	}
+	l.logger.WithTrace().Debug(fmt.Sprintf("webRTC: %s: %s", l.scope, fmt.Sprintf(format, args...)))
+}
+
+func (l *pionLogger) Info(msg string) {
+	l.logger.WithTrace().Info(fmt.Sprintf("webRTC: %s: %s", l.scope, msg))
+}
+
+func (l *pionLogger) Infof(format string, args ...interface{}) {
+	l.logger.WithTrace().Info(fmt.Sprintf("webRTC: %s: %s", l.scope, fmt.Sprintf(format, args...)))
+}
+
+func (l *pionLogger) Warn(msg string) {
+	l.logger.WithTrace().Warning(fmt.Sprintf("webRTC: %s: %s", l.scope, msg))
+}
+
+func (l *pionLogger) Warnf(format string, args ...interface{}) {
+	l.logger.WithTrace().Warning(fmt.Sprintf("webRTC: %s: %s", l.scope, fmt.Sprintf(format, args...)))
+}
+
+func (l *pionLogger) Error(msg string) {
+	l.logger.WithTrace().Error(fmt.Sprintf("webRTC: %s: %s", l.scope, msg))
+}
+
+func (l *pionLogger) Errorf(format string, args ...interface{}) {
+	l.logger.WithTrace().Error(fmt.Sprintf("webRTC: %s: %s", l.scope, fmt.Sprintf(format, args...)))
+}
+
+// pionNetwork implements pion/transport.Net.
+//
+// Via the SettingEngine, pion is configured to use a pionNetwork instance,
+// which provides alternative implementations for various network functions.
+// The Interfaces implementation provides a workaround for Android
+// net.Interfaces issues and reduces the number of IPv6 candidates to avoid
+// excess STUN requests; and the ResolveUDPAddr implementation hooks into the
+// Psiphon custom resolver.
+type pionNetwork struct {
+	dialCtx               context.Context
+	logger                pion_logging.LeveledLogger
+	webRTCDialCoordinator WebRTCDialCoordinator
+}
+
+func newPionNetwork(
+	dialCtx context.Context,
+	logger pion_logging.LeveledLogger,
+	webRTCDialCoordinator WebRTCDialCoordinator) *pionNetwork {
+
+	return &pionNetwork{
+		dialCtx:               dialCtx,
+		logger:                logger,
+		webRTCDialCoordinator: webRTCDialCoordinator,
+	}
+}
+
+func (p *pionNetwork) Interfaces() ([]*transport.Interface, error) {
+
+	// To determine the active IPv4 and IPv6 interfaces, let the OS bind IPv4
+	// and IPv6 UDP sockets with a specified external destination address.
+	// Then iterate over all interfaces, but return interface info for only
+	// the interfaces those sockets were bound to.
+	//
+	// The destination IPs are the IPs that currently resolve for example.com.
+	// No actual traffic to these IPs or example.com is sent, as the UDP
+	// sockets are not used to send any packets.
+	//
+	// This scheme should select just one IPv4 and one IPv6 address, which
+	// should be the active, externally routable addresses, and the IPv6
+	// address should be the preferred, non-deprecated temporary IPv6 address.
+	//
+	// The anet package is used to work around net.Interfaces not working on
+	// Android at this time: https://github.com/golang/go/issues/40569.
+	//
+	// In post-ICE gathering processing, processSDPAddresses will also strip
+	// all bogon addresses, so there is no explicit bogon check here.
+	//
+	// Limitations:
+	//
+	// - The active interface could change between the socket operation and
+	//   iterating over all interfaces. Higher-level code is expected to
+	//   react to active network changes.
+	//
+	// - The public IPs for example.com may not be robust in all routing
+	//   situations. Alternatively, we could use the configured STUN server
+	//   as the test destination, but the STUN server domain is not resolved
+	//   at this point and STUN is not always configured and used.
+	//
+	// - The results could be cached and reused.
+
+	var defaultIPv4, defaultIPv6 net.IP
+
+	udpConnIPv4, err := p.webRTCDialCoordinator.UDPConn(
+		context.Background(), "udp4", "93.184.216.34:3478")
+	if err == nil {
+		defaultIPv4 = udpConnIPv4.LocalAddr().(*net.UDPAddr).IP
+		udpConnIPv4.Close()
+	}
+
+	udpConnIPv6, err := p.webRTCDialCoordinator.UDPConn(
+		context.Background(), "udp6", "[2606:2800:220:1:248:1893:25c8:1946]:3478")
+	if err == nil {
+		defaultIPv6 = udpConnIPv6.LocalAddr().(*net.UDPAddr).IP
+		udpConnIPv6.Close()
+	}
+
+	transportInterfaces := []*transport.Interface{}
+
+	netInterfaces, err := anet.Interfaces()
+
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	for _, netInterface := range netInterfaces {
+		// Note: don't exclude interfaces with the net.FlagPointToPoint flag,
+		// which is set for certain mobile networks
+		if (netInterface.Flags&net.FlagUp == 0) ||
+			(!GetAllowBogonWebRTCConnections() && (netInterface.Flags&net.FlagLoopback != 0)) {
+			continue
+		}
+		addrs, err := anet.InterfaceAddrsByInterface(&netInterface)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		var transportInterface *transport.Interface
+		for _, addr := range addrs {
+			IP, _, err := net.ParseCIDR(addr.String())
+			if err != nil {
+				return nil, errors.Trace(err)
+			}
+			if IP.Equal(defaultIPv4) || IP.Equal(defaultIPv6) ||
+				(GetAllowBogonWebRTCConnections() && (netInterface.Flags&net.FlagLoopback != 0)) {
+				if transportInterface == nil {
+					transportInterface = transport.NewInterface(netInterface)
+				}
+				transportInterface.AddAddress(addr)
+			}
+		}
+		if transportInterface != nil {
+			transportInterfaces = append(transportInterfaces, transportInterface)
+		}
+	}
+
+	return transportInterfaces, nil
+}
+
+func (p *pionNetwork) ResolveUDPAddr(network, address string) (retAddr *net.UDPAddr, retErr error) {
+
+	defer func() {
+		if retErr != nil {
+			// Explicitly log an error since certain pion operations -- e.g.,
+			// ICE gathering -- don't propagate all pion/transport.Net errors.
+			p.logger.Errorf("pionNetwork.ResolveUDPAddr failed: %v", retErr)
+		}
+	}()
+
+	// Currently, pion appears to call ResolveUDPAddr with "udp4"/"udp6"
+	// instead of "ip4"/"ip6", as expected by, e.g., net.Resolver.LookupIP.
+	// Convert to "ip4"/"ip6".
+
+	// Specifying v4/v6 ensures that the resolved IP address is the correct
+	// type. In the case of STUN servers, the correct type is required in
+	// order to create the correct IPv4 or IPv6 hole punch address.
+
+	switch network {
+	case "udp4", "tcp4":
+		network = "ip4"
+	case "udp6", "tcp6":
+		network = "ip6"
+	default:
+		network = "ip"
+	}
+
+	// Currently, pion appears to call ResolveUDPAddr with an improperly
+	// formatted address, :443 not []:443; handle this case.
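+	//
+	// For example, an unbracketed IPv6 host and port such as
+	// "2001:db8::1:3478" (illustrative address) is rejoined as
+	// "[2001:db8::1]:3478" before being passed to the resolver.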
+ index := strings.LastIndex(address, ":") + if index != -1 { + address = net.JoinHostPort(address[:index], address[index+1:]) + } + + // Use the Psiphon custom resolver to resolve any STUN server domains. + resolvedAddress, err := p.webRTCDialCoordinator.ResolveAddress( + p.dialCtx, network, address) + if err != nil { + return nil, errors.Trace(err) + } + + IPStr, portStr, err := net.SplitHostPort(resolvedAddress) + if err != nil { + return nil, errors.Trace(err) + } + IP := net.ParseIP(IPStr) + if IP == nil { + return nil, errors.TraceNew("invalid IP address") + } + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, errors.Trace(err) + } + return &net.UDPAddr{IP: IP, Port: port}, nil +} + +var errNotSupported = std_errors.New("not supported") + +func (p *pionNetwork) ListenPacket(network string, address string) (net.PacketConn, error) { + // Explicitly log an error since certain pion operations -- e.g., ICE + // gathering -- don't propagate all pion/transport.Net errors. + p.logger.Errorf("unexpected pionNetwork.ListenPacket call from %s", stacktrace.GetParentFunctionName()) + return nil, errors.Trace(errNotSupported) +} + +func (p *pionNetwork) ListenUDP(network string, locAddr *net.UDPAddr) (transport.UDPConn, error) { + p.logger.Errorf("unexpected pionNetwork.ListenUDP call from %s", stacktrace.GetParentFunctionName()) + return nil, errors.Trace(errNotSupported) +} + +func (p *pionNetwork) ListenTCP(network string, laddr *net.TCPAddr) (transport.TCPListener, error) { + p.logger.Errorf("unexpected pionNetwork.ListenTCP call from %s", stacktrace.GetParentFunctionName()) + return nil, errors.Trace(errNotSupported) +} + +func (p *pionNetwork) Dial(network, address string) (net.Conn, error) { + p.logger.Errorf("unexpected pionNetwork.Dial call from %s", stacktrace.GetParentFunctionName()) + return nil, errors.Trace(errNotSupported) +} + +func (p *pionNetwork) DialUDP(network string, laddr, raddr *net.UDPAddr) (transport.UDPConn, error) { + p.logger.Errorf("unexpected pionNetwork.DialUDP call from %s", stacktrace.GetParentFunctionName()) + return nil, errors.Trace(errNotSupported) +} + +func (p *pionNetwork) DialTCP(network string, laddr, raddr *net.TCPAddr) (transport.TCPConn, error) { + p.logger.Errorf("unexpected pionNetwork.DialTCP call from %s", stacktrace.GetParentFunctionName()) + return nil, errors.Trace(errNotSupported) +} + +func (p *pionNetwork) ResolveIPAddr(network, address string) (*net.IPAddr, error) { + p.logger.Errorf("unexpected pionNetwork.ResolveIPAddr call from %s", stacktrace.GetParentFunctionName()) + return nil, errors.Trace(errNotSupported) +} + +func (p *pionNetwork) ResolveTCPAddr(network, address string) (*net.TCPAddr, error) { + p.logger.Errorf("unexpected pionNetwork.ResolveTCPAddr call from %s", stacktrace.GetParentFunctionName()) + return nil, errors.Trace(errNotSupported) +} + +func (p *pionNetwork) InterfaceByIndex(index int) (*transport.Interface, error) { + p.logger.Errorf("unexpected pionNetwork.InterfaceByIndex call from %s", stacktrace.GetParentFunctionName()) + return nil, errors.Trace(errNotSupported) +} + +func (p *pionNetwork) InterfaceByName(name string) (*transport.Interface, error) { + p.logger.Errorf("unexpected pionNetwork.InterfaceByName call from %s", stacktrace.GetParentFunctionName()) + return nil, errors.Trace(errNotSupported) +} + +func (p *pionNetwork) CreateDialer(dialer *net.Dialer) transport.Dialer { + return &pionNetworkDialer{pionNetwork: p} +} + +type pionNetworkDialer struct { + pionNetwork *pionNetwork +} + +func 
(d pionNetworkDialer) Dial(network, address string) (net.Conn, error) { + d.pionNetwork.logger.Errorf("unexpected pionNetworkDialer.Dial call from %s", stacktrace.GetParentFunctionName()) + return nil, errors.Trace(errNotSupported) +} diff --git a/psiphon/common/logger.go b/psiphon/common/logger.go index 6bd335035..2c65afba6 100644 --- a/psiphon/common/logger.go +++ b/psiphon/common/logger.go @@ -28,6 +28,10 @@ type Logger interface { WithTrace() LogTrace WithTraceFields(fields LogFields) LogTrace LogMetric(metric string, fields LogFields) + + // IsLogLevelDebug is used to skip formatting debug-level log messages in + // cases where performance would be impacted. + IsLogLevelDebug() bool } // LogTrace is interface-compatible with the return values from diff --git a/psiphon/common/net.go b/psiphon/common/net.go index 658254435..8609874dc 100644 --- a/psiphon/common/net.go +++ b/psiphon/common/net.go @@ -22,8 +22,10 @@ package common import ( "container/list" "context" + "io" "net" "net/http" + "net/netip" "strconv" "sync" "time" @@ -150,51 +152,68 @@ func PortFromAddr(addr net.Addr) int { // close a set of open connections, etc. // Once the list is closed, no more items may be added to the // list (unless it is reset). -type Conns struct { +type Conns[T interface { + comparable + io.Closer +}] struct { mutex sync.Mutex isClosed bool - conns map[net.Conn]bool + conns map[T]bool } // NewConns initializes a new Conns. -func NewConns() *Conns { - return &Conns{} +func NewConns[T interface { + comparable + io.Closer +}]() *Conns[T] { + return &Conns[T]{} } -func (conns *Conns) Reset() { +func (conns *Conns[T]) Reset() { conns.mutex.Lock() defer conns.mutex.Unlock() conns.isClosed = false - conns.conns = make(map[net.Conn]bool) + conns.conns = make(map[T]bool) } -func (conns *Conns) Add(conn net.Conn) bool { +func (conns *Conns[T]) Add(conn T) bool { conns.mutex.Lock() defer conns.mutex.Unlock() if conns.isClosed { return false } if conns.conns == nil { - conns.conns = make(map[net.Conn]bool) + conns.conns = make(map[T]bool) } conns.conns[conn] = true return true } -func (conns *Conns) Remove(conn net.Conn) { +func (conns *Conns[T]) Remove(conn T) { conns.mutex.Lock() defer conns.mutex.Unlock() delete(conns.conns, conn) } -func (conns *Conns) CloseAll() { +func (conns *Conns[T]) CloseAll() { + conns.mutex.Lock() - defer conns.mutex.Unlock() conns.isClosed = true - for conn := range conns.conns { - conn.Close() + closeConns := conns.conns + conns.conns = make(map[T]bool) + conns.mutex.Unlock() + + // Close is invoked outside of the mutex in case a member conn's Close + // invokes Remove. 
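+	// For example, a conn whose Close implementation calls Remove would
+	// otherwise deadlock attempting to re-acquire conns.mutex.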
+ for conn := range closeConns { + _ = conn.Close() } - conns.conns = make(map[net.Conn]bool) +} + +func (conns *Conns[T]) IsClosed() bool { + conns.mutex.Lock() + defer conns.mutex.Unlock() + return conns.isClosed } // LRUConns is a concurrency-safe list of net.Conns ordered @@ -331,7 +350,7 @@ func (conn *WriteTimeoutUDPConn) Write(b []byte) (int, error) { return 0, errors.Trace(err) } - // Do not wrap any I/O err returned by udpConn + // Do not wrap any I/O err returned by UDPConn return conn.UDPConn.Write(b) } @@ -342,10 +361,21 @@ func (conn *WriteTimeoutUDPConn) WriteMsgUDP(b, oob []byte, addr *net.UDPAddr) ( return 0, 0, errors.Trace(err) } - // Do not wrap any I/O err returned by udpConn + // Do not wrap any I/O err returned by UDPConn return conn.UDPConn.WriteMsgUDP(b, oob, addr) } +func (conn *WriteTimeoutUDPConn) WriteMsgUDPAddrPort(b, oob []byte, addr netip.AddrPort) (int, int, error) { + + err := conn.SetWriteDeadline(time.Now().Add(UDP_PACKET_WRITE_TIMEOUT)) + if err != nil { + return 0, 0, errors.Trace(err) + } + + // Do not wrap any I/O err returned by UDPConn + return conn.UDPConn.WriteMsgUDPAddrPort(b, oob, addr) +} + func (conn *WriteTimeoutUDPConn) WriteTo(b []byte, addr net.Addr) (int, error) { err := conn.SetWriteDeadline(time.Now().Add(UDP_PACKET_WRITE_TIMEOUT)) @@ -353,10 +383,21 @@ func (conn *WriteTimeoutUDPConn) WriteTo(b []byte, addr net.Addr) (int, error) { return 0, errors.Trace(err) } - // Do not wrap any I/O err returned by udpConn + // Do not wrap any I/O err returned by UDPConn return conn.UDPConn.WriteTo(b, addr) } +func (conn *WriteTimeoutUDPConn) WriteToUDPAddrPort(b []byte, addr netip.AddrPort) (int, error) { + + err := conn.SetWriteDeadline(time.Now().Add(UDP_PACKET_WRITE_TIMEOUT)) + if err != nil { + return 0, errors.Trace(err) + } + + // Do not wrap any I/O err returned by UDPConn + return conn.UDPConn.WriteToUDPAddrPort(b, addr) +} + func (conn *WriteTimeoutUDPConn) WriteToUDP(b []byte, addr *net.UDPAddr) (int, error) { err := conn.SetWriteDeadline(time.Now().Add(UDP_PACKET_WRITE_TIMEOUT)) @@ -364,7 +405,7 @@ func (conn *WriteTimeoutUDPConn) WriteToUDP(b []byte, addr *net.UDPAddr) (int, e return 0, errors.Trace(err) } - // Do not wrap any I/O err returned by udpConn + // Do not wrap any I/O err returned by UDPConn return conn.UDPConn.WriteToUDP(b, addr) } @@ -383,6 +424,21 @@ func (conn *WriteTimeoutPacketConn) WriteTo(b []byte, addr net.Addr) (int, error return 0, errors.Trace(err) } - // Do not wrap any I/O err returned by udpConn + // Do not wrap any I/O err returned by PacketConn return conn.PacketConn.WriteTo(b, addr) } + +// GetMetrics implements the common.MetricsSource interface. +func (conn *WriteTimeoutPacketConn) GetMetrics() LogFields { + + logFields := make(LogFields) + + // Include metrics, such as inproxy and fragmentor metrics, from the + // underlying dial conn. + underlyingMetrics, ok := conn.PacketConn.(MetricsSource) + if ok { + logFields.Add(underlyingMetrics.GetMetrics()) + } + + return logFields +} diff --git a/psiphon/common/obfuscator/history.go b/psiphon/common/obfuscator/history.go index c79622479..ed42db722 100644 --- a/psiphon/common/obfuscator/history.go +++ b/psiphon/common/obfuscator/history.go @@ -34,6 +34,10 @@ const ( HISTORY_CLIENT_IP_MAX_ENTRIES = 10000 ) +// TODO: rename clientIP to peerIP to reflect newer terminology in +// psiphon/server where the immediate network peer may be an in-proxy proxy, +// not the client. + // SeedHistory maintains a history of recently observed obfuscation seed values. 
// This history is used to identify duplicate seed messages. // diff --git a/psiphon/common/obfuscator/obfuscatedSshConn.go b/psiphon/common/obfuscator/obfuscatedSshConn.go index 7cef40e6d..062ad72da 100644 --- a/psiphon/common/obfuscator/obfuscatedSshConn.go +++ b/psiphon/common/obfuscator/obfuscatedSshConn.go @@ -175,7 +175,10 @@ func NewObfuscatedSSHConn( } } else { - // NewServerObfuscator reads a seed message from conn + // NewServerObfuscator reads a seed message from conn. + // + // DisableStrictHistoryMode is not set, as legitimate clients never + // retry OSSH dials using a previous seed. obfuscator, err = NewServerObfuscator( &ObfuscatorConfig{ Keyword: obfuscationKeyword, diff --git a/psiphon/common/obfuscator/obfuscator.go b/psiphon/common/obfuscator/obfuscator.go index 2b919698d..34db7374c 100644 --- a/psiphon/common/obfuscator/obfuscator.go +++ b/psiphon/common/obfuscator/obfuscator.go @@ -80,11 +80,14 @@ type OSSHPrefixSplitConfig struct { // stream ciphers for: // https://github.com/brl/obfuscated-openssh/blob/master/README.obfuscation // -// Limitation: the RC4 cipher is vulnerable to ciphertext malleability and -// the "magic" value provides only weak authentication due to its small -// size. Increasing the size of the magic field will break compatibility -// with legacy clients. New protocols and schemes should not use this -// obfuscator. +// Limitations: +// - The RC4 cipher is vulnerable to ciphertext malleability and the "magic" +// value provides only weak authentication due to its small size. +// Increasing the size of the magic field will break compatibility with +// legacy clients. +// - The RC4 cipher does not provide integrity protection for the client +// preamble, particularly the prefix header. +// - New protocols and schemes should not use this obfuscator. type Obfuscator struct { preamble []byte @@ -120,9 +123,9 @@ type ObfuscatorConfig struct { // SeedHistory and IrregularLogger are optional parameters used only by // server obfuscators. - SeedHistory *SeedHistory - StrictHistoryMode bool - IrregularLogger func(clientIP string, err error, logFields common.LogFields) + SeedHistory *SeedHistory + DisableStrictHistoryMode bool + IrregularLogger func(clientIP string, err error, logFields common.LogFields) } // NewClientObfuscator creates a new Obfuscator, staging a seed message to be @@ -344,7 +347,7 @@ func deriveKey(obfuscatorSeed, keyword, iv []byte) ([]byte, error) { // makeClientPreamble generates the preamble bytes for the Obfuscated SSH protocol. // // If a prefix is applied, preamble bytes refer to the prefix, prefix terminator, -// followed by the Obufscted SSH initial client message, followed by the +// followed by the Obfuscated SSH initial client message, followed by the // prefix header. // // If a prefix is not applied, preamble bytes refer to the Obfuscated SSH @@ -369,6 +372,13 @@ func deriveKey(obfuscatorSeed, keyword, iv []byte) ([]byte, error) { // // Returns the preamble, the prefix header if a prefix was generated, // and the padding length. +// +// Limitation: as the RC4 stream cipher does not provide integrity protection, +// the prefix header is not protected from manipulation. The prefix header is +// treated, by the server, as untrusted input, so a corrupt or invalid prefix +// header will result in a failed connection, as would happen with attempts +// to corrupt the underlying SSH connection. However, a man-in-the-middle can +// cause the server to respond with a different prefix. 
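+//
+// As a rough sketch, with a prefix applied the preamble layout is:
+//
+//	[ prefix | prefix terminator | Obfuscated SSH initial client message | prefix header ]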
func makeClientPreamble( keyword string, prefixSpec *OSSHPrefixSpec, @@ -431,7 +441,7 @@ func makeClientPreamble( preamble := buffer.Bytes() - // Encryptes what comes after the magic value. + // Encrypts what comes after the magic value. clientToServerCipher.XORKeyStream( preamble[magicValueStartIndex:], preamble[magicValueStartIndex:]) @@ -551,7 +561,7 @@ func readPreambleHelper( // Adds the seed to the seed history only if the magic value is valid. // This is to prevent malicious clients from filling up the history cache. ok, duplicateLogFields := config.SeedHistory.AddNew( - config.StrictHistoryMode, clientIP, "obfuscator-seed", osshSeed) + !config.DisableStrictHistoryMode, clientIP, "obfuscator-seed", osshSeed) errStr := "duplicate obfuscation seed" if duplicateLogFields != nil { if config.IrregularLogger != nil { @@ -686,7 +696,7 @@ func makeTerminator(keyword string, b []byte, direction string) ([]byte, error) return terminator, nil } -// makeTerminatedPrefixWithPadding generates bytes starting with the prefix bytes defiend +// makeTerminatedPrefixWithPadding generates bytes starting with the prefix bytes defined // by spec and ending with the generated terminator. // If the generated prefix is shorter than PREAMBLE_HEADER_LENGTH, it is padded // with random bytes. diff --git a/psiphon/common/obfuscator/obfuscator_test.go b/psiphon/common/obfuscator/obfuscator_test.go index 3ea64a910..584a9d7f5 100644 --- a/psiphon/common/obfuscator/obfuscator_test.go +++ b/psiphon/common/obfuscator/obfuscator_test.go @@ -125,8 +125,8 @@ func TestObfuscator(t *testing.T) { irregularLogFields = nil _, err = NewServerObfuscator(config, clientIP, bytes.NewReader(preamble)) - if err != nil { - t.Fatalf("NewServerObfuscator failed: %s", err) + if err == nil { + t.Fatalf("NewServerObfuscator unexpectedly succeeded") } duplicateClientID := irregularLogFields["duplicate_client_ip"] @@ -540,11 +540,11 @@ func TestIrregularConnections(t *testing.T) { irregularLogFields = nil - // Test: replayed prefixd connection with same IP + // Test: replayed prefixed connection with same IP clientReader = WrapConnWithSkipReader(newConn(preamble)) _, err = NewServerObfuscator(config, clientIP, clientReader) - if err != nil { - t.Fatalf("NewServerObfuscator failed: %s", err) + if err == nil { + t.Fatalf("NewServerObfuscator unexpectedly succeeded") } duplicateClientID := irregularLogFields["duplicate_client_ip"] diff --git a/psiphon/common/packetman/packetman_linux_test.go b/psiphon/common/packetman/packetman_linux_test.go index f2d911f5b..98bf5825d 100644 --- a/psiphon/common/packetman/packetman_linux_test.go +++ b/psiphon/common/packetman/packetman_linux_test.go @@ -1,3 +1,4 @@ +//go:build PSIPHON_RUN_PACKET_MANIPULATOR_TEST // +build PSIPHON_RUN_PACKET_MANIPULATOR_TEST /* @@ -177,6 +178,10 @@ func (logger *testLogger) WithTraceFields(fields common.LogFields) common.LogTra func (logger *testLogger) LogMetric(metric string, fields common.LogFields) { } +func (logger *testLogger) IsLogLevelDebug() bool { + return true +} + type testLogTrace struct { trace string fields common.LogFields diff --git a/psiphon/common/parameters/frontingSpec.go b/psiphon/common/parameters/frontingSpec.go index 8ec6cebd6..e72899d8f 100755 --- a/psiphon/common/parameters/frontingSpec.go +++ b/psiphon/common/parameters/frontingSpec.go @@ -24,6 +24,7 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" + 
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/regen" ) @@ -42,11 +43,16 @@ type FrontingSpecs []*FrontingSpec // ServerEntry.MeekFrontingAddresses: multiple candidates are supported, and // each candidate may be a regex, or a static value (with regex syntax). type FrontingSpec struct { + + // Optional/new fields use omitempty to minimize tactics tag churn. + FrontingProviderID string + Transports protocol.FrontingTransports `json:",omitempty"` Addresses []string - DisableSNI bool - VerifyServerName string - VerifyPins []string + DisableSNI bool `json:",omitempty"` + SkipVerify bool `json:",omitempty"` + VerifyServerName string `json:",omitempty"` + VerifyPins []string `json:",omitempty"` Host string } @@ -56,27 +62,35 @@ type FrontingSpec struct { // // The return values are: // - Dial Address (domain or IP address) +// - Transport (e.g., protocol.FRONTING_TRANSPORT_HTTPS) // - SNI (which may be transformed; unless it is "", which indicates omit SNI) // - VerifyServerName (see psiphon.CustomTLSConfig) // - VerifyPins (see psiphon.CustomTLSConfig) // - Host (Host header value) func (specs FrontingSpecs) SelectParameters() ( - string, string, string, string, []string, string, error) { + string, string, string, string, string, []string, string, error) { if len(specs) == 0 { - return "", "", "", "", nil, "", errors.TraceNew("missing fronting spec") + return "", "", "", "", "", nil, "", errors.TraceNew("missing fronting spec") } spec := specs[prng.Intn(len(specs))] if len(spec.Addresses) == 0 { - return "", "", "", "", nil, "", errors.TraceNew("missing fronting address") + return "", "", "", "", "", nil, "", errors.TraceNew("missing fronting address") + } + + // For backwards compatibility, the transport type defaults + // to "FRONTED-HTTPS" when the FrontingSpec specifies no transport types. + transport := protocol.FRONTING_TRANSPORT_HTTPS + if len(spec.Transports) > 0 { + transport = spec.Transports[prng.Intn(len(spec.Transports))] } frontingDialAddr, err := regen.GenerateString( spec.Addresses[prng.Intn(len(spec.Addresses))]) if err != nil { - return "", "", "", "", nil, "", errors.Trace(err) + return "", "", "", "", "", nil, "", errors.Trace(err) } SNIServerName := frontingDialAddr @@ -84,7 +98,13 @@ func (specs FrontingSpecs) SelectParameters() ( SNIServerName = "" } + // When SkipVerify is true, VerifyServerName and VerifyPins must be empty, + // as checked in Validate. When dialing in any mode, MeekConn will set + // CustomTLSConfig.SkipVerify to true as long as VerifyServerName is "". + // So SkipVerify does not need to be explicitly returned. + return spec.FrontingProviderID, + transport, frontingDialAddr, SNIServerName, spec.VerifyServerName, @@ -94,7 +114,7 @@ func (specs FrontingSpecs) SelectParameters() ( } // Validate checks that the JSON values are well-formed. 
-func (specs FrontingSpecs) Validate() error { +func (specs FrontingSpecs) Validate(allowSkipVerify bool) error { // An empty FrontingSpecs is allowed as a tactics setting, but // SelectParameters will fail at runtime: code that uses FrontingSpecs must @@ -105,6 +125,10 @@ func (specs FrontingSpecs) Validate() error { if len(spec.FrontingProviderID) == 0 { return errors.TraceNew("empty fronting provider ID") } + err := spec.Transports.Validate() + if err != nil { + return errors.Trace(err) + } if len(spec.Addresses) == 0 { return errors.TraceNew("missing fronting addresses") } @@ -113,13 +137,25 @@ func (specs FrontingSpecs) Validate() error { return errors.TraceNew("empty fronting address") } } - if len(spec.VerifyServerName) == 0 { - return errors.TraceNew("empty verify server name") - } - // An empty VerifyPins is allowed. - for _, pin := range spec.VerifyPins { - if len(pin) == 0 { - return errors.TraceNew("empty verify pin") + if spec.SkipVerify { + if !allowSkipVerify { + return errors.TraceNew("invalid skip verify") + } + if len(spec.VerifyServerName) != 0 { + return errors.TraceNew("unexpected verify server name") + } + if len(spec.VerifyPins) != 0 { + return errors.TraceNew("unexpected verify pins") + } + } else { + if len(spec.VerifyServerName) == 0 { + return errors.TraceNew("empty verify server name") + } + // An empty VerifyPins is allowed. + for _, pin := range spec.VerifyPins { + if len(pin) == 0 { + return errors.TraceNew("empty verify pin") + } } } if len(spec.Host) == 0 { diff --git a/psiphon/common/parameters/inproxy.go b/psiphon/common/parameters/inproxy.go new file mode 100755 index 000000000..c66b470d5 --- /dev/null +++ b/psiphon/common/parameters/inproxy.go @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package parameters + +import ( + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" +) + +// InproxyBrokerSpecsValue is a list of in-proxy broker specs. +type InproxyBrokerSpecsValue []*InproxyBrokerSpec + +// InproxyBrokerSpec specifies the configuration to use to establish a secure +// connection to an in-proxy broker. +type InproxyBrokerSpec struct { + BrokerPublicKey string + BrokerRootObfuscationSecret string + BrokerFrontingSpecs FrontingSpecs +} + +// Validate checks that the in-proxy broker specs values are well-formed. 
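+//
+// For example, a caller with no restriction list might invoke
+// brokerSpecs.Validate(nil); a nil checkBrokerPublicKeyList skips the
+// known-key check (brokerSpecs is an illustrative InproxyBrokerSpecsValue).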
+func (specs InproxyBrokerSpecsValue) Validate(checkBrokerPublicKeyList *[]string) error {
+
+	for _, spec := range specs {
+		if _, err := inproxy.SessionPublicKeyFromString(spec.BrokerPublicKey); err != nil {
+			return errors.Tracef("invalid broker public key: %w", err)
+		}
+		if checkBrokerPublicKeyList != nil && !common.Contains(*checkBrokerPublicKeyList, spec.BrokerPublicKey) {
+			return errors.TraceNew("unknown broker public key")
+		}
+		if _, err := inproxy.ObfuscationSecretFromString(spec.BrokerRootObfuscationSecret); err != nil {
+			return errors.Tracef("invalid broker root obfuscation secret: %w", err)
+		}
+		if len(spec.BrokerFrontingSpecs) == 0 {
+			return errors.TraceNew("missing broker fronting spec")
+		}
+		// Broker fronting specs may specify SkipVerify, since the meek
+		// payload has its own transport security layer, the Noise sessions.
+		// Broker fronting dials use MeekModeWrappedPlaintextRoundTrip.
+		allowSkipVerify := true
+		err := spec.BrokerFrontingSpecs.Validate(allowSkipVerify)
+		if err != nil {
+			return errors.Trace(err)
+		}
+	}
+	return nil
+}
+
+// InproxyCompartmentIDsValue is a list of in-proxy common compartment IDs.
+type InproxyCompartmentIDsValue []string
+
+// Validate checks that the in-proxy common compartment ID values are
+// well-formed.
+func (IDs InproxyCompartmentIDsValue) Validate(checkCompartmentIDList *[]string) error {
+
+	for _, ID := range IDs {
+		if _, err := inproxy.IDFromString(ID); err != nil {
+			return errors.Tracef("invalid compartment ID: %w", err)
+		}
+		if checkCompartmentIDList != nil && !common.Contains(*checkCompartmentIDList, ID) {
+			return errors.TraceNew("unknown compartment ID")
+		}
+	}
+	return nil
+}
+
+// InproxyDataChannelTrafficShapingParametersValue is type-compatible with
+// common/inproxy.DataChannelTrafficShapingParameters.
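+//
+// For example, a tactics value might look like the following, with all
+// numbers being illustrative only:
+//
+//	{"MinPaddedMessages": 0, "MaxPaddedMessages": 10,
+//	 "MinPaddingSize": 0, "MaxPaddingSize": 256,
+//	 "MinDecoyMessages": 0, "MaxDecoyMessages": 5,
+//	 "MinDecoySize": 0, "MaxDecoySize": 256,
+//	 "DecoyMessageProbability": 0.5}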
+type InproxyDataChannelTrafficShapingParametersValue struct { + MinPaddedMessages int + MaxPaddedMessages int + MinPaddingSize int + MaxPaddingSize int + MinDecoyMessages int + MaxDecoyMessages int + MinDecoySize int + MaxDecoySize int + DecoyMessageProbability float64 +} + +func (p *InproxyDataChannelTrafficShapingParametersValue) Validate() error { + if p.MinPaddedMessages < 0 || + p.MaxPaddedMessages < 0 || + p.MinPaddingSize < 0 || + p.MaxPaddingSize < 0 || + p.MinDecoyMessages < 0 || + p.MaxDecoyMessages < 0 || + p.MinDecoySize < 0 || + p.MaxDecoySize < 0 || + p.DecoyMessageProbability < 0.0 { + return errors.TraceNew("invalid parameter") + } + return nil +} diff --git a/psiphon/common/parameters/parameters.go b/psiphon/common/parameters/parameters.go old mode 100755 new mode 100644 index 5fb5b257a..48d4e2b3e --- a/psiphon/common/parameters/parameters.go +++ b/psiphon/common/parameters/parameters.go @@ -70,303 +70,370 @@ import ( ) const ( - NetworkLatencyMultiplier = "NetworkLatencyMultiplier" - NetworkLatencyMultiplierMin = "NetworkLatencyMultiplierMin" - NetworkLatencyMultiplierMax = "NetworkLatencyMultiplierMax" - NetworkLatencyMultiplierLambda = "NetworkLatencyMultiplierLambda" - TacticsWaitPeriod = "TacticsWaitPeriod" - TacticsRetryPeriod = "TacticsRetryPeriod" - TacticsRetryPeriodJitter = "TacticsRetryPeriodJitter" - TacticsTimeout = "TacticsTimeout" - ConnectionWorkerPoolSize = "ConnectionWorkerPoolSize" - TunnelPoolSize = "TunnelPoolSize" - TunnelConnectTimeout = "TunnelConnectTimeout" - EstablishTunnelTimeout = "EstablishTunnelTimeout" - EstablishTunnelWorkTime = "EstablishTunnelWorkTime" - EstablishTunnelPausePeriod = "EstablishTunnelPausePeriod" - EstablishTunnelPausePeriodJitter = "EstablishTunnelPausePeriodJitter" - EstablishTunnelServerAffinityGracePeriod = "EstablishTunnelServerAffinityGracePeriod" - StaggerConnectionWorkersPeriod = "StaggerConnectionWorkersPeriod" - StaggerConnectionWorkersJitter = "StaggerConnectionWorkersJitter" - LimitIntensiveConnectionWorkers = "LimitIntensiveConnectionWorkers" - UpstreamProxyErrorMinWaitDuration = "UpstreamProxyErrorMinWaitDuration" - UpstreamProxyErrorMaxWaitDuration = "UpstreamProxyErrorMaxWaitDuration" - IgnoreHandshakeStatsRegexps = "IgnoreHandshakeStatsRegexps" - PrioritizeTunnelProtocolsProbability = "PrioritizeTunnelProtocolsProbability" - PrioritizeTunnelProtocols = "PrioritizeTunnelProtocols" - PrioritizeTunnelProtocolsCandidateCount = "PrioritizeTunnelProtocolsCandidateCount" - InitialLimitTunnelProtocolsProbability = "InitialLimitTunnelProtocolsProbability" - InitialLimitTunnelProtocols = "InitialLimitTunnelProtocols" - InitialLimitTunnelProtocolsCandidateCount = "InitialLimitTunnelProtocolsCandidateCount" - LimitTunnelProtocolsProbability = "LimitTunnelProtocolsProbability" - LimitTunnelProtocols = "LimitTunnelProtocols" - LimitTunnelDialPortNumbersProbability = "LimitTunnelDialPortNumbersProbability" - LimitTunnelDialPortNumbers = "LimitTunnelDialPortNumbers" - LimitTLSProfilesProbability = "LimitTLSProfilesProbability" - LimitTLSProfiles = "LimitTLSProfiles" - UseOnlyCustomTLSProfiles = "UseOnlyCustomTLSProfiles" - CustomTLSProfiles = "CustomTLSProfiles" - SelectRandomizedTLSProfileProbability = "SelectRandomizedTLSProfileProbability" - NoDefaultTLSSessionIDProbability = "NoDefaultTLSSessionIDProbability" - DisableFrontingProviderTLSProfiles = "DisableFrontingProviderTLSProfiles" - LimitQUICVersionsProbability = "LimitQUICVersionsProbability" - LimitQUICVersions = "LimitQUICVersions" - 
DisableFrontingProviderQUICVersions = "DisableFrontingProviderQUICVersions" - QUICDisableClientPathMTUDiscoveryProbability = "QUICDisableClientPathMTUDiscoveryProbability" - FragmentorProbability = "FragmentorProbability" - FragmentorLimitProtocols = "FragmentorLimitProtocols" - FragmentorMinTotalBytes = "FragmentorMinTotalBytes" - FragmentorMaxTotalBytes = "FragmentorMaxTotalBytes" - FragmentorMinWriteBytes = "FragmentorMinWriteBytes" - FragmentorMaxWriteBytes = "FragmentorMaxWriteBytes" - FragmentorMinDelay = "FragmentorMinDelay" - FragmentorMaxDelay = "FragmentorMaxDelay" - FragmentorDownstreamProbability = "FragmentorDownstreamProbability" - FragmentorDownstreamLimitProtocols = "FragmentorDownstreamLimitProtocols" - FragmentorDownstreamMinTotalBytes = "FragmentorDownstreamMinTotalBytes" - FragmentorDownstreamMaxTotalBytes = "FragmentorDownstreamMaxTotalBytes" - FragmentorDownstreamMinWriteBytes = "FragmentorDownstreamMinWriteBytes" - FragmentorDownstreamMaxWriteBytes = "FragmentorDownstreamMaxWriteBytes" - FragmentorDownstreamMinDelay = "FragmentorDownstreamMinDelay" - FragmentorDownstreamMaxDelay = "FragmentorDownstreamMaxDelay" - ObfuscatedSSHMinPadding = "ObfuscatedSSHMinPadding" - ObfuscatedSSHMaxPadding = "ObfuscatedSSHMaxPadding" - TunnelOperateShutdownTimeout = "TunnelOperateShutdownTimeout" - TunnelPortForwardDialTimeout = "TunnelPortForwardDialTimeout" - PacketTunnelReadTimeout = "PacketTunnelReadTimeout" - TunnelRateLimits = "TunnelRateLimits" - AdditionalCustomHeaders = "AdditionalCustomHeaders" - SpeedTestPaddingMinBytes = "SpeedTestPaddingMinBytes" - SpeedTestPaddingMaxBytes = "SpeedTestPaddingMaxBytes" - SpeedTestMaxSampleCount = "SpeedTestMaxSampleCount" - SSHKeepAliveSpeedTestSampleProbability = "SSHKeepAliveSpeedTestSampleProbability" - SSHKeepAlivePaddingMinBytes = "SSHKeepAlivePaddingMinBytes" - SSHKeepAlivePaddingMaxBytes = "SSHKeepAlivePaddingMaxBytes" - SSHKeepAlivePeriodMin = "SSHKeepAlivePeriodMin" - SSHKeepAlivePeriodMax = "SSHKeepAlivePeriodMax" - SSHKeepAlivePeriodicTimeout = "SSHKeepAlivePeriodicTimeout" - SSHKeepAlivePeriodicInactivePeriod = "SSHKeepAlivePeriodicInactivePeriod" - SSHKeepAliveProbeTimeout = "SSHKeepAliveProbeTimeout" - SSHKeepAliveProbeInactivePeriod = "SSHKeepAliveProbeInactivePeriod" - SSHKeepAliveNetworkConnectivityPollingPeriod = "SSHKeepAliveNetworkConnectivityPollingPeriod" - SSHKeepAliveResetOnFailureProbability = "SSHKeepAliveResetOnFailureProbability" - HTTPProxyOriginServerTimeout = "HTTPProxyOriginServerTimeout" - HTTPProxyMaxIdleConnectionsPerHost = "HTTPProxyMaxIdleConnectionsPerHost" - FetchRemoteServerListTimeout = "FetchRemoteServerListTimeout" - FetchRemoteServerListRetryPeriod = "FetchRemoteServerListRetryPeriod" - FetchRemoteServerListStalePeriod = "FetchRemoteServerListStalePeriod" - RemoteServerListSignaturePublicKey = "RemoteServerListSignaturePublicKey" - RemoteServerListURLs = "RemoteServerListURLs" - ObfuscatedServerListRootURLs = "ObfuscatedServerListRootURLs" - PsiphonAPIRequestTimeout = "PsiphonAPIRequestTimeout" - PsiphonAPIStatusRequestPeriodMin = "PsiphonAPIStatusRequestPeriodMin" - PsiphonAPIStatusRequestPeriodMax = "PsiphonAPIStatusRequestPeriodMax" - PsiphonAPIStatusRequestShortPeriodMin = "PsiphonAPIStatusRequestShortPeriodMin" - PsiphonAPIStatusRequestShortPeriodMax = "PsiphonAPIStatusRequestShortPeriodMax" - PsiphonAPIStatusRequestPaddingMinBytes = "PsiphonAPIStatusRequestPaddingMinBytes" - PsiphonAPIStatusRequestPaddingMaxBytes = "PsiphonAPIStatusRequestPaddingMaxBytes" - 
PsiphonAPIPersistentStatsMaxCount = "PsiphonAPIPersistentStatsMaxCount" - PsiphonAPIConnectedRequestPeriod = "PsiphonAPIConnectedRequestPeriod" - PsiphonAPIConnectedRequestRetryPeriod = "PsiphonAPIConnectedRequestRetryPeriod" - FetchSplitTunnelRoutesTimeout = "FetchSplitTunnelRoutesTimeout" - SplitTunnelRoutesURLFormat = "SplitTunnelRoutesURLFormat" - SplitTunnelRoutesSignaturePublicKey = "SplitTunnelRoutesSignaturePublicKey" - SplitTunnelDNSServer = "SplitTunnelDNSServer" - SplitTunnelClassificationTTL = "SplitTunnelClassificationTTL" - SplitTunnelClassificationMaxEntries = "SplitTunnelClassificationMaxEntries" - FetchUpgradeTimeout = "FetchUpgradeTimeout" - FetchUpgradeRetryPeriod = "FetchUpgradeRetryPeriod" - FetchUpgradeStalePeriod = "FetchUpgradeStalePeriod" - UpgradeDownloadURLs = "UpgradeDownloadURLs" - UpgradeDownloadClientVersionHeader = "UpgradeDownloadClientVersionHeader" - TotalBytesTransferredNoticePeriod = "TotalBytesTransferredNoticePeriod" - TotalBytesTransferredEmitMemoryMetrics = "TotalBytesTransferredEmitMemoryMetrics" - MeekDialDomainsOnly = "MeekDialDomainsOnly" - MeekLimitBufferSizes = "MeekLimitBufferSizes" - MeekCookieMaxPadding = "MeekCookieMaxPadding" - MeekFullReceiveBufferLength = "MeekFullReceiveBufferLength" - MeekReadPayloadChunkLength = "MeekReadPayloadChunkLength" - MeekLimitedFullReceiveBufferLength = "MeekLimitedFullReceiveBufferLength" - MeekLimitedReadPayloadChunkLength = "MeekLimitedReadPayloadChunkLength" - MeekMinPollInterval = "MeekMinPollInterval" - MeekMinPollIntervalJitter = "MeekMinPollIntervalJitter" - MeekMaxPollInterval = "MeekMaxPollInterval" - MeekMaxPollIntervalJitter = "MeekMaxPollIntervalJitter" - MeekPollIntervalMultiplier = "MeekPollIntervalMultiplier" - MeekPollIntervalJitter = "MeekPollIntervalJitter" - MeekApplyPollIntervalMultiplierProbability = "MeekApplyPollIntervalMultiplierProbability" - MeekRoundTripRetryDeadline = "MeekRoundTripRetryDeadline" - MeekRoundTripRetryMinDelay = "MeekRoundTripRetryMinDelay" - MeekRoundTripRetryMaxDelay = "MeekRoundTripRetryMaxDelay" - MeekRoundTripRetryMultiplier = "MeekRoundTripRetryMultiplier" - MeekRoundTripTimeout = "MeekRoundTripTimeout" - MeekTrafficShapingProbability = "MeekTrafficShapingProbability" - MeekTrafficShapingLimitProtocols = "MeekTrafficShapingLimitProtocols" - MeekMinTLSPadding = "MeekMinTLSPadding" - MeekMaxTLSPadding = "MeekMaxTLSPadding" - MeekMinLimitRequestPayloadLength = "MeekMinLimitRequestPayloadLength" - MeekMaxLimitRequestPayloadLength = "MeekMaxLimitRequestPayloadLength" - MeekRedialTLSProbability = "MeekRedialTLSProbability" - MeekAlternateCookieNameProbability = "MeekAlternateCookieNameProbability" - MeekAlternateContentTypeProbability = "MeekAlternateContentTypeProbability" - TransformHostNameProbability = "TransformHostNameProbability" - PickUserAgentProbability = "PickUserAgentProbability" - LivenessTestMinUpstreamBytes = "LivenessTestMinUpstreamBytes" - LivenessTestMaxUpstreamBytes = "LivenessTestMaxUpstreamBytes" - LivenessTestMinDownstreamBytes = "LivenessTestMinDownstreamBytes" - LivenessTestMaxDownstreamBytes = "LivenessTestMaxDownstreamBytes" - ReplayCandidateCount = "ReplayCandidateCount" - ReplayDialParametersTTL = "ReplayDialParametersTTL" - ReplayTargetUpstreamBytes = "ReplayTargetUpstreamBytes" - ReplayTargetDownstreamBytes = "ReplayTargetDownstreamBytes" - ReplayTargetTunnelDuration = "ReplayTargetTunnelDuration" - ReplayLaterRoundMoveToFrontProbability = "ReplayLaterRoundMoveToFrontProbability" - ReplayRetainFailedProbability = 
"ReplayRetainFailedProbability" - ReplayIgnoreChangedConfigState = "ReplayIgnoreChangedConfigState" - ReplayBPF = "ReplayBPF" - ReplaySSH = "ReplaySSH" - ReplayObfuscatorPadding = "ReplayObfuscatorPadding" - ReplayFragmentor = "ReplayFragmentor" - ReplayTLSProfile = "ReplayTLSProfile" - ReplayFronting = "ReplayFronting" - ReplayHostname = "ReplayHostname" - ReplayQUICVersion = "ReplayQUICVersion" - ReplayObfuscatedQUIC = "ReplayObfuscatedQUIC" - ReplayObfuscatedQUICNonceTransformer = "ReplayObfuscatedQUICNonceTransformer" - ReplayConjureRegistration = "ReplayConjureRegistration" - ReplayConjureTransport = "ReplayConjureTransport" - ReplayLivenessTest = "ReplayLivenessTest" - ReplayUserAgent = "ReplayUserAgent" - ReplayAPIRequestPadding = "ReplayAPIRequestPadding" - ReplayHoldOffTunnel = "ReplayHoldOffTunnel" - ReplayResolveParameters = "ReplayResolveParameters" - ReplayHTTPTransformerParameters = "ReplayHTTPTransformerParameters" - ReplayOSSHSeedTransformerParameters = "ReplayOSSHSeedTransformerParameters" - ReplayOSSHPrefix = "ReplayOSSHPrefix" - ReplayTLSFragmentClientHello = "ReplayTLSFragmentClientHello" - APIRequestUpstreamPaddingMinBytes = "APIRequestUpstreamPaddingMinBytes" - APIRequestUpstreamPaddingMaxBytes = "APIRequestUpstreamPaddingMaxBytes" - APIRequestDownstreamPaddingMinBytes = "APIRequestDownstreamPaddingMinBytes" - APIRequestDownstreamPaddingMaxBytes = "APIRequestDownstreamPaddingMaxBytes" - PersistentStatsMaxStoreRecords = "PersistentStatsMaxStoreRecords" - PersistentStatsMaxSendBytes = "PersistentStatsMaxSendBytes" - RecordRemoteServerListPersistentStatsProbability = "RecordRemoteServerListPersistentStatsProbability" - RecordFailedTunnelPersistentStatsProbability = "RecordFailedTunnelPersistentStatsProbability" - ServerEntryMinimumAgeForPruning = "ServerEntryMinimumAgeForPruning" - ApplicationParametersProbability = "ApplicationParametersProbability" - ApplicationParameters = "ApplicationParameters" - BPFServerTCPProgram = "BPFServerTCPProgram" - BPFServerTCPProbability = "BPFServerTCPProbability" - BPFClientTCPProgram = "BPFClientTCPProgram" - BPFClientTCPProbability = "BPFClientTCPProbability" - ServerPacketManipulationSpecs = "ServerPacketManipulationSpecs" - ServerProtocolPacketManipulations = "ServerProtocolPacketManipulations" - ServerPacketManipulationProbability = "ServerPacketManipulationProbability" - FeedbackUploadURLs = "FeedbackUploadURLs" - FeedbackEncryptionPublicKey = "FeedbackEncryptionPublicKey" - FeedbackTacticsWaitPeriod = "FeedbackTacticsWaitPeriod" - FeedbackUploadMaxAttempts = "FeedbackUploadMaxAttempts" - FeedbackUploadRetryMinDelaySeconds = "FeedbackUploadRetryMinDelaySeconds" - FeedbackUploadRetryMaxDelaySeconds = "FeedbackUploadRetryMaxDelaySeconds" - FeedbackUploadTimeoutSeconds = "FeedbackUploadTimeoutSeconds" - ServerReplayPacketManipulation = "ServerReplayPacketManipulation" - ServerReplayFragmentor = "ServerReplayFragmentor" - ServerReplayUnknownGeoIP = "ServerReplayUnknownGeoIP" - ServerReplayTTL = "ServerReplayTTL" - ServerReplayTargetWaitDuration = "ServerReplayTargetWaitDuration" - ServerReplayTargetTunnelDuration = "ServerReplayTargetTunnelDuration" - ServerReplayTargetUpstreamBytes = "ServerReplayTargetUpstreamBytes" - ServerReplayTargetDownstreamBytes = "ServerReplayTargetDownstreamBytes" - ServerReplayFailedCountThreshold = "ServerReplayFailedCountThreshold" - ServerBurstUpstreamDeadline = "ServerBurstUpstreamDeadline" - ServerBurstUpstreamTargetBytes = "ServerBurstUpstreamTargetBytes" - ServerBurstDownstreamDeadline = 
"ServerBurstDownstreamDeadline" - ServerBurstDownstreamTargetBytes = "ServerBurstDownstreamTargetBytes" - ClientBurstUpstreamDeadline = "ClientBurstUpstreamDeadline" - ClientBurstUpstreamTargetBytes = "ClientBurstUpstreamTargetBytes" - ClientBurstDownstreamDeadline = "ClientBurstDownstreamDeadline" - ClientBurstDownstreamTargetBytes = "ClientBurstDownstreamTargetBytes" - ConjureCachedRegistrationTTL = "ConjureCachedRegistrationTTL" - ConjureAPIRegistrarURL = "ConjureAPIRegistrarURL" - ConjureAPIRegistrarBidirectionalURL = "ConjureAPIRegistrarBidirectionalURL" - ConjureAPIRegistrarFrontingSpecs = "ConjureAPIRegistrarFrontingSpecs" - ConjureAPIRegistrarMinDelay = "ConjureAPIRegistrarMinDelay" - ConjureAPIRegistrarMaxDelay = "ConjureAPIRegistrarMaxDelay" - ConjureDecoyRegistrarProbability = "ConjureDecoyRegistrarProbability" - ConjureDecoyRegistrarWidth = "ConjureDecoyRegistrarWidth" - ConjureDecoyRegistrarMinDelay = "ConjureDecoyRegistrarMinDelay" - ConjureDecoyRegistrarMaxDelay = "ConjureDecoyRegistrarMaxDelay" - ConjureEnableIPv6Dials = "ConjureEnableIPv6Dials" - ConjureEnablePortRandomization = "ConjureEnablePortRandomization" - ConjureEnableRegistrationOverrides = "ConjureEnableRegistrationOverrides" - ConjureLimitTransportsProbability = "ConjureLimitTransportsProbability" - ConjureLimitTransports = "ConjureLimitTransports" - ConjureSTUNServerAddresses = "ConjureSTUNServerAddresses" - ConjureDTLSEmptyInitialPacketProbability = "ConjureDTLSEmptyInitialPacketProbability" - CustomHostNameRegexes = "CustomHostNameRegexes" - CustomHostNameProbability = "CustomHostNameProbability" - CustomHostNameLimitProtocols = "CustomHostNameLimitProtocols" - HoldOffTunnelMinDuration = "HoldOffTunnelMinDuration" - HoldOffTunnelMaxDuration = "HoldOffTunnelMaxDuration" - HoldOffTunnelProtocols = "HoldOffTunnelProtocols" - HoldOffTunnelFrontingProviderIDs = "HoldOffTunnelFrontingProviderIDs" - HoldOffTunnelProbability = "HoldOffTunnelProbability" - RestrictFrontingProviderIDs = "RestrictFrontingProviderIDs" - RestrictFrontingProviderIDsServerProbability = "RestrictFrontingProviderIDsServerProbability" - RestrictFrontingProviderIDsClientProbability = "RestrictFrontingProviderIDsClientProbability" - HoldOffDirectTunnelMinDuration = "HoldOffDirectTunnelMinDuration" - HoldOffDirectTunnelMaxDuration = "HoldOffDirectTunnelMaxDuration" - HoldOffDirectTunnelProviderRegions = "HoldOffDirectTunnelProviderRegions" - HoldOffDirectTunnelProbability = "HoldOffDirectTunnelProbability" - RestrictDirectProviderRegions = "RestrictDirectProviderRegions" - RestrictDirectProviderIDsServerProbability = "RestrictDirectProviderIDsServerProbability" - RestrictDirectProviderIDsClientProbability = "RestrictDirectProviderIDsClientProbability" - UpstreamProxyAllowAllServerEntrySources = "UpstreamProxyAllowAllServerEntrySources" - DestinationBytesMetricsASN = "DestinationBytesMetricsASN" - DNSResolverAttemptsPerServer = "DNSResolverAttemptsPerServer" - DNSResolverAttemptsPerPreferredServer = "DNSResolverAttemptsPerPreferredServer" - DNSResolverRequestTimeout = "DNSResolverRequestTimeout" - DNSResolverAwaitTimeout = "DNSResolverAwaitTimeout" - DNSResolverPreresolvedIPAddressCIDRs = "DNSResolverPreresolvedIPAddressCIDRs" - DNSResolverPreresolvedIPAddressProbability = "DNSResolverPreresolvedIPAddressProbability" - DNSResolverAlternateServers = "DNSResolverAlternateServers" - DNSResolverPreferredAlternateServers = "DNSResolverPreferredAlternateServers" - DNSResolverPreferAlternateServerProbability = 
"DNSResolverPreferAlternateServerProbability" - DNSResolverProtocolTransformSpecs = "DNSResolverProtocolTransformSpecs" - DNSResolverProtocolTransformScopedSpecNames = "DNSResolverProtocolTransformScopedSpecNames" - DNSResolverProtocolTransformProbability = "DNSResolverProtocolTransformProbability" - DNSResolverIncludeEDNS0Probability = "DNSResolverIncludeEDNS0Probability" - DNSResolverCacheExtensionInitialTTL = "DNSResolverCacheExtensionInitialTTL" - DNSResolverCacheExtensionVerifiedTTL = "DNSResolverCacheExtensionVerifiedTTL" - AddFrontingProviderPsiphonFrontingHeader = "AddFrontingProviderPsiphonFrontingHeader" - DirectHTTPProtocolTransformSpecs = "DirectHTTPProtocolTransformSpecs" - DirectHTTPProtocolTransformScopedSpecNames = "DirectHTTPProtocolTransformScopedSpecNames" - DirectHTTPProtocolTransformProbability = "DirectHTTPProtocolTransformProbability" - FrontedHTTPProtocolTransformSpecs = "FrontedHTTPProtocolTransformSpecs" - FrontedHTTPProtocolTransformScopedSpecNames = "FrontedHTTPProtocolTransformScopedSpecNames" - FrontedHTTPProtocolTransformProbability = "FrontedHTTPProtocolTransformProbability" - OSSHObfuscatorSeedTransformSpecs = "OSSHObfuscatorSeedTransformSpecs" - OSSHObfuscatorSeedTransformScopedSpecNames = "OSSHObfuscatorSeedTransformScopedSpecNames" - OSSHObfuscatorSeedTransformProbability = "OSSHObfuscatorSeedTransformProbability" - ObfuscatedQUICNonceTransformSpecs = "ObfuscatedQUICNonceTransformSpecs" - ObfuscatedQUICNonceTransformScopedSpecNames = "ObfuscatedQUICNonceTransformScopedSpecNames" - ObfuscatedQUICNonceTransformProbability = "ObfuscatedQUICNonceTransformProbability" - OSSHPrefixSpecs = "OSSHPrefixSpecs" - OSSHPrefixScopedSpecNames = "OSSHPrefixScopedSpecNames" - OSSHPrefixProbability = "OSSHPrefixProbability" - OSSHPrefixSplitMinDelay = "OSSHPrefixSplitMinDelay" - OSSHPrefixSplitMaxDelay = "OSSHPrefixSplitMaxDelay" - OSSHPrefixEnableFragmentor = "OSSHPrefixEnableFragmentor" - ServerOSSHPrefixSpecs = "ServerOSSHPrefixSpecs" - TLSTunnelTrafficShapingProbability = "TLSTunnelTrafficShapingProbability" - TLSTunnelMinTLSPadding = "TLSTunnelMinTLSPadding" - TLSTunnelMaxTLSPadding = "TLSTunnelMaxTLSPadding" - TLSFragmentClientHelloProbability = "TLSFragmentClientHelloProbability" - TLSFragmentClientHelloLimitProtocols = "TLSFragmentClientHelloLimitProtocols" - SteeringIPCacheTTL = "SteeringIPCacheTTL" - SteeringIPCacheMaxEntries = "SteeringIPCacheMaxEntries" - SteeringIPProbability = "SteeringIPProbability" - ServerDiscoveryStrategy = "ServerDiscoveryStrategy" + NetworkLatencyMultiplier = "NetworkLatencyMultiplier" + NetworkLatencyMultiplierMin = "NetworkLatencyMultiplierMin" + NetworkLatencyMultiplierMax = "NetworkLatencyMultiplierMax" + NetworkLatencyMultiplierLambda = "NetworkLatencyMultiplierLambda" + TacticsWaitPeriod = "TacticsWaitPeriod" + TacticsRetryPeriod = "TacticsRetryPeriod" + TacticsRetryPeriodJitter = "TacticsRetryPeriodJitter" + TacticsTimeout = "TacticsTimeout" + ConnectionWorkerPoolSize = "ConnectionWorkerPoolSize" + TunnelPoolSize = "TunnelPoolSize" + TunnelConnectTimeout = "TunnelConnectTimeout" + EstablishTunnelTimeout = "EstablishTunnelTimeout" + EstablishTunnelWorkTime = "EstablishTunnelWorkTime" + EstablishTunnelPausePeriod = "EstablishTunnelPausePeriod" + EstablishTunnelPausePeriodJitter = "EstablishTunnelPausePeriodJitter" + EstablishTunnelServerAffinityGracePeriod = "EstablishTunnelServerAffinityGracePeriod" + StaggerConnectionWorkersPeriod = "StaggerConnectionWorkersPeriod" + StaggerConnectionWorkersJitter = 
"StaggerConnectionWorkersJitter" + LimitIntensiveConnectionWorkers = "LimitIntensiveConnectionWorkers" + UpstreamProxyErrorMinWaitDuration = "UpstreamProxyErrorMinWaitDuration" + UpstreamProxyErrorMaxWaitDuration = "UpstreamProxyErrorMaxWaitDuration" + IgnoreHandshakeStatsRegexps = "IgnoreHandshakeStatsRegexps" + PrioritizeTunnelProtocolsProbability = "PrioritizeTunnelProtocolsProbability" + PrioritizeTunnelProtocols = "PrioritizeTunnelProtocols" + PrioritizeTunnelProtocolsCandidateCount = "PrioritizeTunnelProtocolsCandidateCount" + InitialLimitTunnelProtocolsProbability = "InitialLimitTunnelProtocolsProbability" + InitialLimitTunnelProtocols = "InitialLimitTunnelProtocols" + InitialLimitTunnelProtocolsCandidateCount = "InitialLimitTunnelProtocolsCandidateCount" + LimitTunnelProtocolsProbability = "LimitTunnelProtocolsProbability" + LimitTunnelProtocols = "LimitTunnelProtocols" + LimitTunnelDialPortNumbersProbability = "LimitTunnelDialPortNumbersProbability" + LimitTunnelDialPortNumbers = "LimitTunnelDialPortNumbers" + LimitTLSProfilesProbability = "LimitTLSProfilesProbability" + LimitTLSProfiles = "LimitTLSProfiles" + UseOnlyCustomTLSProfiles = "UseOnlyCustomTLSProfiles" + CustomTLSProfiles = "CustomTLSProfiles" + SelectRandomizedTLSProfileProbability = "SelectRandomizedTLSProfileProbability" + NoDefaultTLSSessionIDProbability = "NoDefaultTLSSessionIDProbability" + DisableFrontingProviderTLSProfiles = "DisableFrontingProviderTLSProfiles" + LimitQUICVersionsProbability = "LimitQUICVersionsProbability" + LimitQUICVersions = "LimitQUICVersions" + DisableFrontingProviderQUICVersions = "DisableFrontingProviderQUICVersions" + QUICDisableClientPathMTUDiscoveryProbability = "QUICDisableClientPathMTUDiscoveryProbability" + FragmentorProbability = "FragmentorProbability" + FragmentorLimitProtocols = "FragmentorLimitProtocols" + FragmentorMinTotalBytes = "FragmentorMinTotalBytes" + FragmentorMaxTotalBytes = "FragmentorMaxTotalBytes" + FragmentorMinWriteBytes = "FragmentorMinWriteBytes" + FragmentorMaxWriteBytes = "FragmentorMaxWriteBytes" + FragmentorMinDelay = "FragmentorMinDelay" + FragmentorMaxDelay = "FragmentorMaxDelay" + FragmentorDownstreamProbability = "FragmentorDownstreamProbability" + FragmentorDownstreamLimitProtocols = "FragmentorDownstreamLimitProtocols" + FragmentorDownstreamMinTotalBytes = "FragmentorDownstreamMinTotalBytes" + FragmentorDownstreamMaxTotalBytes = "FragmentorDownstreamMaxTotalBytes" + FragmentorDownstreamMinWriteBytes = "FragmentorDownstreamMinWriteBytes" + FragmentorDownstreamMaxWriteBytes = "FragmentorDownstreamMaxWriteBytes" + FragmentorDownstreamMinDelay = "FragmentorDownstreamMinDelay" + FragmentorDownstreamMaxDelay = "FragmentorDownstreamMaxDelay" + ObfuscatedSSHMinPadding = "ObfuscatedSSHMinPadding" + ObfuscatedSSHMaxPadding = "ObfuscatedSSHMaxPadding" + TunnelOperateShutdownTimeout = "TunnelOperateShutdownTimeout" + TunnelPortForwardDialTimeout = "TunnelPortForwardDialTimeout" + PacketTunnelReadTimeout = "PacketTunnelReadTimeout" + TunnelRateLimits = "TunnelRateLimits" + AdditionalCustomHeaders = "AdditionalCustomHeaders" + SpeedTestPaddingMinBytes = "SpeedTestPaddingMinBytes" + SpeedTestPaddingMaxBytes = "SpeedTestPaddingMaxBytes" + SpeedTestMaxSampleCount = "SpeedTestMaxSampleCount" + SSHKeepAliveSpeedTestSampleProbability = "SSHKeepAliveSpeedTestSampleProbability" + SSHKeepAlivePaddingMinBytes = "SSHKeepAlivePaddingMinBytes" + SSHKeepAlivePaddingMaxBytes = "SSHKeepAlivePaddingMaxBytes" + SSHKeepAlivePeriodMin = "SSHKeepAlivePeriodMin" + SSHKeepAlivePeriodMax 
= "SSHKeepAlivePeriodMax" + SSHKeepAlivePeriodicTimeout = "SSHKeepAlivePeriodicTimeout" + SSHKeepAlivePeriodicInactivePeriod = "SSHKeepAlivePeriodicInactivePeriod" + SSHKeepAliveProbeTimeout = "SSHKeepAliveProbeTimeout" + SSHKeepAliveProbeInactivePeriod = "SSHKeepAliveProbeInactivePeriod" + SSHKeepAliveNetworkConnectivityPollingPeriod = "SSHKeepAliveNetworkConnectivityPollingPeriod" + SSHKeepAliveResetOnFailureProbability = "SSHKeepAliveResetOnFailureProbability" + HTTPProxyOriginServerTimeout = "HTTPProxyOriginServerTimeout" + HTTPProxyMaxIdleConnectionsPerHost = "HTTPProxyMaxIdleConnectionsPerHost" + FetchRemoteServerListTimeout = "FetchRemoteServerListTimeout" + FetchRemoteServerListRetryPeriod = "FetchRemoteServerListRetryPeriod" + FetchRemoteServerListStalePeriod = "FetchRemoteServerListStalePeriod" + RemoteServerListSignaturePublicKey = "RemoteServerListSignaturePublicKey" + RemoteServerListURLs = "RemoteServerListURLs" + ObfuscatedServerListRootURLs = "ObfuscatedServerListRootURLs" + PsiphonAPIRequestTimeout = "PsiphonAPIRequestTimeout" + PsiphonAPIStatusRequestPeriodMin = "PsiphonAPIStatusRequestPeriodMin" + PsiphonAPIStatusRequestPeriodMax = "PsiphonAPIStatusRequestPeriodMax" + PsiphonAPIStatusRequestShortPeriodMin = "PsiphonAPIStatusRequestShortPeriodMin" + PsiphonAPIStatusRequestShortPeriodMax = "PsiphonAPIStatusRequestShortPeriodMax" + PsiphonAPIStatusRequestPaddingMinBytes = "PsiphonAPIStatusRequestPaddingMinBytes" + PsiphonAPIStatusRequestPaddingMaxBytes = "PsiphonAPIStatusRequestPaddingMaxBytes" + PsiphonAPIPersistentStatsMaxCount = "PsiphonAPIPersistentStatsMaxCount" + PsiphonAPIConnectedRequestPeriod = "PsiphonAPIConnectedRequestPeriod" + PsiphonAPIConnectedRequestRetryPeriod = "PsiphonAPIConnectedRequestRetryPeriod" + FetchSplitTunnelRoutesTimeout = "FetchSplitTunnelRoutesTimeout" + SplitTunnelRoutesURLFormat = "SplitTunnelRoutesURLFormat" + SplitTunnelRoutesSignaturePublicKey = "SplitTunnelRoutesSignaturePublicKey" + SplitTunnelDNSServer = "SplitTunnelDNSServer" + SplitTunnelClassificationTTL = "SplitTunnelClassificationTTL" + SplitTunnelClassificationMaxEntries = "SplitTunnelClassificationMaxEntries" + FetchUpgradeTimeout = "FetchUpgradeTimeout" + FetchUpgradeRetryPeriod = "FetchUpgradeRetryPeriod" + FetchUpgradeStalePeriod = "FetchUpgradeStalePeriod" + UpgradeDownloadURLs = "UpgradeDownloadURLs" + UpgradeDownloadClientVersionHeader = "UpgradeDownloadClientVersionHeader" + TotalBytesTransferredNoticePeriod = "TotalBytesTransferredNoticePeriod" + TotalBytesTransferredEmitMemoryMetrics = "TotalBytesTransferredEmitMemoryMetrics" + MeekDialDomainsOnly = "MeekDialDomainsOnly" + MeekLimitBufferSizes = "MeekLimitBufferSizes" + MeekCookieMaxPadding = "MeekCookieMaxPadding" + MeekFullReceiveBufferLength = "MeekFullReceiveBufferLength" + MeekReadPayloadChunkLength = "MeekReadPayloadChunkLength" + MeekLimitedFullReceiveBufferLength = "MeekLimitedFullReceiveBufferLength" + MeekLimitedReadPayloadChunkLength = "MeekLimitedReadPayloadChunkLength" + MeekMinPollInterval = "MeekMinPollInterval" + MeekMinPollIntervalJitter = "MeekMinPollIntervalJitter" + MeekMaxPollInterval = "MeekMaxPollInterval" + MeekMaxPollIntervalJitter = "MeekMaxPollIntervalJitter" + MeekPollIntervalMultiplier = "MeekPollIntervalMultiplier" + MeekPollIntervalJitter = "MeekPollIntervalJitter" + MeekApplyPollIntervalMultiplierProbability = "MeekApplyPollIntervalMultiplierProbability" + MeekRoundTripRetryDeadline = "MeekRoundTripRetryDeadline" + MeekRoundTripRetryMinDelay = "MeekRoundTripRetryMinDelay" + 
MeekRoundTripRetryMaxDelay = "MeekRoundTripRetryMaxDelay" + MeekRoundTripRetryMultiplier = "MeekRoundTripRetryMultiplier" + MeekRoundTripTimeout = "MeekRoundTripTimeout" + MeekTrafficShapingProbability = "MeekTrafficShapingProbability" + MeekTrafficShapingLimitProtocols = "MeekTrafficShapingLimitProtocols" + MeekMinTLSPadding = "MeekMinTLSPadding" + MeekMaxTLSPadding = "MeekMaxTLSPadding" + MeekMinLimitRequestPayloadLength = "MeekMinLimitRequestPayloadLength" + MeekMaxLimitRequestPayloadLength = "MeekMaxLimitRequestPayloadLength" + MeekRedialTLSProbability = "MeekRedialTLSProbability" + MeekAlternateCookieNameProbability = "MeekAlternateCookieNameProbability" + MeekAlternateContentTypeProbability = "MeekAlternateContentTypeProbability" + TransformHostNameProbability = "TransformHostNameProbability" + PickUserAgentProbability = "PickUserAgentProbability" + LivenessTestMinUpstreamBytes = "LivenessTestMinUpstreamBytes" + LivenessTestMaxUpstreamBytes = "LivenessTestMaxUpstreamBytes" + LivenessTestMinDownstreamBytes = "LivenessTestMinDownstreamBytes" + LivenessTestMaxDownstreamBytes = "LivenessTestMaxDownstreamBytes" + ReplayCandidateCount = "ReplayCandidateCount" + ReplayDialParametersTTL = "ReplayDialParametersTTL" + ReplayTargetUpstreamBytes = "ReplayTargetUpstreamBytes" + ReplayTargetDownstreamBytes = "ReplayTargetDownstreamBytes" + ReplayTargetTunnelDuration = "ReplayTargetTunnelDuration" + ReplayLaterRoundMoveToFrontProbability = "ReplayLaterRoundMoveToFrontProbability" + ReplayRetainFailedProbability = "ReplayRetainFailedProbability" + ReplayIgnoreChangedConfigState = "ReplayIgnoreChangedConfigState" + ReplayBPF = "ReplayBPF" + ReplaySSH = "ReplaySSH" + ReplayObfuscatorPadding = "ReplayObfuscatorPadding" + ReplayFragmentor = "ReplayFragmentor" + ReplayTLSProfile = "ReplayTLSProfile" + ReplayFronting = "ReplayFronting" + ReplayHostname = "ReplayHostname" + ReplayQUICVersion = "ReplayQUICVersion" + ReplayObfuscatedQUIC = "ReplayObfuscatedQUIC" + ReplayObfuscatedQUICNonceTransformer = "ReplayObfuscatedQUICNonceTransformer" + ReplayConjureRegistration = "ReplayConjureRegistration" + ReplayConjureTransport = "ReplayConjureTransport" + ReplayLivenessTest = "ReplayLivenessTest" + ReplayUserAgent = "ReplayUserAgent" + ReplayAPIRequestPadding = "ReplayAPIRequestPadding" + ReplayHoldOffTunnel = "ReplayHoldOffTunnel" + ReplayResolveParameters = "ReplayResolveParameters" + ReplayHTTPTransformerParameters = "ReplayHTTPTransformerParameters" + ReplayOSSHSeedTransformerParameters = "ReplayOSSHSeedTransformerParameters" + ReplayOSSHPrefix = "ReplayOSSHPrefix" + ReplayTLSFragmentClientHello = "ReplayTLSFragmentClientHello" + ReplayInproxyWebRTC = "ReplayInproxyWebRTC" + ReplayInproxySTUN = "ReplayInproxySTUN" + APIRequestUpstreamPaddingMinBytes = "APIRequestUpstreamPaddingMinBytes" + APIRequestUpstreamPaddingMaxBytes = "APIRequestUpstreamPaddingMaxBytes" + APIRequestDownstreamPaddingMinBytes = "APIRequestDownstreamPaddingMinBytes" + APIRequestDownstreamPaddingMaxBytes = "APIRequestDownstreamPaddingMaxBytes" + PersistentStatsMaxStoreRecords = "PersistentStatsMaxStoreRecords" + PersistentStatsMaxSendBytes = "PersistentStatsMaxSendBytes" + RecordRemoteServerListPersistentStatsProbability = "RecordRemoteServerListPersistentStatsProbability" + RecordFailedTunnelPersistentStatsProbability = "RecordFailedTunnelPersistentStatsProbability" + ServerEntryMinimumAgeForPruning = "ServerEntryMinimumAgeForPruning" + ApplicationParametersProbability = "ApplicationParametersProbability" + ApplicationParameters = 
"ApplicationParameters" + BPFServerTCPProgram = "BPFServerTCPProgram" + BPFServerTCPProbability = "BPFServerTCPProbability" + BPFClientTCPProgram = "BPFClientTCPProgram" + BPFClientTCPProbability = "BPFClientTCPProbability" + ServerPacketManipulationSpecs = "ServerPacketManipulationSpecs" + ServerProtocolPacketManipulations = "ServerProtocolPacketManipulations" + ServerPacketManipulationProbability = "ServerPacketManipulationProbability" + FeedbackUploadURLs = "FeedbackUploadURLs" + FeedbackEncryptionPublicKey = "FeedbackEncryptionPublicKey" + FeedbackTacticsWaitPeriod = "FeedbackTacticsWaitPeriod" + FeedbackUploadMaxAttempts = "FeedbackUploadMaxAttempts" + FeedbackUploadRetryMinDelaySeconds = "FeedbackUploadRetryMinDelaySeconds" + FeedbackUploadRetryMaxDelaySeconds = "FeedbackUploadRetryMaxDelaySeconds" + FeedbackUploadTimeoutSeconds = "FeedbackUploadTimeoutSeconds" + ServerReplayPacketManipulation = "ServerReplayPacketManipulation" + ServerReplayFragmentor = "ServerReplayFragmentor" + ServerReplayUnknownGeoIP = "ServerReplayUnknownGeoIP" + ServerReplayTTL = "ServerReplayTTL" + ServerReplayTargetWaitDuration = "ServerReplayTargetWaitDuration" + ServerReplayTargetTunnelDuration = "ServerReplayTargetTunnelDuration" + ServerReplayTargetUpstreamBytes = "ServerReplayTargetUpstreamBytes" + ServerReplayTargetDownstreamBytes = "ServerReplayTargetDownstreamBytes" + ServerReplayFailedCountThreshold = "ServerReplayFailedCountThreshold" + ServerBurstUpstreamDeadline = "ServerBurstUpstreamDeadline" + ServerBurstUpstreamTargetBytes = "ServerBurstUpstreamTargetBytes" + ServerBurstDownstreamDeadline = "ServerBurstDownstreamDeadline" + ServerBurstDownstreamTargetBytes = "ServerBurstDownstreamTargetBytes" + ClientBurstUpstreamDeadline = "ClientBurstUpstreamDeadline" + ClientBurstUpstreamTargetBytes = "ClientBurstUpstreamTargetBytes" + ClientBurstDownstreamDeadline = "ClientBurstDownstreamDeadline" + ClientBurstDownstreamTargetBytes = "ClientBurstDownstreamTargetBytes" + ConjureCachedRegistrationTTL = "ConjureCachedRegistrationTTL" + ConjureAPIRegistrarURL = "ConjureAPIRegistrarURL" + ConjureAPIRegistrarBidirectionalURL = "ConjureAPIRegistrarBidirectionalURL" + ConjureAPIRegistrarFrontingSpecs = "ConjureAPIRegistrarFrontingSpecs" + ConjureAPIRegistrarMinDelay = "ConjureAPIRegistrarMinDelay" + ConjureAPIRegistrarMaxDelay = "ConjureAPIRegistrarMaxDelay" + ConjureDecoyRegistrarProbability = "ConjureDecoyRegistrarProbability" + ConjureDecoyRegistrarWidth = "ConjureDecoyRegistrarWidth" + ConjureDecoyRegistrarMinDelay = "ConjureDecoyRegistrarMinDelay" + ConjureDecoyRegistrarMaxDelay = "ConjureDecoyRegistrarMaxDelay" + ConjureEnableIPv6Dials = "ConjureEnableIPv6Dials" + ConjureEnablePortRandomization = "ConjureEnablePortRandomization" + ConjureEnableRegistrationOverrides = "ConjureEnableRegistrationOverrides" + ConjureLimitTransportsProbability = "ConjureLimitTransportsProbability" + ConjureLimitTransports = "ConjureLimitTransports" + ConjureSTUNServerAddresses = "ConjureSTUNServerAddresses" + ConjureDTLSEmptyInitialPacketProbability = "ConjureDTLSEmptyInitialPacketProbability" + CustomHostNameRegexes = "CustomHostNameRegexes" + CustomHostNameProbability = "CustomHostNameProbability" + CustomHostNameLimitProtocols = "CustomHostNameLimitProtocols" + HoldOffTunnelMinDuration = "HoldOffTunnelMinDuration" + HoldOffTunnelMaxDuration = "HoldOffTunnelMaxDuration" + HoldOffTunnelProtocols = "HoldOffTunnelProtocols" + HoldOffTunnelFrontingProviderIDs = "HoldOffTunnelFrontingProviderIDs" + HoldOffTunnelProbability = 
"HoldOffTunnelProbability" + RestrictFrontingProviderIDs = "RestrictFrontingProviderIDs" + RestrictFrontingProviderIDsServerProbability = "RestrictFrontingProviderIDsServerProbability" + RestrictFrontingProviderIDsClientProbability = "RestrictFrontingProviderIDsClientProbability" + HoldOffDirectTunnelMinDuration = "HoldOffDirectTunnelMinDuration" + HoldOffDirectTunnelMaxDuration = "HoldOffDirectTunnelMaxDuration" + HoldOffDirectTunnelProviderRegions = "HoldOffDirectTunnelProviderRegions" + HoldOffDirectTunnelProbability = "HoldOffDirectTunnelProbability" + RestrictDirectProviderRegions = "RestrictDirectProviderRegions" + RestrictDirectProviderIDsServerProbability = "RestrictDirectProviderIDsServerProbability" + RestrictDirectProviderIDsClientProbability = "RestrictDirectProviderIDsClientProbability" + UpstreamProxyAllowAllServerEntrySources = "UpstreamProxyAllowAllServerEntrySources" + DestinationBytesMetricsASN = "DestinationBytesMetricsASN" + DNSResolverAttemptsPerServer = "DNSResolverAttemptsPerServer" + DNSResolverAttemptsPerPreferredServer = "DNSResolverAttemptsPerPreferredServer" + DNSResolverRequestTimeout = "DNSResolverRequestTimeout" + DNSResolverAwaitTimeout = "DNSResolverAwaitTimeout" + DNSResolverPreresolvedIPAddressCIDRs = "DNSResolverPreresolvedIPAddressCIDRs" + DNSResolverPreresolvedIPAddressProbability = "DNSResolverPreresolvedIPAddressProbability" + DNSResolverAlternateServers = "DNSResolverAlternateServers" + DNSResolverPreferredAlternateServers = "DNSResolverPreferredAlternateServers" + DNSResolverPreferAlternateServerProbability = "DNSResolverPreferAlternateServerProbability" + DNSResolverProtocolTransformSpecs = "DNSResolverProtocolTransformSpecs" + DNSResolverProtocolTransformScopedSpecNames = "DNSResolverProtocolTransformScopedSpecNames" + DNSResolverProtocolTransformProbability = "DNSResolverProtocolTransformProbability" + DNSResolverIncludeEDNS0Probability = "DNSResolverIncludeEDNS0Probability" + DNSResolverCacheExtensionInitialTTL = "DNSResolverCacheExtensionInitialTTL" + DNSResolverCacheExtensionVerifiedTTL = "DNSResolverCacheExtensionVerifiedTTL" + AddFrontingProviderPsiphonFrontingHeader = "AddFrontingProviderPsiphonFrontingHeader" + DirectHTTPProtocolTransformSpecs = "DirectHTTPProtocolTransformSpecs" + DirectHTTPProtocolTransformScopedSpecNames = "DirectHTTPProtocolTransformScopedSpecNames" + DirectHTTPProtocolTransformProbability = "DirectHTTPProtocolTransformProbability" + FrontedHTTPProtocolTransformSpecs = "FrontedHTTPProtocolTransformSpecs" + FrontedHTTPProtocolTransformScopedSpecNames = "FrontedHTTPProtocolTransformScopedSpecNames" + FrontedHTTPProtocolTransformProbability = "FrontedHTTPProtocolTransformProbability" + OSSHObfuscatorSeedTransformSpecs = "OSSHObfuscatorSeedTransformSpecs" + OSSHObfuscatorSeedTransformScopedSpecNames = "OSSHObfuscatorSeedTransformScopedSpecNames" + OSSHObfuscatorSeedTransformProbability = "OSSHObfuscatorSeedTransformProbability" + ObfuscatedQUICNonceTransformSpecs = "ObfuscatedQUICNonceTransformSpecs" + ObfuscatedQUICNonceTransformScopedSpecNames = "ObfuscatedQUICNonceTransformScopedSpecNames" + ObfuscatedQUICNonceTransformProbability = "ObfuscatedQUICNonceTransformProbability" + OSSHPrefixSpecs = "OSSHPrefixSpecs" + OSSHPrefixScopedSpecNames = "OSSHPrefixScopedSpecNames" + OSSHPrefixProbability = "OSSHPrefixProbability" + OSSHPrefixSplitMinDelay = "OSSHPrefixSplitMinDelay" + OSSHPrefixSplitMaxDelay = "OSSHPrefixSplitMaxDelay" + OSSHPrefixEnableFragmentor = "OSSHPrefixEnableFragmentor" + ServerOSSHPrefixSpecs = 
"ServerOSSHPrefixSpecs" + TLSTunnelTrafficShapingProbability = "TLSTunnelTrafficShapingProbability" + TLSTunnelMinTLSPadding = "TLSTunnelMinTLSPadding" + TLSTunnelMaxTLSPadding = "TLSTunnelMaxTLSPadding" + TLSFragmentClientHelloProbability = "TLSFragmentClientHelloProbability" + TLSFragmentClientHelloLimitProtocols = "TLSFragmentClientHelloLimitProtocols" + SteeringIPCacheTTL = "SteeringIPCacheTTL" + SteeringIPCacheMaxEntries = "SteeringIPCacheMaxEntries" + SteeringIPProbability = "SteeringIPProbability" + ServerDiscoveryStrategy = "ServerDiscoveryStrategy" + InproxyAllowProxy = "InproxyAllowProxy" + InproxyAllowClient = "InproxyAllowClient" + InproxyAllowDomainFrontedDestinations = "InproxyAllowDomainFrontedDestinations" + InproxyTunnelProtocolSelectionProbability = "InproxyTunnelProtocolSelectionProbability" + InproxyAllBrokerPublicKeys = "InproxyAllBrokerPublicKeys" + InproxyBrokerSpecs = "InproxyBrokerSpecs" + InproxyProxyBrokerSpecs = "InproxyProxyBrokerSpecs" + InproxyClientBrokerSpecs = "InproxyClientBrokerSpecs" + InproxyReplayBrokerDialParametersTTL = "InproxyReplayBrokerDialParametersTTL" + InproxyReplayBrokerUpdateFrequency = "InproxyReplayBrokerUpdateFrequency" + InproxyReplayBrokerDialParametersProbability = "InproxyReplayBrokerDialParametersProbability" + InproxyReplayBrokerRetainFailedProbability = "InproxyReplayBrokerRetainFailedProbability" + InproxyAllCommonCompartmentIDs = "InproxyAllCommonCompartmentIDs" + InproxyCommonCompartmentIDs = "InproxyCommonCompartmentIDs" + InproxyMaxCompartmentIDListLength = "InproxyMaxCompartmentIDListLength" + InproxyBrokerMatcherAnnouncementLimitEntryCount = "InproxyBrokerMatcherAnnouncementLimitEntryCount" + InproxyBrokerMatcherAnnouncementRateLimitQuantity = "InproxyBrokerMatcherAnnouncementRateLimitQuantity" + InproxyBrokerMatcherAnnouncementRateLimitInterval = "InproxyBrokerMatcherAnnouncementRateLimitInterval" + InproxyBrokerMatcherAnnouncementNonlimitedProxyIDs = "InproxyBrokerMatcherAnnouncementNonlimitedProxyIDs" + InproxyBrokerMatcherOfferLimitEntryCount = "InproxyBrokerMatcherOfferLimitEntryCount" + InproxyBrokerMatcherOfferRateLimitQuantity = "InproxyBrokerMatcherOfferRateLimitQuantity" + InproxyBrokerMatcherOfferRateLimitInterval = "InproxyBrokerMatcherOfferRateLimitInterval" + InproxyBrokerProxyAnnounceTimeout = "InproxyBrokerProxyAnnounceTimeout" + InproxyBrokerClientOfferTimeout = "InproxyBrokerClientOfferTimeout" + InproxyBrokerPendingServerRequestsTTL = "InproxyBrokerPendingServerRequestsTTL" + InproxySessionHandshakeRoundTripTimeout = "InproxySessionHandshakeRoundTripTimeout" + InproxyProxyAnnounceRequestTimeout = "InproxyProxyAnnounceRequestTimeout" + InproxyProxyAnnounceDelay = "InproxyProxyAnnounceDelay" + InproxyProxyAnnounceDelayJitter = "InproxyProxyAnnounceDelayJitter" + InproxyProxyAnswerRequestTimeout = "InproxyProxyAnswerRequestTimeout" + InproxyClientOfferRequestTimeout = "InproxyClientOfferRequestTimeout" + InproxyClientOfferRetryDelay = "InproxyClientOfferRetryDelay" + InproxyClientOfferRetryJitter = "InproxyClientOfferRetryJitter" + InproxyClientRelayedPacketRequestTimeout = "InproxyCloientRelayedPacketRequestTimeout" + InproxyBrokerRoundTripStatusCodeFailureThreshold = "InproxyBrokerRoundTripStatusCodeFailureThreshold" + InproxyDTLSRandomizationProbability = "InproxyDTLSRandomizationProbability" + InproxyDataChannelTrafficShapingProbability = "InproxyDataChannelTrafficShapingProbability" + InproxyDataChannelTrafficShapingParameters = "InproxyDataChannelTrafficShapingParameters" + InproxySTUNServerAddresses 
= "InproxySTUNServerAddresses" + InproxySTUNServerAddressesRFC5780 = "InproxySTUNServerAddressesRFC5780" + InproxyProxySTUNServerAddresses = "InproxyProxySTUNServerAddresses" + InproxyProxySTUNServerAddressesRFC5780 = "InproxyProxySTUNServerAddressesRFC5780" + InproxyClientSTUNServerAddresses = "InproxyClientSTUNServerAddresses" + InproxyClientSTUNServerAddressesRFC5780 = "InproxyClientSTUNServerAddressesRFC5780" + InproxyClientDiscoverNATProbability = "InproxyClientDiscoverNATProbability" + InproxyDisableSTUN = "InproxyDisableSTUN" + InproxyDisablePortMapping = "InproxyDisablePortMapping" + InproxyDisableInboundForMobileNetworks = "InproxyDisableInboundForMobileNetworks" + InproxyDisableIPv6ICECandidates = "InproxyDisableIPv6ICECandidates" + InproxyProxyDisableSTUN = "InproxyProxyDisableSTUN" + InproxyProxyDisablePortMapping = "InproxyProxyDisablePortMapping" + InproxyProxyDisableInboundForMobileNetworks = "InproxyProxyDisableInboundForMobileNetworks" + InproxyProxyDisableIPv6ICECandidates = "InproxyProxyDisableIPv6ICECandidates" + InproxyClientDisableSTUN = "InproxyClientDisableSTUN" + InproxyClientDisablePortMapping = "InproxyClientDisablePortMapping" + InproxyClientDisableInboundForMobileNetworks = "InproxyClientDisableInboundForMobileNetworks" + InproxyClientDisableIPv6ICECandidates = "InproxyClientDisableIPv6ICECandidates" + InproxyProxyDiscoverNATTimeout = "InproxyProxyDiscoverNATTimeout" + InproxyClientDiscoverNATTimeout = "InproxyClientDiscoverNATTimeout" + InproxyWebRTCAnswerTimeout = "InproxyWebRTCAnswerTimeout" + InproxyProxyWebRTCAwaitDataChannelTimeout = "InproxyProxyWebRTCAwaitDataChannelTimeout" + InproxyClientWebRTCAwaitDataChannelTimeout = "InproxyClientWebRTCAwaitDataChannelTimeout" + InproxyProxyDestinationDialTimeout = "InproxyProxyDestinationDialTimeout" + InproxyPsiphonAPIRequestTimeout = "InproxyPsiphonAPIRequestTimeout" + InproxyProxyTotalActivityNoticePeriod = "InproxyProxyTotalActivityNoticePeriod" // Retired parameters @@ -516,7 +583,7 @@ var defaultParameters = map[string]struct { RemoteServerListURLs: {value: TransferURLs{}}, ObfuscatedServerListRootURLs: {value: TransferURLs{}}, - PsiphonAPIRequestTimeout: {value: 20 * time.Second, minimum: 1 * time.Second, flags: useNetworkLatencyMultiplier}, + PsiphonAPIRequestTimeout: {value: 10 * time.Second, minimum: 1 * time.Second, flags: useNetworkLatencyMultiplier}, PsiphonAPIStatusRequestPeriodMin: {value: 5 * time.Minute, minimum: 1 * time.Second}, PsiphonAPIStatusRequestPeriodMax: {value: 10 * time.Minute, minimum: 1 * time.Second}, @@ -630,6 +697,8 @@ var defaultParameters = map[string]struct { ReplayOSSHSeedTransformerParameters: {value: true}, ReplayOSSHPrefix: {value: true}, ReplayTLSFragmentClientHello: {value: true}, + ReplayInproxyWebRTC: {value: true}, + ReplayInproxySTUN: {value: true}, APIRequestUpstreamPaddingMinBytes: {value: 0, minimum: 0}, APIRequestUpstreamPaddingMaxBytes: {value: 1024, minimum: 0}, @@ -785,6 +854,78 @@ var defaultParameters = map[string]struct { SteeringIPProbability: {value: 1.0, minimum: 0.0}, ServerDiscoveryStrategy: {value: "", flags: serverSideOnly}, + + // For inproxy tactics, there is no proxyOnly flag, since Psiphon apps may + // run both clients and inproxy proxies. + // + // Note: useNetworkLatencyMultiplier is not applied to request timeouts + // since timeouts are strictly enforced on the broker server-side. 
+ + InproxyAllowProxy: {value: false}, + InproxyAllowClient: {value: false, flags: serverSideOnly}, + InproxyAllowDomainFrontedDestinations: {value: false, flags: serverSideOnly}, + InproxyTunnelProtocolSelectionProbability: {value: 0.5, minimum: 0.0}, + InproxyAllBrokerPublicKeys: {value: []string{}, flags: serverSideOnly}, + InproxyBrokerSpecs: {value: InproxyBrokerSpecsValue{}}, + InproxyProxyBrokerSpecs: {value: InproxyBrokerSpecsValue{}}, + InproxyClientBrokerSpecs: {value: InproxyBrokerSpecsValue{}}, + InproxyReplayBrokerDialParametersTTL: {value: 24 * time.Hour, minimum: time.Duration(0)}, + InproxyReplayBrokerUpdateFrequency: {value: 5 * time.Minute, minimum: time.Duration(0)}, + InproxyReplayBrokerDialParametersProbability: {value: 1.0, minimum: 0.0}, + InproxyReplayBrokerRetainFailedProbability: {value: 0.5, minimum: 0.0}, + InproxyAllCommonCompartmentIDs: {value: []string{}, flags: serverSideOnly}, + InproxyCommonCompartmentIDs: {value: InproxyCompartmentIDsValue{}}, + InproxyMaxCompartmentIDListLength: {value: 50, minimum: 0}, + InproxyBrokerMatcherAnnouncementLimitEntryCount: {value: 50, minimum: 0, flags: serverSideOnly}, + InproxyBrokerMatcherAnnouncementRateLimitQuantity: {value: 50, minimum: 0, flags: serverSideOnly}, + InproxyBrokerMatcherAnnouncementRateLimitInterval: {value: 1 * time.Minute, minimum: time.Duration(0), flags: serverSideOnly}, + InproxyBrokerMatcherAnnouncementNonlimitedProxyIDs: {value: []string{}, flags: serverSideOnly}, + InproxyBrokerMatcherOfferLimitEntryCount: {value: 10, minimum: 0, flags: serverSideOnly}, + InproxyBrokerMatcherOfferRateLimitQuantity: {value: 50, minimum: 0, flags: serverSideOnly}, + InproxyBrokerMatcherOfferRateLimitInterval: {value: 1 * time.Minute, minimum: time.Duration(0)}, + InproxyBrokerProxyAnnounceTimeout: {value: 2 * time.Minute, minimum: time.Duration(0), flags: serverSideOnly}, + InproxyBrokerClientOfferTimeout: {value: 10 * time.Second, minimum: time.Duration(0), flags: serverSideOnly}, + InproxyBrokerPendingServerRequestsTTL: {value: 60 * time.Second, minimum: time.Duration(0), flags: serverSideOnly}, + InproxySessionHandshakeRoundTripTimeout: {value: 10 * time.Second, minimum: time.Duration(0), flags: useNetworkLatencyMultiplier}, + InproxyProxyAnnounceRequestTimeout: {value: 2*time.Minute + 10*time.Second, minimum: time.Duration(0)}, + InproxyProxyAnnounceDelay: {value: 100 * time.Millisecond, minimum: time.Duration(0)}, + InproxyProxyAnnounceDelayJitter: {value: 0.5, minimum: 0.0}, + InproxyProxyAnswerRequestTimeout: {value: 10*time.Second + 10*time.Second, minimum: time.Duration(0)}, + InproxyClientOfferRequestTimeout: {value: 10*time.Second + 10*time.Second, minimum: time.Duration(0)}, + InproxyClientOfferRetryDelay: {value: 100 * time.Millisecond, minimum: time.Duration(0)}, + InproxyClientOfferRetryJitter: {value: 0.5, minimum: 0.0}, + InproxyClientRelayedPacketRequestTimeout: {value: 10 * time.Second, minimum: time.Duration(0)}, + InproxyBrokerRoundTripStatusCodeFailureThreshold: {value: 2 * time.Second, minimum: time.Duration(0), flags: useNetworkLatencyMultiplier}, + InproxyDTLSRandomizationProbability: {value: 0.5, minimum: 0.0}, + InproxyDataChannelTrafficShapingProbability: {value: 0.5, minimum: 0.0}, + InproxyDataChannelTrafficShapingParameters: {value: InproxyDataChannelTrafficShapingParametersValue{0, 10, 0, 1500, 0, 10, 1, 1500, 0.5}}, + InproxySTUNServerAddresses: {value: []string{}}, + InproxySTUNServerAddressesRFC5780: {value: []string{}}, + InproxyProxySTUNServerAddresses: {value: []string{}}, + 
InproxyProxySTUNServerAddressesRFC5780: {value: []string{}}, + InproxyClientSTUNServerAddresses: {value: []string{}}, + InproxyClientSTUNServerAddressesRFC5780: {value: []string{}}, + InproxyClientDiscoverNATProbability: {value: 0.0, minimum: 0.0}, + InproxyDisableSTUN: {value: false}, + InproxyDisablePortMapping: {value: false}, + InproxyDisableInboundForMobileNetworks: {value: false}, + InproxyDisableIPv6ICECandidates: {value: false}, + InproxyProxyDisableSTUN: {value: false}, + InproxyProxyDisablePortMapping: {value: false}, + InproxyProxyDisableInboundForMobileNetworks: {value: false}, + InproxyProxyDisableIPv6ICECandidates: {value: false}, + InproxyClientDisableSTUN: {value: false}, + InproxyClientDisablePortMapping: {value: false}, + InproxyClientDisableInboundForMobileNetworks: {value: false}, + InproxyClientDisableIPv6ICECandidates: {value: false}, + InproxyProxyDiscoverNATTimeout: {value: 10 * time.Second, minimum: time.Duration(0), flags: useNetworkLatencyMultiplier}, + InproxyClientDiscoverNATTimeout: {value: 10 * time.Second, minimum: time.Duration(0), flags: useNetworkLatencyMultiplier}, + InproxyWebRTCAnswerTimeout: {value: 20 * time.Second, minimum: time.Duration(0), flags: useNetworkLatencyMultiplier}, + InproxyProxyWebRTCAwaitDataChannelTimeout: {value: 30 * time.Second, minimum: time.Duration(0), flags: useNetworkLatencyMultiplier}, + InproxyClientWebRTCAwaitDataChannelTimeout: {value: 20 * time.Second, minimum: time.Duration(0), flags: useNetworkLatencyMultiplier}, + InproxyProxyDestinationDialTimeout: {value: 20 * time.Second, minimum: time.Duration(0), flags: useNetworkLatencyMultiplier}, + InproxyPsiphonAPIRequestTimeout: {value: 10 * time.Second, minimum: 1 * time.Second, flags: useNetworkLatencyMultiplier}, + InproxyProxyTotalActivityNoticePeriod: {value: 5 * time.Minute, minimum: 1 * time.Second}, } // IsServerSideOnly indicates if the parameter specified by name is used @@ -813,7 +954,10 @@ func NewParameters( getValueLogger: getValueLogger, } - _, err := parameters.Set("", false) + // Note: validationFlags excludes ValidationServerSide, so the + // Parameters.Set checkInproxyLists logic is not invoked; however, all + // the relevent lists currently default to empty values. + _, err := parameters.Set("", 0) if err != nil { return nil, errors.Trace(err) } @@ -848,6 +992,21 @@ func makeDefaultParameters() (map[string]interface{}, error) { return parameters, nil } +const ( + + // ValidationSkipOnError indicates whether invalid tactics are to be + // skipped/omitted, or if Set should fail on invalid tactics. In some + // cases, clients set ValidationSkipOnError as older client code may + // download newer tactics which do not validate; while servers perform + // strict validation. + ValidationSkipOnError = 1 + + // ValidationServerSide indicates whether the validation is server-side, + // in which case additonal checks referencing serverSideOnly parameters + // are performed. + ValidationServerSide = 2 +) + // Set replaces the current parameters. First, a set of parameters are // initialized using the default values. Then, each applyParameters is applied // in turn, with the later instances having precedence. @@ -865,7 +1024,12 @@ func makeDefaultParameters() (map[string]interface{}, error) { // For use in logging, Set returns a count of the number of parameters applied // from each applyParameters. 
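As an illustration of the new flag semantics, one plausible caller-side pattern is sketched below; the helper, its arguments, and the per-role flag choice are assumptions for illustration, while the flag constants and the Set signature are as defined in this change.

    package example

    import (
        "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters"
    )

    // applyTactics applies downloaded tactics. Clients typically tolerate
    // newer, unrecognized tactics by skipping invalid values; servers validate
    // strictly, which also enables the serverSideOnly cross-checks (the
    // in-proxy list checks only run when ValidationSkipOnError is not set).
    func applyTactics(
        p *parameters.Parameters,
        tag string,
        isServer bool,
        apply map[string]interface{}) ([]int, error) {

        flags := parameters.ValidationSkipOnError
        if isServer {
            flags = parameters.ValidationServerSide
        }

        // The returned counts record how many values each applyParameters
        // contributed, for logging.
        return p.Set(tag, flags, apply)
    }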
func (p *Parameters) Set( - tag string, skipOnError bool, applyParameters ...map[string]interface{}) ([]int, error) { + tag string, + validationFlags int, + applyParameters ...map[string]interface{}) ([]int, error) { + + skipOnError := (validationFlags & ValidationSkipOnError) != 0 + serverSide := (validationFlags & ValidationServerSide) != 0 makeTypedValue := func(templateValue, value interface{}) (interface{}, error) { @@ -1005,6 +1169,27 @@ func (p *Parameters) Set( } osshPrefixSpecs, _ := osshPrefixSpecsValue.(transforms.Specs) + // Special case: in-proxy broker public keys in InproxyBrokerSpecs must + // appear in InproxyAllBrokerPublicKeys; and inproxy common compartment + // IDs must appear in InproxyAllCommonCompartmentIDs. This check is + // server-side only as the "All" parameters are serverSideOnly. + + checkInproxyLists := !skipOnError && serverSide + + inproxyAllBrokerPublicKeysValue, err := getAppliedValue( + InproxyAllBrokerPublicKeys, parameters, applyParameters) + if err != nil { + return nil, errors.Trace(err) + } + inproxyAllBrokerPublicKeys, _ := inproxyAllBrokerPublicKeysValue.([]string) + + inproxyAllCommonCompartmentIDsValue, err := getAppliedValue( + InproxyAllCommonCompartmentIDs, parameters, applyParameters) + if err != nil { + return nil, errors.Trace(err) + } + inproxyAllCommonCompartmentIDs, _ := inproxyAllCommonCompartmentIDsValue.([]string) + for i := 0; i < len(applyParameters); i++ { count := 0 @@ -1161,7 +1346,11 @@ func (p *Parameters) Set( return nil, errors.Trace(err) } case FrontingSpecs: - err := v.Validate() + // By default, FrontingSpecs are not permitted to specify + // SkipVerify. This includes the ConjureAPIRegistrarFrontingSpecs + // case which uses MeekModePlaintextRoundTrip. + allowSkipVerify := false + err := v.Validate(allowSkipVerify) if err != nil { if skipOnError { continue @@ -1229,6 +1418,42 @@ func (p *Parameters) Set( return nil, errors.Trace(err) } } + case InproxyBrokerSpecsValue: + + var checkList *[]string + if checkInproxyLists && name == InproxyBrokerSpecs { + checkList = &inproxyAllBrokerPublicKeys + } + + err := v.Validate(checkList) + if err != nil { + if skipOnError { + continue + } + return nil, errors.Trace(err) + } + case InproxyCompartmentIDsValue: + + var checkList *[]string + if checkInproxyLists && name == InproxyCommonCompartmentIDs { + checkList = &inproxyAllCommonCompartmentIDs + } + + err := v.Validate(checkList) + if err != nil { + if skipOnError { + continue + } + return nil, errors.Trace(err) + } + case InproxyDataChannelTrafficShapingParametersValue: + err := v.Validate() + if err != nil { + if skipOnError { + continue + } + return nil, errors.Trace(err) + } } // Enforce any minimums. Assumes defaultParameters[name] @@ -1376,7 +1601,7 @@ func (p *parametersSnapshot) getValue(name string, target interface{}) { targetValue.Elem().Set(reflect.ValueOf(value)) } -// ParametersAccessor provides consistent, atomic access to parameter values. +// ParametersAccessor provides consistent, atomic access to parameter values. // Any customizations are applied transparently. type ParametersAccessor struct { snapshot *parametersSnapshot @@ -1421,10 +1646,17 @@ func (p ParametersAccessor) String(name string) string { return value } -func (p ParametersAccessor) Strings(name string) []string { - value := []string{} - p.snapshot.getValue(name, &value) - return value +// Strings returns a []string parameter value. If multiple parameter names are +// specified, the first name with a non-empty value is used. 
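A sketch of how the multi-name lookups might be used by in-proxy dial code, assuming a convention in which the role-specific parameters take precedence over the shared ones; the helper is hypothetical, and the accessor is the value returned by Parameters.Get().

    package example

    import (
        "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters"
    )

    // proxyBrokerConfig reads the in-proxy proxy's broker specs and STUN
    // server addresses, preferring the proxy-specific tactics and falling back
    // to the shared ones; the variadic accessors return the first name with a
    // non-empty value.
    func proxyBrokerConfig(
        p parameters.ParametersAccessor) (parameters.InproxyBrokerSpecsValue, []string) {

        brokerSpecs := p.InproxyBrokerSpecs(
            parameters.InproxyProxyBrokerSpecs,
            parameters.InproxyBrokerSpecs)

        stunServers := p.Strings(
            parameters.InproxyProxySTUNServerAddresses,
            parameters.InproxySTUNServerAddresses)

        return brokerSpecs, stunServers
    }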
+func (p ParametersAccessor) Strings(names ...string) []string { + for _, name := range names { + value := []string{} + p.snapshot.getValue(name, &value) + if len(value) > 0 { + return value + } + } + return []string{} } // Int returns an int parameter value. @@ -1776,3 +2008,34 @@ func (p ParametersAccessor) ConjureTransports(name string) protocol.ConjureTrans p.snapshot.getValue(name, &value) return value } + +// InproxyBrokerSpecs returns a InproxyBrokerSpecs parameter value. If +// multiple parameter names are specified, the first name with a non-empty +// value is used. +func (p ParametersAccessor) InproxyBrokerSpecs(names ...string) InproxyBrokerSpecsValue { + for _, name := range names { + value := InproxyBrokerSpecsValue{} + p.snapshot.getValue(name, &value) + if len(value) > 0 { + return value + } + } + return InproxyBrokerSpecsValue{} +} + +// InproxyBrokerSpecs returns a InproxyBrokerSpecs parameter value. +func (p ParametersAccessor) InproxyCompartmentIDs(name string) InproxyCompartmentIDsValue { + value := InproxyCompartmentIDsValue{} + p.snapshot.getValue(name, &value) + return value +} + +// InproxyDataChannelTrafficShapingParameters returns a +// InproxyDataChannelTrafficShapingParameters parameter value. +func (p ParametersAccessor) InproxyDataChannelTrafficShapingParameters( + name string) InproxyDataChannelTrafficShapingParametersValue { + + value := InproxyDataChannelTrafficShapingParametersValue{} + p.snapshot.getValue(name, &value) + return value +} diff --git a/psiphon/common/parameters/parameters_test.go b/psiphon/common/parameters/parameters_test.go old mode 100755 new mode 100644 index bad842c04..03e5d40dc --- a/psiphon/common/parameters/parameters_test.go +++ b/psiphon/common/parameters/parameters_test.go @@ -196,6 +196,21 @@ func TestGetDefaultParameters(t *testing.T) { t.Fatalf("KeyStrings returned %+v expected %+v", g, strings) } } + case InproxyBrokerSpecsValue: + g := p.Get().InproxyBrokerSpecs(name) + if !reflect.DeepEqual(v, g) { + t.Fatalf("ConjureTransports returned %+v expected %+v", g, v) + } + case InproxyCompartmentIDsValue: + g := p.Get().InproxyCompartmentIDs(name) + if !reflect.DeepEqual(v, g) { + t.Fatalf("ConjureTransports returned %+v expected %+v", g, v) + } + case InproxyDataChannelTrafficShapingParametersValue: + g := p.Get().InproxyDataChannelTrafficShapingParameters(name) + if !reflect.DeepEqual(v, g) { + t.Fatalf("ConjureTransports returned %+v expected %+v", g, v) + } default: t.Fatalf("Unhandled default type: %s (%T)", name, defaults.value) } @@ -245,7 +260,7 @@ func TestOverrides(t *testing.T) { // No skip on error; should fail and not apply any changes - _, err = p.Set(tag, false, applyParameters) + _, err = p.Set(tag, 0, applyParameters) if err == nil { t.Fatalf("Set succeeded unexpectedly") } @@ -266,7 +281,7 @@ func TestOverrides(t *testing.T) { // Skip on error; should skip ConnectionWorkerPoolSize and apply InitialLimitTunnelProtocolsCandidateCount - counts, err := p.Set(tag, true, applyParameters) + counts, err := p.Set(tag, ValidationSkipOnError, applyParameters) if err != nil { t.Fatalf("Set failed: %s", err) } @@ -296,7 +311,7 @@ func TestNetworkLatencyMultiplier(t *testing.T) { applyParameters := map[string]interface{}{"NetworkLatencyMultiplier": 2.0} - _, err = p.Set("", false, applyParameters) + _, err = p.Set("", 0, applyParameters) if err != nil { t.Fatalf("Set failed: %s", err) } @@ -318,7 +333,7 @@ func TestCustomNetworkLatencyMultiplier(t *testing.T) { applyParameters := 
map[string]interface{}{"NetworkLatencyMultiplier": 2.0} - _, err = p.Set("", false, applyParameters) + _, err = p.Set("", 0, applyParameters) if err != nil { t.Fatalf("Set failed: %s", err) } @@ -344,7 +359,7 @@ func TestLimitTunnelProtocolProbability(t *testing.T) { "LimitTunnelProtocols": tunnelProtocols, } - _, err = p.Set("", false, applyParameters) + _, err = p.Set("", 0, applyParameters) if err != nil { t.Fatalf("Set failed: %s", err) } @@ -365,7 +380,7 @@ func TestLimitTunnelProtocolProbability(t *testing.T) { "LimitTunnelProtocols": tunnelProtocols, } - _, err = p.Set("", false, applyParameters) + _, err = p.Set("", 0, applyParameters) if err != nil { t.Fatalf("Set failed: %s", err) } @@ -411,7 +426,7 @@ func TestLabeledLists(t *testing.T) { "DisableFrontingProviderQUICVersions": protocol.LabeledQUICVersions{"validLabel": quicVersions}, } - _, err = p.Set("", false, applyParameters) + _, err = p.Set("", 0, applyParameters) if err != nil { t.Fatalf("Set failed: %s", err) } @@ -451,7 +466,7 @@ func TestCustomTLSProfiles(t *testing.T) { applyParameters := map[string]interface{}{ "CustomTLSProfiles": customTLSProfiles} - _, err = p.Set("", false, applyParameters) + _, err = p.Set("", 0, applyParameters) if err != nil { t.Fatalf("Set failed: %s", err) } @@ -511,7 +526,7 @@ func TestApplicationParameters(t *testing.T) { t.Fatalf("NewParameters failed: %s", err) } - _, err = p.Set("", false, applyParameters) + _, err = p.Set("", 0, applyParameters) if err != nil { t.Fatalf("Set failed: %s", err) } diff --git a/psiphon/common/parameters/transferURLs.go b/psiphon/common/parameters/transferURLs.go old mode 100755 new mode 100644 index e8da33975..3857391ea --- a/psiphon/common/parameters/transferURLs.go +++ b/psiphon/common/parameters/transferURLs.go @@ -73,6 +73,15 @@ func (t TransferURLs) DecodeAndValidate() error { hasOnlyAfterZero := false for _, transferURL := range t { + + // TransferURL FrontingSpecs are permitted to specify SkipVerify + // because transfers have additional security at the payload level. 
+ allowSkipVerify := true + err := transferURL.FrontingSpecs.Validate(allowSkipVerify) + if err != nil { + return errors.Trace(err) + } + if transferURL.OnlyAfterAttempts == 0 { hasOnlyAfterZero = true } diff --git a/psiphon/common/parameters/transferURLs_test.go b/psiphon/common/parameters/transferURLs_test.go old mode 100755 new mode 100644 index 8350b4bdb..514a53829 --- a/psiphon/common/parameters/transferURLs_test.go +++ b/psiphon/common/parameters/transferURLs_test.go @@ -65,6 +65,49 @@ func TestTransferURLs(t *testing.T) { decodedA, 1, }, + { + "single URL, multiple attempts, fronting spec", + TransferURLs{ + { + URL: encodedA, + OnlyAfterAttempts: 0, + FrontingSpecs: []*FrontingSpec{ + { + FrontingProviderID: "frontingProvider", + Addresses: []string{"example.org"}, + VerifyServerName: "example.com", + Host: "example.org", + SkipVerify: false, + }, + }, + }, + }, + 2, + true, + decodedA, + 1, + }, + { + "single URL, multiple attempts, fronting spec, skip verify set", + TransferURLs{ + { + URL: encodedA, + OnlyAfterAttempts: 0, + FrontingSpecs: []*FrontingSpec{ + { + FrontingProviderID: "frontingProvider", + Addresses: []string{"example.org"}, + Host: "example.org", + SkipVerify: true, + }, + }, + }, + }, + 2, + true, + decodedA, + 1, + }, { "multiple URLs, single attempt", TransferURLs{ diff --git a/psiphon/common/prng/prng.go b/psiphon/common/prng/prng.go index a68e3057a..df6c54139 100644 --- a/psiphon/common/prng/prng.go +++ b/psiphon/common/prng/prng.go @@ -195,7 +195,7 @@ func (p *PRNG) Int63() int64 { return int64(i & (1<<63 - 1)) } -// Int63 is equivilent to math/rand.Uint64. +// Uint64 is equivilent to math/rand.Uint64. func (p *PRNG) Uint64() uint64 { var b [8]byte p.Read(b[:]) diff --git a/psiphon/common/profiles_test.go b/psiphon/common/profiles_test.go index a28bfefbc..ed38b4faf 100644 --- a/psiphon/common/profiles_test.go +++ b/psiphon/common/profiles_test.go @@ -53,6 +53,10 @@ func (logger *testLogger) LogMetric(metric string, fields LogFields) { panic("unexpected log call") } +func (logger *testLogger) IsLogLevelDebug() bool { + return true +} + type testLoggerTrace struct { } diff --git a/psiphon/common/protocol/packed.go b/psiphon/common/protocol/packed.go new file mode 100644 index 000000000..8c1bc2f7f --- /dev/null +++ b/psiphon/common/protocol/packed.go @@ -0,0 +1,891 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package protocol + +import ( + "encoding" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/accesscontrol" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/fxamacker/cbor/v2" +) + +// PackedAPIParameters is a compacted representation of common.APIParameters +// using integer keys in place of string keys, and with some values +// represented in compacted form, such as byte slices in place of hex or +// base64 strings. +// +// The PackedAPIParameters representation is intended to be used to create +// compacted, CBOR encodings of API parameters. +type PackedAPIParameters map[int]interface{} + +// EncodePackedAPIParameters converts common.APIParameters to +// PackedAPIParameters. +func EncodePackedAPIParameters(params common.APIParameters) (PackedAPIParameters, error) { + packedParams := PackedAPIParameters{} + for name, value := range params { + spec, ok := packedAPIParametersNameToSpec[name] + if !ok { + // The API parameter to be packed is not in + // packedAPIParametersNameToSpec. This will occur if + // packedAPIParametersNameToSpec is not updated when new API + // parameters are added. Fail the operation and, ultimately, the + // dial rather than proceeding without the parameter. + return nil, errors.Tracef("unknown parameter name: %s", name) + + } + if spec.converter != nil { + var err error + value, err = spec.converter.pack(value) + if err != nil { + return nil, errors.Tracef( + "pack %s (%T) failed: %v", name, params[name], err) + } + } + if _, ok := packedParams[spec.key]; ok { + // This is a sanity check and shouldn't happen unless + // packedAPIParametersNameToSpec is misconfigured. + return nil, errors.TraceNew("duplicate parameter") + } + packedParams[spec.key] = value + } + return packedParams, nil +} + +// DecodePackedAPIParameters converts PackedAPIParameters to +// common.APIParameters +func DecodePackedAPIParameters(packedParams PackedAPIParameters) (common.APIParameters, error) { + params := common.APIParameters{} + for key, value := range packedParams { + spec, ok := packedAPIParametersKeyToSpec[key] + if !ok { + // The API parameter received is not in + // packedAPIParametersNameToInt. Skip logging it and proceed. + // This allows for production psiphond/broker instances to handle + // experimental clients which ship new parameters, and matches + // the legacy JSON-encoded API parameters behavior. + continue + } + if spec.converter != nil { + var err error + value, err = spec.converter.unpack(value) + if err != nil { + return nil, errors.Tracef( + "unpack %s (%T) failed: %v", spec.name, packedParams[key], err) + } + } + if _, ok := params[spec.name]; ok { + // This is a sanity check and shouldn't happen unless + // packedAPIParametersKeyToSpec is misconfigured. + return nil, errors.TraceNew("duplicate parameter") + } + params[spec.name] = value + } + return params, nil +} + +// GetNetworkType returns the "network_type" API parameter value, if present. 
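A minimal round-trip sketch using only the functions above; the wrapper is hypothetical. It reflects the asymmetry documented in the encoders: packing fails for any parameter name missing from packedAPIParametersNameToSpec, while unpacking silently drops unknown integer keys for forward compatibility.

    package example

    import (
        "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
        "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
    )

    // packUnpackAPIParameters packs API parameters and immediately unpacks
    // them; a successful round trip yields an equivalent parameter map, with
    // values passed through their converters.
    func packUnpackAPIParameters(
        params common.APIParameters) (common.APIParameters, error) {

        packed, err := protocol.EncodePackedAPIParameters(params)
        if err != nil {
            // A name not present in the packing table fails the whole encode.
            return nil, err
        }
        // Unknown integer keys, if any, are skipped rather than failing.
        return protocol.DecodePackedAPIParameters(packed)
    }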
+func (p PackedAPIParameters) GetNetworkType() (string, bool) { + spec, ok := packedAPIParametersNameToSpec["network_type"] + if !ok { + return "", false + } + value, ok := p[spec.key] + if !ok { + return "", false + } + networkType, ok := value.(string) + if !ok { + return "", false + } + return networkType, true +} + +// MakePackedAPIParametersRequestPayload converts common.APIParameters to +// PackedAPIParameters and encodes the packed parameters as CBOR data. +func MakePackedAPIParametersRequestPayload( + params common.APIParameters) ([]byte, error) { + + packedParams, err := EncodePackedAPIParameters(params) + if err != nil { + return nil, errors.Trace(err) + } + + payload, err := CBOREncoding.Marshal(packedParams) + if err != nil { + return nil, errors.Trace(err) + } + + payload = addPackedAPIParametersPreamble(payload) + + return payload, nil +} + +// GetPackedAPIParametersRequestPayload decodes the CBOR payload and converts +// the PackedAPIParameters to common.APIParameters. +// +// GetPackedAPIParametersRequestPayload returns false and a nil error if the +// input payload is not CBOR data, which is the case for legacy JSON +// payloads. +func GetPackedAPIParametersRequestPayload( + payload []byte) (common.APIParameters, bool, error) { + + payload, ok := isPackedAPIParameters(payload) + if !ok { + return nil, false, nil + } + + var packedParams PackedAPIParameters + err := cbor.Unmarshal(payload, &packedParams) + if err != nil { + return nil, false, errors.Trace(err) + } + + params, err := DecodePackedAPIParameters(packedParams) + if err != nil { + return nil, false, errors.Trace(err) + } + + return params, true, nil +} + +const ( + packedAPIParametersDistinguisher = byte(0) + packedAPIParametersVersion = byte(1) +) + +func addPackedAPIParametersPreamble(payload []byte) []byte { + + var preamble [2]byte + + // Use a simple 0 byte to distinguish payloads from JSON. + preamble[0] = packedAPIParametersDistinguisher + + // Add a version tag, for future protocol changes. + preamble[1] = packedAPIParametersVersion + + // Attempt to use the input buffer, which will avoid an allocation if it + // has sufficient capacity. + payload = append(payload, preamble[:]...) + copy(payload[2:], payload[:len(payload)-2]) + copy(payload[0:2], preamble[:]) + + return payload +} + +func isPackedAPIParameters(payload []byte) ([]byte, bool) { + if len(payload) < 2 { + return nil, false + } + if payload[0] != packedAPIParametersDistinguisher { + return nil, false + } + if payload[1] != packedAPIParametersVersion { + return nil, false + } + + return payload[2:], true +} + +// PackedServerEntryFields is a compacted representation of ServerEntryFields +// using integer keys in place of string keys, and with some values +// represented in compacted form, such as byte slices in place of hex or +// base64 strings. +// +// The PackedServerEntryFields representation is intended to be used in +// CBOR-encoded messages, including in-proxy broker requests. +// +// To support older clients encoding signed server entries with new, +// unrecognized fields, the encoded structure includes a list of packed +// fields, Fields, and a list of raw, unpacked fields, UnrecognizedFields. +type PackedServerEntryFields struct { + Fields map[int]interface{} `cbor:"1,keyasint,omitempty"` + UnrecognizedFields map[string]interface{} `cbor:"2,keyasint,omitempty"` +} + +// EncodePackedServerEntryFields converts serverEntryFields to +// PackedServerEntryFields. 
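For the server entry side, a similarly hypothetical wrapper illustrates the behavior described above: fields absent from the packing tables are carried in UnrecognizedFields under their original string names and restored on decode, so signed server entries with newer fields survive the round trip.

    package example

    import (
        "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
    )

    // repackServerEntryFields packs and unpacks server entry fields;
    // recognized fields travel under compact integer keys, unrecognized
    // fields travel unpacked in UnrecognizedFields.
    func repackServerEntryFields(
        fields protocol.ServerEntryFields) (protocol.ServerEntryFields, error) {

        packed, err := protocol.EncodePackedServerEntryFields(fields)
        if err != nil {
            return nil, err
        }
        return protocol.DecodePackedServerEntryFields(packed)
    }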
+func EncodePackedServerEntryFields( + serverEntryFields ServerEntryFields) (PackedServerEntryFields, error) { + + // An allocated but empty UnrecognizedFields should be omitted from any + // CBOR encoding, taking no space. + packedServerEntry := PackedServerEntryFields{ + Fields: make(map[int]interface{}), + UnrecognizedFields: make(map[string]interface{}), + } + for name, value := range serverEntryFields { + spec, ok := packedServerEntryFieldsNameToSpec[name] + if !ok { + // Add unrecognized fields to the unpacked UnrecognizedFields set. + if _, ok := packedServerEntry.UnrecognizedFields[name]; ok { + // This is a sanity check and shouldn't happen. + return PackedServerEntryFields{}, errors.TraceNew("duplicate field") + } + packedServerEntry.UnrecognizedFields[name] = value + continue + } + if spec.converter != nil { + var err error + value, err = spec.converter.pack(value) + if err != nil { + return PackedServerEntryFields{}, errors.Tracef( + "pack %s (%T) failed: %v", name, serverEntryFields[name], err) + } + } + if _, ok := packedServerEntry.Fields[spec.key]; ok { + // This is a sanity check and shouldn't happen unless + // packedServerEntryFieldsNameToSpec is misconfigured. + return PackedServerEntryFields{}, errors.TraceNew("duplicate field") + } + packedServerEntry.Fields[spec.key] = value + } + return packedServerEntry, nil +} + +// DecodePackedServerEntryFields converts PackedServerEntryFields to +// ServerEntryFields. +func DecodePackedServerEntryFields( + packedServerEntryFields PackedServerEntryFields) (ServerEntryFields, error) { + + serverEntryFields := ServerEntryFields{} + for key, value := range packedServerEntryFields.Fields { + spec, ok := packedServerEntryFieldsKeyToSpec[key] + if !ok { + // Unlike DecodePackedAPIParameters, unknown fields cannot be + // ignored as they may be part of the server entry digital + // signature. Production psiphond/broker instances must be + // updated to handle new server entry fields. + return nil, errors.Tracef("unknown field key: %d", key) + } + if spec.converter != nil { + var err error + value, err = spec.converter.unpack(value) + if err != nil { + return nil, errors.Tracef( + "unpack %s (%T) failed: %v", + spec.name, packedServerEntryFields.Fields[key], err) + } + } + if _, ok := serverEntryFields[spec.name]; ok { + // This is a sanity check and shouldn't happen unless + // packedServerEntryFieldsKeyToSpec is misconfigured. + return nil, errors.TraceNew("duplicate field") + } + serverEntryFields[spec.name] = value + } + for name, value := range packedServerEntryFields.UnrecognizedFields { + if _, ok := serverEntryFields[name]; ok { + // This is a sanity check and shouldn't happen. + return nil, errors.TraceNew("duplicate field") + } + serverEntryFields[name] = value + } + return serverEntryFields, nil +} + +type packSpec struct { + key int + name string + converter *packConverter +} + +// packConverter defines an optional pack/unpack transformation to further +// reduce encoding overhead. For example, fields that are expected to be hex +// strings may be converted to byte slices, and then back again; integer +// strings are converted to actual integers; etc.. 
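As a rough, in-package illustration of how a spec ties a field to a converter, a hypothetical entry of the kind held by the packing tables might look as follows; the key and name are invented, and the real, read-only tables are defined further below in this file.

    package protocol

    // exampleHexSpec is illustrative only: a field whose value is a hex string
    // is packed to a raw byte slice for the CBOR encoding and restored to
    // lowercase hex on unpack.
    var exampleHexSpec = packSpec{
        key:       9999,
        name:      "example_hex_field",
        converter: &packConverter{pack: packHex, unpack: unpackHexLower},
    }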
+type packConverter struct { + pack func(interface{}) (interface{}, error) + unpack func(interface{}) (interface{}, error) +} + +func packInt(v interface{}) (interface{}, error) { + switch value := v.(type) { + case string: + i, err := strconv.Atoi(value) + if err != nil { + return nil, errors.Trace(err) + } + return i, nil + case float64: + // Decoding server entry JSON from the local datastore may produce + // float64 field types. + return int(value), nil + default: + return nil, errors.TraceNew( + "expected string or float type") + } +} + +func unpackInt(v interface{}) (interface{}, error) { + switch i := v.(type) { + case int: + return strconv.FormatInt(int64(i), 10), nil + case int64: + return strconv.FormatInt(i, 10), nil + case uint64: + return strconv.FormatUint(i, 10), nil + default: + return nil, errors.TraceNew( + "expected int, int64, or uint64 type") + } +} + +func packFloat(v interface{}) (interface{}, error) { + switch value := v.(type) { + case string: + i, err := strconv.ParseFloat(value, 64) + if err != nil { + return nil, errors.Trace(err) + } + return i, nil + case float64: + return value, nil + default: + return nil, errors.TraceNew( + "expected string or float type") + } +} + +func unpackFloat(v interface{}) (interface{}, error) { + f, ok := v.(float64) + if !ok { + return nil, errors.TraceNew("expected int type") + } + return fmt.Sprintf("%f", f), nil +} + +func packHex(v interface{}) (interface{}, error) { + // Accept a type that is either a string, or implements MarshalText + // returning a string. The resulting string must be hex encoded. + s, err := stringOrTextMarshal(v) + if err != nil { + return nil, errors.Trace(err) + } + b, err := hex.DecodeString(s) + if err != nil { + return nil, errors.Trace(err) + } + return b, nil +} + +func unpackHexLower(v interface{}) (interface{}, error) { + b, ok := v.([]byte) + if !ok { + return nil, errors.TraceNew("expected []byte type") + } + return hex.EncodeToString(b), nil +} + +func unpackHexUpper(v interface{}) (interface{}, error) { + s, err := unpackHexLower(v) + if err != nil { + return nil, errors.Trace(err) + } + return strings.ToUpper(s.(string)), nil +} + +func packBase64(v interface{}) (interface{}, error) { + // Accept a type that is either a string, or implements MarshalText + // returning a string. The resulting string must be base64 encoded. + s, err := stringOrTextMarshal(v) + if err != nil { + return nil, errors.Trace(err) + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return nil, errors.Trace(err) + } + return b, nil +} + +func unpackBase64(v interface{}) (interface{}, error) { + b, ok := v.([]byte) + if !ok { + return nil, errors.TraceNew("expected []byte type") + } + return base64.StdEncoding.EncodeToString(b), nil +} + +func packUnpaddedBase64(v interface{}) (interface{}, error) { + // Accept a type that is either a string, or implements MarshalText + // returning a string. The resulting string must be base64 encoded + // (unpadded). 
+ s, err := stringOrTextMarshal(v) + if err != nil { + return nil, errors.Trace(err) + } + b, err := base64.RawStdEncoding.DecodeString(s) + if err != nil { + return nil, errors.Trace(err) + } + return b, nil +} + +func unpackUnpaddedBase64(v interface{}) (interface{}, error) { + b, ok := v.([]byte) + if !ok { + return nil, errors.TraceNew("expected []byte type") + } + return base64.RawStdEncoding.EncodeToString(b), nil +} + +func packAuthorizations(v interface{}) (interface{}, error) { + auths, ok := v.([]string) + if !ok { + return nil, errors.TraceNew("expected []string type") + } + packedAuths, err := accesscontrol.PackAuthorizations(auths, CBOREncoding) + if err != nil { + return nil, errors.Trace(err) + } + return packedAuths, nil +} + +func unpackAuthorizations(v interface{}) (interface{}, error) { + packedAuths, ok := v.([]byte) + if !ok { + return nil, errors.TraceNew("expected []byte type") + } + auths, err := accesscontrol.UnpackAuthorizations(packedAuths) + if err != nil { + return nil, errors.Trace(err) + } + return auths, nil +} + +func packNoop(v interface{}) (interface{}, error) { + return v, nil +} + +func unpackRawJSON(v interface{}) (interface{}, error) { + + // For compatibility with the legacy JSON encoding as used in the status + // API request payload, where the input is pre-JSON-marshaling + // json.RawMessage (so use packNoop) and the output is expected to be an + // unmarshaled JSON decoded object; e.g., map[string]interface{}. + + packedRawJSON, ok := v.([]byte) + if !ok { + return nil, errors.TraceNew("expected []byte type") + } + var unmarshaledJSON map[string]interface{} + err := json.Unmarshal(packedRawJSON, &unmarshaledJSON) + if err != nil { + return nil, errors.Trace(err) + } + return unmarshaledJSON, nil +} + +func unpackSliceOfJSONCompatibleMaps(v interface{}) (interface{}, error) { + + // For compatibility with the legacy JSON encoding as used for tactics + // speed test sample parameters. This converts CBOR maps of type map + // [interface{}]interface{} to JSON-compatible maps of type map + // [string]interface{}. + + if v == nil { + return nil, nil + } + + packedEntries, ok := v.([]interface{}) + if !ok { + return nil, errors.TraceNew("expected []interface{} type") + } + + entries := make([]map[string]interface{}, len(packedEntries)) + + for i, packedEntry := range packedEntries { + entry, ok := packedEntry.(map[interface{}]interface{}) + if !ok { + return nil, errors.TraceNew("expected map[interface{}]interface{} type") + } + entries[i] = make(map[string]interface{}) + for key, value := range entry { + strKey, ok := key.(string) + if !ok { + return nil, errors.TraceNew("expected string type") + } + entries[i][strKey] = value + } + } + + return entries, nil +} + +func stringOrTextMarshal(v interface{}) (string, error) { + switch value := v.(type) { + case string: + return value, nil + case encoding.TextMarshaler: + bytes, err := value.MarshalText() + if err != nil { + return "", errors.Trace(err) + } + return string(bytes), nil + default: + return "", errors.TraceNew( + "expected string or TextMarshaler type") + } +} + +var ( + + // All of the following variables should be read-only after + // initialization, due to concurrent access. 
+ + packedAPIParametersNameToSpec = make(map[string]packSpec) + packedAPIParametersKeyToSpec = make(map[int]packSpec) + + packedServerEntryFieldsNameToSpec = make(map[string]packSpec) + packedServerEntryFieldsKeyToSpec = make(map[int]packSpec) + + intConverter = &packConverter{packInt, unpackInt} + floatConverter = &packConverter{packFloat, unpackFloat} + lowerHexConverter = &packConverter{packHex, unpackHexLower} + upperHexConverter = &packConverter{packHex, unpackHexUpper} + base64Converter = &packConverter{packBase64, unpackBase64} + unpaddedBase64Converter = &packConverter{packUnpaddedBase64, unpackUnpaddedBase64} + authorizationsConverter = &packConverter{packAuthorizations, unpackAuthorizations} + rawJSONConverter = &packConverter{packNoop, unpackRawJSON} + compatibleJSONMapConverter = &packConverter{packNoop, unpackSliceOfJSONCompatibleMaps} +) + +func init() { + + // Packed API parameters + // + // - must be appended to when server entry fields are added; existing key + // values cannot be reordered or reused. + // + // - limitation: use of converters means secrets/passwords/IDs are locked + // in as upper or lower hex with even digits, etc. + // + // - while not currently the case, if different API requests have the same + // input field name with different types, the nil converter must be used. + + packedAPIParameterSpecs := []packSpec{ + + // Specs: protocol.PSIPHON_API_HANDSHAKE_AUTHORIZATIONS + + {1, "authorizations", authorizationsConverter}, + + // Specs: + // tactics.SPEED_TEST_SAMPLES_PARAMETER_NAME + // tactics.APPLIED_TACTICS_TAG_PARAMETER_NAME + // tactics.STORED_TACTICS_TAG_PARAMETER_NAME + + {2, "stored_tactics_tag", lowerHexConverter}, + {3, "speed_test_samples", compatibleJSONMapConverter}, + {4, "applied_tactics_tag", lowerHexConverter}, + + // Specs: server.baseParams + // + // - client_build_rev does not use a hex converter since some values + // are a non-even length prefix of a commit hash hex. + + {5, "client_session_id", lowerHexConverter}, + {6, "propagation_channel_id", upperHexConverter}, + {7, "sponsor_id", upperHexConverter}, + {8, "client_version", intConverter}, + {9, "client_platform", nil}, + {10, "client_features", nil}, + {11, "client_build_rev", nil}, + {12, "device_region", nil}, + {13, "device_location", nil}, + + // Specs: server.baseSessionParams + + {14, "session_id", lowerHexConverter}, + + // Specs: server.baseDialParams + // + // - intConverter is used for boolean fields as those are "0"/"1" + // string values by legacy convention. + // + // - the `padding` field is not packed since it is intended to pad the + // encoded message to its existing size. 
+ + {15, "relay_protocol", nil}, + {16, "ssh_client_version", nil}, + {17, "upstream_proxy_type", nil}, + {18, "upstream_proxy_custom_header_names", nil}, + {19, "fronting_provider_id", upperHexConverter}, + {20, "meek_dial_address", nil}, + {21, "meek_resolved_ip_address", nil}, + {22, "meek_sni_server_name", nil}, + {23, "meek_host_header", nil}, + {24, "meek_transformed_host_name", intConverter}, + {25, "user_agent", nil}, + {26, "tls_profile", nil}, + {27, "tls_version", nil}, + {28, "server_entry_region", nil}, + {29, "server_entry_source", nil}, + {30, "server_entry_timestamp", nil}, + {31, "dial_port_number", intConverter}, + {32, "quic_version", nil}, + {33, "quic_dial_sni_address", nil}, + {34, "quic_disable_client_path_mtu_discovery", intConverter}, + {35, "upstream_bytes_fragmented", intConverter}, + {36, "upstream_min_bytes_written", intConverter}, + {37, "upstream_max_bytes_written", intConverter}, + {38, "upstream_min_delayed", intConverter}, + {39, "upstream_max_delayed", intConverter}, + {40, "padding", nil}, + {41, "pad_response", intConverter}, + {42, "is_replay", intConverter}, + {43, "egress_region", nil}, + {44, "dial_duration", intConverter}, + {45, "candidate_number", intConverter}, + {46, "established_tunnels_count", intConverter}, + {47, "upstream_ossh_padding", intConverter}, + {48, "meek_cookie_size", intConverter}, + {49, "meek_limit_request", intConverter}, + {50, "meek_redial_probability", floatConverter}, + {51, "meek_tls_padding", intConverter}, + {52, "network_latency_multiplier", floatConverter}, + {53, "client_bpf", nil}, + {54, "network_type", nil}, + {55, "conjure_cached", nil}, + {56, "conjure_delay", nil}, + {57, "conjure_transport", nil}, + {58, "conjure_prefix", nil}, + {59, "conjure_stun", nil}, + {60, "conjure_empty_packet", intConverter}, + {61, "conjure_network", nil}, + {62, "conjure_port_number", intConverter}, + {63, "split_tunnel", nil}, + {64, "split_tunnel_regions", nil}, + {65, "dns_preresolved", nil}, + {66, "dns_preferred", nil}, + {67, "dns_transform", nil}, + {68, "dns_attempt", intConverter}, + {69, "http_transform", nil}, + {70, "seed_transform", nil}, + {71, "ossh_prefix", nil}, + {72, "tls_fragmented", intConverter}, + {73, "tls_padding", intConverter}, + {74, "tls_ossh_sni_server_name", nil}, + {75, "tls_ossh_transformed_host_name", intConverter}, + + // Specs: server.inproxyDialParams + + {76, "inproxy_connection_id", unpaddedBase64Converter}, + {77, "inproxy_relay_packet", unpaddedBase64Converter}, + {78, "inproxy_broker_is_replay", intConverter}, + {79, "inproxy_broker_transport", nil}, + {80, "inproxy_broker_fronting_provider_id", upperHexConverter}, + {81, "inproxy_broker_dial_address", nil}, + {82, "inproxy_broker_resolved_ip_address", nil}, + {83, "inproxy_broker_sni_server_name", nil}, + {84, "inproxy_broker_host_header", nil}, + {85, "inproxy_broker_transformed_host_name", intConverter}, + {86, "inproxy_broker_user_agent", nil}, + {87, "inproxy_broker_tls_profile", nil}, + {88, "inproxy_broker_tls_version", nil}, + {89, "inproxy_broker_tls_fragmented", intConverter}, + {90, "inproxy_broker_tls_padding", intConverter}, + {91, "inproxy_broker_client_bpf", nil}, + {92, "inproxy_broker_upstream_bytes_fragmented", intConverter}, + {93, "inproxy_broker_upstream_min_bytes_written", intConverter}, + {94, "inproxy_broker_upstream_max_bytes_written", intConverter}, + {95, "inproxy_broker_upstream_min_delayed", intConverter}, + {96, "inproxy_broker_upstream_max_delayed", intConverter}, + {97, "inproxy_broker_http_transform", nil}, 
+ {98, "inproxy_broker_dns_preresolved", nil}, + {99, "inproxy_broker_dns_preferred", nil}, + {100, "inproxy_broker_dns_transform", nil}, + {101, "inproxy_broker_dns_attempt", intConverter}, + {102, "inproxy_webrtc_dns_preresolved", nil}, + {103, "inproxy_webrtc_dns_preferred", nil}, + {104, "inproxy_webrtc_dns_transform", nil}, + {105, "inproxy_webrtc_dns_attempt", intConverter}, + {106, "inproxy_webrtc_stun_server", nil}, + {107, "inproxy_webrtc_stun_server_resolved_ip_address", nil}, + {108, "inproxy_webrtc_stun_server_RFC5780", nil}, + {109, "inproxy_webrtc_stun_server_RFC5780_resolved_ip_address", nil}, + {110, "inproxy_webrtc_randomize_dtls", intConverter}, + {111, "inproxy_webrtc_padded_messages_sent", intConverter}, + {112, "inproxy_webrtc_padded_messages_received", intConverter}, + {113, "inproxy_webrtc_decoy_messages_sent", intConverter}, + {114, "inproxy_webrtc_decoy_messages_received", intConverter}, + {115, "inproxy_webrtc_local_ice_candidate_type", nil}, + {116, "inproxy_webrtc_local_ice_candidate_is_initiator", intConverter}, + {117, "inproxy_webrtc_local_ice_candidate_is_IPv6", intConverter}, + {118, "inproxy_webrtc_local_ice_candidate_port", intConverter}, + {119, "inproxy_webrtc_remote_ice_candidate_type", nil}, + {120, "inproxy_webrtc_remote_ice_candidate_is_IPv6", intConverter}, + {121, "inproxy_webrtc_remote_ice_candidate_port", intConverter}, + + // Specs: server.handshakeRequestParams + + {122, "missing_server_entry_signature", base64Converter}, + {123, "missing_server_entry_provider_id", base64Converter}, + + // Specs: server.uniqueUserParams + // + // - future enhancement: add a timestamp converter from RFC3339 to and + // from 64-bit Unix time? + + {124, "last_connected", nil}, + + // Specs: server.connectedRequestParams + + {125, "establishment_duration", intConverter}, + + // Specs: server.remoteServerListStatParams + + {126, "client_download_timestamp", nil}, + {127, "tunneled", intConverter}, + {128, "url", nil}, + {129, "etag", nil}, + {130, "bytes", intConverter}, + {131, "duration", intConverter}, + + // Specs: server.failedTunnelStatParams + // + // - given CBOR integer encoding, int key values greater than 128 may + // be a byte longer; this means some failed_tunnel required field + // key encodings may be longer than some optional handshake field + // key encodings; however, we prioritize reducing the handshake + // size, since it comes earlier in the tunnel flow. + + {132, "server_entry_tag", base64Converter}, + {133, "client_failed_timestamp", nil}, + {134, "record_probability", floatConverter}, + {135, "liveness_test_upstream_bytes", intConverter}, + {136, "liveness_test_sent_upstream_bytes", intConverter}, + {137, "liveness_test_downstream_bytes", intConverter}, + {138, "liveness_test_received_downstream_bytes", intConverter}, + {139, "bytes_up", intConverter}, + {140, "bytes_down", intConverter}, + {141, "tunnel_error", nil}, + + // Specs: status request payload + // + // - future enhancement: pack the statusData payload, which is + // currently sent as unpacked JSON. 
+ + {142, "statusData", rawJSONConverter}, + + // Last key value = 142 + } + + for _, spec := range packedAPIParameterSpecs { + + if _, ok := packedAPIParametersNameToSpec[spec.name]; ok { + panic("duplicate parameter name") + } + packedAPIParametersNameToSpec[spec.name] = spec + + if _, ok := packedAPIParametersKeyToSpec[spec.key]; ok { + panic("duplicate parameter key") + } + packedAPIParametersKeyToSpec[spec.key] = spec + } + + // Packed server entry fields + // + // - must be appended to when server entry fields are added; existing key + // values cannot be reordered or reused. + // + // - limitation: use of converters means secrets/passwords/IDs are locked + // in as upper or lower hex with even digits, etc. + // + // - since webServerCertificate is omitted in non-legacy server entries, + // no PEM-encoding packer is implemented. + // + // - unlike API integer parameters and certain server entry fields, most + // port values are already int types and so not converted. + // + // - local-only fields are also packed, to allow for future use of packed + // encodings in the local datastore. + + packedServerEntryFieldSpecs := []packSpec{ + {1, "tag", base64Converter}, + {2, "ipAddress", nil}, + {3, "webServerPort", intConverter}, + {4, "webServerSecret", lowerHexConverter}, + {5, "webServerCertificate", nil}, + {6, "sshPort", nil}, + {7, "sshUsername", nil}, + {8, "sshPassword", lowerHexConverter}, + {9, "sshHostKey", unpaddedBase64Converter}, + {10, "sshObfuscatedPort", nil}, + {11, "sshObfuscatedQUICPort", nil}, + {12, "limitQUICVersions", nil}, + {13, "sshObfuscatedTapdancePort", nil}, + {14, "sshObfuscatedConjurePort", nil}, + {15, "sshObfuscatedKey", lowerHexConverter}, + {16, "capabilities", nil}, + {17, "region", nil}, + {18, "providerID", upperHexConverter}, + {19, "frontingProviderID", upperHexConverter}, + {20, "tlsOSSHPort", nil}, + {21, "meekServerPort", nil}, + {22, "meekCookieEncryptionPublicKey", base64Converter}, + {23, "meekObfuscatedKey", lowerHexConverter}, + {24, "meekFrontingHost", nil}, + {25, "meekFrontingHosts", nil}, + {26, "meekFrontingDomain", nil}, + {27, "meekFrontingAddresses", nil}, + {28, "meekFrontingAddressesRegex", nil}, + {29, "meekFrontingDisableSNI", nil}, + {30, "tacticsRequestPublicKey", base64Converter}, + {31, "tacticsRequestObfuscatedKey", base64Converter}, + {32, "configurationVersion", nil}, + {33, "signature", base64Converter}, + {34, "disableHTTPTransforms", nil}, + {35, "disableObfuscatedQUICTransforms", nil}, + {36, "disableOSSHTransforms", nil}, + {37, "disableOSSHPrefix", nil}, + {38, "inproxySessionPublicKey", unpaddedBase64Converter}, + {39, "inproxySessionRootObfuscationSecret", unpaddedBase64Converter}, + {40, "inproxySSHPort", nil}, + {41, "inproxyOSSHPort", nil}, + {42, "inproxyQUICPort", nil}, + {43, "inproxyMeekPort", nil}, + {44, "inproxyTlsOSSHPort", nil}, + {45, "localSource", nil}, + {46, "localTimestamp", nil}, + {47, "isLocalDerivedTag", nil}, + } + + for _, spec := range packedServerEntryFieldSpecs { + + if _, ok := packedServerEntryFieldsNameToSpec[spec.name]; ok { + panic("duplicate field name") + } + packedServerEntryFieldsNameToSpec[spec.name] = spec + + if _, ok := packedServerEntryFieldsKeyToSpec[spec.key]; ok { + panic("duplicate field key") + } + packedServerEntryFieldsKeyToSpec[spec.key] = spec + } + +} diff --git a/psiphon/common/protocol/packed_test.go b/psiphon/common/protocol/packed_test.go new file mode 100644 index 000000000..2a6a9fd5a --- /dev/null +++ b/psiphon/common/protocol/packed_test.go @@ -0,0 +1,163 @@ 
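The converters referenced in the spec tables above are what make the packing worthwhile: a 32-byte secret carried as a 64-character hex string costs 66 bytes in CBOR (a 2-byte text-string header plus 64 bytes of text), while the same value packed as a byte string costs 34. A rough standalone illustration, assuming the fxamacker/cbor/v2 module this patch imports (shown with its canonical github.com import path):

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	raw := make([]byte, 32)              // e.g., an obfuscation key
	hexString := hex.EncodeToString(raw) // the 64-character form used in JSON

	asString, _ := cbor.Marshal(hexString) // how the field encodes unpacked
	asBytes, _ := cbor.Marshal(raw)        // how the field encodes after packHex

	fmt.Printf("hex string: %d bytes, packed bytes: %d bytes\n",
		len(asString), len(asBytes)) // 66 vs. 34
}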
+/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package protocol + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/accesscontrol" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" +) + +func TestPackedAPIParameters(t *testing.T) { + + params := make(common.APIParameters) + + for name, spec := range packedAPIParametersNameToSpec { + params[name] = makeTestPackValue(t, spec) + } + + packedParams, err := EncodePackedAPIParameters(params) + if err != nil { + t.Fatalf("EncodePackedAPIParameters failed: %v", err) + } + + unpackedParams, err := DecodePackedAPIParameters(packedParams) + if err != nil { + t.Fatalf("DecodePackedAPIParameters failed: %v", err) + } + + checkTestPackValues( + t, packedAPIParametersNameToSpec, params, unpackedParams) +} + +func TestPackedServerEntry(t *testing.T) { + + fields := make(ServerEntryFields) + + for name, spec := range packedServerEntryFieldsNameToSpec { + fields[name] = makeTestPackValue(t, spec) + } + unrecognized := "unrecognized_field_name" + fields[unrecognized] = prng.HexString(prng.Range(1, 1000)) + + packedFields, err := EncodePackedServerEntryFields(fields) + if err != nil { + t.Fatalf("EncodePackedServerEntryFields failed: %v", err) + } + + unpackedFields, err := DecodePackedServerEntryFields(packedFields) + if err != nil { + t.Fatalf("DecodePackedServerEntryFields failed: %v", err) + } + + checkTestPackValues( + t, packedServerEntryFieldsNameToSpec, fields, unpackedFields) + + if !reflect.DeepEqual(fields[unrecognized], unpackedFields[unrecognized]) { + t.Errorf("decoded value %s not equal: %T %+v != %T %+v", + unrecognized, + fields[unrecognized], fields[unrecognized], + unpackedFields[unrecognized], unpackedFields[unrecognized]) + } +} + +func makeTestPackValue(t *testing.T, spec packSpec) interface{} { + switch spec.converter { + case nil: + return prng.HexString(prng.Range(1, 1000)) + case intConverter: + return fmt.Sprintf("%d", prng.Intn(1>>32)) + case floatConverter: + return fmt.Sprintf("%f", float64(prng.Intn(1>>32))) + case lowerHexConverter: + return prng.HexString(prng.Range(1, 1000)) + case upperHexConverter: + return strings.ToUpper(prng.HexString((prng.Range(1, 1000)))) + case base64Converter: + return base64.StdEncoding.EncodeToString(prng.Bytes(prng.Range(1, 1000))) + case unpaddedBase64Converter: + return base64.RawStdEncoding.EncodeToString(prng.Bytes(prng.Range(1, 1000))) + case authorizationsConverter: + signingKey, _, err0 := accesscontrol.NewKeyPair("test-access-type") + auth1, _, err1 := accesscontrol.IssueAuthorization(signingKey, []byte("1"), time.Now().Add(1*time.Second)) + auth2, _, err2 := accesscontrol.IssueAuthorization(signingKey, []byte("2"), 
time.Now().Add(1*time.Second)) + if err0 != nil || err1 != nil || err2 != nil { + t.Fatalf("accesscontrol.NewKeyPair/IssueAuthorization failed") + } + return []string{auth1, auth2} + case rawJSONConverter: + return []byte(fmt.Sprintf(`{"A":%d, "B":%d}`, prng.Intn(1>>32), prng.Intn(1>>32))) + case compatibleJSONMapConverter: + return []any{map[any]any{"a": 1, "b": 2}, map[any]any{"a": 3, "b": 4}} + } + t.Fatalf("unexpected converter") + return nil +} + +func checkTestPackValues( + t *testing.T, + specs map[string]packSpec, + originalValues map[string]interface{}, + unpackedValues map[string]interface{}) { + + for name, spec := range specs { + originalValue := originalValues[name] + unpackedValue := unpackedValues[name] + if spec.converter == rawJSONConverter { + + // Special case: for rawJSONConverter, the input is bytes while + // the output is unmarshaled JSON. + var unmarshaledJSON map[string]interface{} + _ = json.Unmarshal(originalValue.([]byte), &unmarshaledJSON) + originalValue = unmarshaledJSON + + } else if spec.converter == compatibleJSONMapConverter { + + // Special case: for compatibleJSONMapConverter, reverse the + // conversion to produce the original value with the same type. + unpackedSlice, ok := unpackedValue.([]map[string]interface{}) + if !ok { + t.Errorf("expected []map[string]interface {} type") + return + } + entries := make([]interface{}, len(unpackedSlice)) + for i, unpackedEntry := range unpackedSlice { + entry := make(map[interface{}]interface{}) + for key, value := range unpackedEntry { + entry[key] = value + } + entries[i] = entry + } + unpackedValue = entries + } + if !reflect.DeepEqual(originalValue, unpackedValue) { + t.Errorf("decoded value %s not equal: %T %+v != %T %+v", + name, originalValue, originalValue, unpackedValue, unpackedValue) + } + } +} diff --git a/psiphon/common/protocol/protocol.go b/psiphon/common/protocol/protocol.go index a2d9f8422..e86046ea5 100644 --- a/psiphon/common/protocol/protocol.go +++ b/psiphon/common/protocol/protocol.go @@ -22,11 +22,14 @@ package protocol import ( "crypto/sha256" "encoding/json" + "fmt" + "strings" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/osl" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" + "github.com/fxamacker/cbor/v2" ) const ( @@ -43,8 +46,14 @@ const ( TUNNEL_PROTOCOL_TAPDANCE_OBFUSCATED_SSH = "TAPDANCE-OSSH" TUNNEL_PROTOCOL_CONJURE_OBFUSCATED_SSH = "CONJURE-OSSH" + FRONTING_TRANSPORT_HTTPS = "FRONTED-HTTPS" + FRONTING_TRANSPORT_HTTP = "FRONTED-HTTP" + FRONTING_TRANSPORT_QUIC = "FRONTED-QUIC" + TUNNEL_PROTOCOLS_ALL = "All" + INPROXY_PROTOCOL_WEBRTC = "INPROXY-WEBRTC" + SERVER_ENTRY_SOURCE_EMBEDDED = "EMBEDDED" SERVER_ENTRY_SOURCE_REMOTE = "REMOTE" SERVER_ENTRY_SOURCE_DISCOVERY = "DISCOVERY" @@ -57,11 +66,12 @@ const ( CLIENT_CAPABILITY_SERVER_REQUESTS = "server-requests" - PSIPHON_API_HANDSHAKE_REQUEST_NAME = "psiphon-handshake" - PSIPHON_API_CONNECTED_REQUEST_NAME = "psiphon-connected" - PSIPHON_API_STATUS_REQUEST_NAME = "psiphon-status" - PSIPHON_API_OSL_REQUEST_NAME = "psiphon-osl" - PSIPHON_API_ALERT_REQUEST_NAME = "psiphon-alert" + PSIPHON_API_HANDSHAKE_REQUEST_NAME = "psiphon-handshake" + PSIPHON_API_CONNECTED_REQUEST_NAME = "psiphon-connected" + PSIPHON_API_STATUS_REQUEST_NAME = "psiphon-status" + PSIPHON_API_OSL_REQUEST_NAME = "psiphon-osl" + PSIPHON_API_ALERT_REQUEST_NAME = "psiphon-alert" + 
PSIPHON_API_INPROXY_RELAY_REQUEST_NAME = "psiphon-inproxy-relay" PSIPHON_API_ALERT_DISALLOWED_TRAFFIC = "disallowed-traffic" PSIPHON_API_ALERT_UNSAFE_TRAFFIC = "unsafe-traffic" @@ -71,8 +81,10 @@ const ( PSIPHON_API_CLIENT_SESSION_ID_LENGTH = 16 - PSIPHON_SSH_API_PROTOCOL = "ssh" - PSIPHON_WEB_API_PROTOCOL = "web" + PSIPHON_API_PROTOCOL_SSH = "ssh" + PSIPHON_API_PROTOCOL_WEB = "web" + PSIPHON_API_ENCODING_CBOR = "cbor" + PSIPHON_API_ENCODING_JSON = "json" PACKET_TUNNEL_CHANNEL_TYPE = "tun@psiphon.ca" RANDOM_STREAM_CHANNEL_TYPE = "random@psiphon.ca" @@ -101,6 +113,16 @@ func AllowServerEntrySourceWithUpstreamProxy(source string) bool { source == SERVER_ENTRY_SOURCE_REMOTE } +func PsiphonAPIProtocolIsValid(protocol string) bool { + return protocol == PSIPHON_API_PROTOCOL_SSH || + protocol == PSIPHON_API_PROTOCOL_WEB +} + +func PsiphonAPIEncodingIsValid(protocol string) bool { + return protocol == PSIPHON_API_ENCODING_CBOR || + protocol == PSIPHON_API_ENCODING_JSON +} + type TunnelProtocols []string func (t TunnelProtocols) Validate() error { @@ -124,6 +146,16 @@ func (t TunnelProtocols) PruneInvalid() TunnelProtocols { return u } +func (t TunnelProtocols) OnlyInproxyTunnelProtocols() TunnelProtocols { + u := make(TunnelProtocols, 0) + for _, p := range t { + if TunnelProtocolUsesInproxy(p) { + u = append(u, p) + } + } + return u +} + type LabeledTunnelProtocols map[string]TunnelProtocols func (labeledProtocols LabeledTunnelProtocols) Validate() error { @@ -181,7 +213,52 @@ var DisabledTunnelProtocols = TunnelProtocols{ TUNNEL_PROTOCOL_TAPDANCE_OBFUSCATED_SSH, } +var InproxyTunnelProtocols = TunnelProtocols{ + // Populated by init. +} + +func init() { + + // Instead of duplicating most TUNNEL_PROTOCOL* constants, + // programmatically add in-proxy variants of all (compatible) tunnel + // protocols. All in-proxy variants are default disabled. + + for _, p := range SupportedTunnelProtocols { + if TunnelProtocolIsCompatibleWithInproxy(p) { + InproxyTunnelProtocols = append( + InproxyTunnelProtocols, TunnelProtocolPlusInproxyWebRTC(p)) + } + } + SupportedTunnelProtocols = append(SupportedTunnelProtocols, InproxyTunnelProtocols...) + DefaultDisabledTunnelProtocols = append(DefaultDisabledTunnelProtocols, InproxyTunnelProtocols...) +} + +func TunnelProtocolPlusInproxyWebRTC(protocol string) string { + return fmt.Sprintf("%s-%s", INPROXY_PROTOCOL_WEBRTC, protocol) +} + +func TunnelProtocolMinusInproxy(protocol string) string { + // Remove the in-proxy 1st hop portion of the protocol name, which + // currently is always "INPROXY-WEBRTC". + protocol, _ = strings.CutPrefix(protocol, fmt.Sprintf("%s-", INPROXY_PROTOCOL_WEBRTC)) + return protocol +} + +func TunnelProtocolUsesInproxy(protocol string) bool { + // Check for the in-proxy 1st hop portion of the protocol name, which + // currently can only be "INPROXY-WEBRTC". + return strings.HasPrefix(protocol, INPROXY_PROTOCOL_WEBRTC) +} + +func TunnelProtocolIsCompatibleWithInproxy(protocol string) bool { + // The TapDance and Conjure destination addresses are not included + // in the server entry, so TAPDANCE-OSSH and CONJURE-OSSH dial + // destinations cannot be validated for inproxy use. 
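The in-proxy protocol names are plain string compositions: the 1st-hop label "INPROXY-WEBRTC" is prefixed onto an existing 2nd-hop tunnel protocol name, and stripped again wherever only the 2nd-hop behavior matters, which is why most of the TunnelProtocolUses* helpers below simply call TunnelProtocolMinusInproxy first. A standalone sketch of the naming convention, mirroring TunnelProtocolPlusInproxyWebRTC and TunnelProtocolMinusInproxy (illustrative, not the patch's own code):

package main

import (
	"fmt"
	"strings"
)

const inproxyPrefix = "INPROXY-WEBRTC" // mirrors INPROXY_PROTOCOL_WEBRTC

func plusInproxy(protocol string) string {
	return inproxyPrefix + "-" + protocol
}

func minusInproxy(protocol string) string {
	p, _ := strings.CutPrefix(protocol, inproxyPrefix+"-")
	return p
}

func main() {
	p := plusInproxy("QUIC-OSSH")
	fmt.Println(p)                                   // INPROXY-WEBRTC-QUIC-OSSH
	fmt.Println(minusInproxy(p))                     // QUIC-OSSH: 2nd-hop checks use this name
	fmt.Println(strings.HasPrefix(p, inproxyPrefix)) // true: the dial uses an in-proxy 1st hop
}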
+ return !TunnelProtocolUsesRefractionNetworking(protocol) +} + func TunnelProtocolUsesTCP(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol != TUNNEL_PROTOCOL_QUIC_OBFUSCATED_SSH && protocol != TUNNEL_PROTOCOL_FRONTED_MEEK_QUIC_OBFUSCATED_SSH } @@ -190,7 +267,13 @@ func TunnelProtocolUsesSSH(protocol string) bool { return true } +func TunnelProtocolIsObfuscatedSSH(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) + return protocol == TUNNEL_PROTOCOL_OBFUSCATED_SSH +} + func TunnelProtocolUsesObfuscatedSSH(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol != TUNNEL_PROTOCOL_SSH } @@ -198,46 +281,55 @@ func TunnelProtocolUsesObfuscatedSSH(protocol string) bool { // UsesTLS is ambiguous by itself as there are other protocols which use // a TLS layer, e.g. UNFRONTED-MEEK-HTTPS-OSSH. func TunnelProtocolUsesTLSOSSH(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol == TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH } func TunnelProtocolUsesMeek(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return TunnelProtocolUsesMeekHTTP(protocol) || TunnelProtocolUsesMeekHTTPS(protocol) || TunnelProtocolUsesFrontedMeekQUIC(protocol) } func TunnelProtocolUsesFrontedMeek(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol == TUNNEL_PROTOCOL_FRONTED_MEEK || protocol == TUNNEL_PROTOCOL_FRONTED_MEEK_HTTP || protocol == TUNNEL_PROTOCOL_FRONTED_MEEK_QUIC_OBFUSCATED_SSH } func TunnelProtocolUsesMeekHTTP(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol == TUNNEL_PROTOCOL_UNFRONTED_MEEK || protocol == TUNNEL_PROTOCOL_FRONTED_MEEK_HTTP } func TunnelProtocolUsesMeekHTTPNormalizer(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol == TUNNEL_PROTOCOL_UNFRONTED_MEEK } func TunnelProtocolUsesMeekHTTPS(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol == TUNNEL_PROTOCOL_FRONTED_MEEK || protocol == TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS || protocol == TUNNEL_PROTOCOL_UNFRONTED_MEEK_SESSION_TICKET } func TunnelProtocolUsesObfuscatedSessionTickets(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol == TUNNEL_PROTOCOL_UNFRONTED_MEEK_SESSION_TICKET } func TunnelProtocolUsesQUIC(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol == TUNNEL_PROTOCOL_QUIC_OBFUSCATED_SSH || protocol == TUNNEL_PROTOCOL_FRONTED_MEEK_QUIC_OBFUSCATED_SSH } func TunnelProtocolUsesFrontedMeekQUIC(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol == TUNNEL_PROTOCOL_FRONTED_MEEK_QUIC_OBFUSCATED_SSH } @@ -257,10 +349,12 @@ func TunnelProtocolUsesConjure(protocol string) bool { func TunnelProtocolIsResourceIntensive(protocol string) bool { return TunnelProtocolUsesMeek(protocol) || TunnelProtocolUsesQUIC(protocol) || - TunnelProtocolUsesRefractionNetworking(protocol) + TunnelProtocolUsesRefractionNetworking(protocol) || + TunnelProtocolUsesInproxy(protocol) } func TunnelProtocolIsCompatibleWithFragmentor(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol == TUNNEL_PROTOCOL_SSH || protocol == TUNNEL_PROTOCOL_OBFUSCATED_SSH || protocol == TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH || @@ -283,6 +377,7 @@ func TunnelProtocolIsDirect(protocol string) bool { } func 
TunnelProtocolRequiresTLS12SessionTickets(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol == TUNNEL_PROTOCOL_UNFRONTED_MEEK_SESSION_TICKET } @@ -291,6 +386,7 @@ func TunnelProtocolRequiresTLS13Support(protocol string) bool { } func TunnelProtocolSupportsPassthrough(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol == TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS || protocol == TUNNEL_PROTOCOL_UNFRONTED_MEEK_SESSION_TICKET || protocol == TUNNEL_PROTOCOL_UNFRONTED_MEEK || @@ -298,10 +394,16 @@ func TunnelProtocolSupportsPassthrough(protocol string) bool { } func TunnelProtocolSupportsUpstreamProxy(protocol string) bool { - return !TunnelProtocolUsesQUIC(protocol) + return !TunnelProtocolUsesInproxy(protocol) && + !TunnelProtocolUsesQUIC(protocol) +} + +func TunnelProtocolSupportsTactics(protocol string) bool { + return TunnelProtocolUsesMeek(protocol) } func TunnelProtocolMayUseServerPacketManipulation(protocol string) bool { + protocol = TunnelProtocolMinusInproxy(protocol) return protocol == TUNNEL_PROTOCOL_SSH || protocol == TUNNEL_PROTOCOL_OBFUSCATED_SSH || protocol == TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH || @@ -310,6 +412,14 @@ func TunnelProtocolMayUseServerPacketManipulation(protocol string) bool { protocol == TUNNEL_PROTOCOL_UNFRONTED_MEEK_SESSION_TICKET } +func TunnelProtocolMayUseClientBPF(protocol string) bool { + if TunnelProtocolUsesInproxy(protocol) { + return false + } + return protocol != TUNNEL_PROTOCOL_QUIC_OBFUSCATED_SSH && + protocol != TUNNEL_PROTOCOL_FRONTED_MEEK_QUIC_OBFUSCATED_SSH +} + func IsValidClientTunnelProtocol( clientProtocol string, listenerProtocol string, @@ -349,6 +459,38 @@ func IsValidClientTunnelProtocol( return false } +// FrontingTransports are transport protocols used for non-tunnel, fronted +// connections such as in-proxy broker requests. +type FrontingTransports []string + +func (transports FrontingTransports) Validate() error { + for _, t := range transports { + switch t { + case FRONTING_TRANSPORT_HTTPS, + FRONTING_TRANSPORT_HTTP, + FRONTING_TRANSPORT_QUIC: + default: + return errors.Tracef("invalid fronting transport: %s", t) + } + } + return nil +} + +// EquivilentTunnelProtocol returns the tunnel protocol equivilent of a +// fronting transport. This value may be used to select tactics, defined for +// the tunnel protocol, for the fronting transport. 
+func EquivilentTunnelProtocol(t string) (string, error) { + switch t { + case FRONTING_TRANSPORT_HTTPS: + return TUNNEL_PROTOCOL_FRONTED_MEEK, nil + case FRONTING_TRANSPORT_HTTP: + return TUNNEL_PROTOCOL_FRONTED_MEEK_HTTP, nil + case FRONTING_TRANSPORT_QUIC: + return TUNNEL_PROTOCOL_FRONTED_MEEK_QUIC_OBFUSCATED_SSH, nil + } + return "", errors.Tracef("invalid fronting transport: %s", t) +} + const ( TLS_VERSION_12 = "TLSv1.2" TLS_VERSION_13 = "TLSv1.3" @@ -422,7 +564,6 @@ func TLS12ProfileOmitsSessionTickets(tlsProfile string) bool { type TLSProfiles []string func (profiles TLSProfiles) Validate(customTLSProfiles []string) error { - for _, p := range profiles { if !common.Contains(SupportedTLSProfiles, p) && !common.Contains(customTLSProfiles, p) && @@ -565,6 +706,10 @@ func ConjureTransportUsesSTUN(transport string) bool { return transport == CONJURE_TRANSPORT_DTLS_OSSH } +func ConjureTransportUsesDTLS(transport string) bool { + return transport == CONJURE_TRANSPORT_DTLS_OSSH +} + type ConjureTransports []string func (transports ConjureTransports) Validate() error { @@ -587,7 +732,6 @@ func (transports ConjureTransports) PruneInvalid() ConjureTransports { } type HandshakeResponse struct { - SSHSessionID string `json:"ssh_session_id"` Homepages []string `json:"homepages"` UpgradeClientVersion string `json:"upgrade_client_version"` PageViewRegexes []map[string]string `json:"page_view_regexes"` @@ -642,6 +786,28 @@ type AlertRequest struct { ActionURLs []string `json:"action"` } +// CBOREncoding defines the specific CBDR encoding used for all Psiphon CBOR +// message encoding. This is initialized to FIDO2 CTAP2 Canonical CBOR. +var CBOREncoding cbor.EncMode + +func init() { + encOptions := cbor.CTAP2EncOptions() + + // TimeRFC3339Nano matches the JSON encoding time format and is required + // for accesscontrol.packedAuthorization types, which marshal a time.Time. + encOptions.Time = cbor.TimeRFC3339Nano + + CBOREncoding, _ = encOptions.EncMode() +} + +type InproxyRelayRequest struct { + Packet []byte `cbor:"1,keyasint,omitempty"` +} + +type InproxyRelayResponse struct { + Packet []byte `cbor:"1,keyasint,omitempty"` +} + func DeriveSSHServerKEXPRNGSeed(obfuscatedKey string) (*prng.Seed, error) { // By convention, the obfuscatedKey will often be a hex-encoded 32 byte value, // but this isn't strictly required or validated, so we use SHA256 to map the diff --git a/psiphon/common/protocol/serverEntry.go b/psiphon/common/protocol/serverEntry.go index 56fe542fb..3515a5c41 100644 --- a/psiphon/common/protocol/serverEntry.go +++ b/psiphon/common/protocol/serverEntry.go @@ -32,6 +32,8 @@ import ( "fmt" "io" "net" + "regexp" + "strconv" "strings" "sync/atomic" "time" @@ -45,43 +47,50 @@ import ( // several protocols. Server entries are JSON records downloaded from // various sources. 
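For context, the CTAP2 canonical options make the CBOR output deterministic (shortest-form integers, sorted map keys), which is useful when encoded messages are signed or compared byte for byte. A small sketch of marshaling a message shaped like InproxyRelayRequest with an encoder configured this way, assuming the fxamacker/cbor/v2 module imported by the patch (shown with its canonical github.com path); this is an illustration, not the patch's own request path.

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// relayRequest mirrors the shape of InproxyRelayRequest for illustration.
type relayRequest struct {
	Packet []byte `cbor:"1,keyasint,omitempty"`
}

func main() {
	opts := cbor.CTAP2EncOptions()
	opts.Time = cbor.TimeRFC3339Nano // match the JSON time format, as above
	encMode, err := opts.EncMode()
	if err != nil {
		panic(err)
	}

	encoded, err := encMode.Marshal(relayRequest{Packet: []byte{0x01, 0x02}})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", encoded) // a101420102: map{1: h'0102'}

	var decoded relayRequest
	if err := cbor.Unmarshal(encoded, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", decoded.Packet) // 0102
}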
type ServerEntry struct { - Tag string `json:"tag"` - IpAddress string `json:"ipAddress"` - WebServerPort string `json:"webServerPort"` // not an int - WebServerSecret string `json:"webServerSecret"` - WebServerCertificate string `json:"webServerCertificate"` - SshPort int `json:"sshPort"` - SshUsername string `json:"sshUsername"` - SshPassword string `json:"sshPassword"` - SshHostKey string `json:"sshHostKey"` - SshObfuscatedPort int `json:"sshObfuscatedPort"` - SshObfuscatedQUICPort int `json:"sshObfuscatedQUICPort"` - LimitQUICVersions []string `json:"limitQUICVersions"` - SshObfuscatedTapDancePort int `json:"sshObfuscatedTapdancePort"` - SshObfuscatedConjurePort int `json:"sshObfuscatedConjurePort"` - SshObfuscatedKey string `json:"sshObfuscatedKey"` - Capabilities []string `json:"capabilities"` - Region string `json:"region"` - ProviderID string `json:"providerID"` - FrontingProviderID string `json:"frontingProviderID"` - TlsOSSHPort int `json:"tlsOSSHPort"` - MeekServerPort int `json:"meekServerPort"` - MeekCookieEncryptionPublicKey string `json:"meekCookieEncryptionPublicKey"` - MeekObfuscatedKey string `json:"meekObfuscatedKey"` - MeekFrontingHost string `json:"meekFrontingHost"` - MeekFrontingHosts []string `json:"meekFrontingHosts"` - MeekFrontingDomain string `json:"meekFrontingDomain"` - MeekFrontingAddresses []string `json:"meekFrontingAddresses"` - MeekFrontingAddressesRegex string `json:"meekFrontingAddressesRegex"` - MeekFrontingDisableSNI bool `json:"meekFrontingDisableSNI"` - TacticsRequestPublicKey string `json:"tacticsRequestPublicKey"` - TacticsRequestObfuscatedKey string `json:"tacticsRequestObfuscatedKey"` - ConfigurationVersion int `json:"configurationVersion"` - Signature string `json:"signature"` - DisableHTTPTransforms bool `json:"disableHTTPTransforms"` - DisableObfuscatedQUICTransforms bool `json:"disableObfuscatedQUICTransforms"` - DisableOSSHTransforms bool `json:"disableOSSHTransforms"` - DisableOSSHPrefix bool `json:"disableOSSHPrefix"` + Tag string `json:"tag,omitempty"` + IpAddress string `json:"ipAddress,omitempty"` + WebServerPort string `json:"webServerPort,omitempty"` // not an int + WebServerSecret string `json:"webServerSecret,omitempty"` + WebServerCertificate string `json:"webServerCertificate,omitempty"` + SshPort int `json:"sshPort,omitempty"` + SshUsername string `json:"sshUsername,omitempty"` + SshPassword string `json:"sshPassword,omitempty"` + SshHostKey string `json:"sshHostKey,omitempty"` + SshObfuscatedPort int `json:"sshObfuscatedPort,omitempty"` + SshObfuscatedQUICPort int `json:"sshObfuscatedQUICPort,omitempty"` + LimitQUICVersions []string `json:"limitQUICVersions,omitempty"` + SshObfuscatedTapDancePort int `json:"sshObfuscatedTapdancePort,omitempty"` + SshObfuscatedConjurePort int `json:"sshObfuscatedConjurePort,omitempty"` + SshObfuscatedKey string `json:"sshObfuscatedKey,omitempty"` + Capabilities []string `json:"capabilities,omitempty"` + Region string `json:"region,omitempty"` + ProviderID string `json:"providerID,omitempty"` + FrontingProviderID string `json:"frontingProviderID,omitempty"` + TlsOSSHPort int `json:"tlsOSSHPort,omitempty"` + MeekServerPort int `json:"meekServerPort,omitempty"` + MeekCookieEncryptionPublicKey string `json:"meekCookieEncryptionPublicKey,omitempty"` + MeekObfuscatedKey string `json:"meekObfuscatedKey,omitempty"` + MeekFrontingHost string `json:"meekFrontingHost,omitempty"` + MeekFrontingHosts []string `json:"meekFrontingHosts,omitempty"` + MeekFrontingDomain string 
`json:"meekFrontingDomain,omitempty"` + MeekFrontingAddresses []string `json:"meekFrontingAddresses,omitempty"` + MeekFrontingAddressesRegex string `json:"meekFrontingAddressesRegex,omitempty"` + MeekFrontingDisableSNI bool `json:"meekFrontingDisableSNI,omitempty"` + TacticsRequestPublicKey string `json:"tacticsRequestPublicKey,omitempty"` + TacticsRequestObfuscatedKey string `json:"tacticsRequestObfuscatedKey,omitempty"` + ConfigurationVersion int `json:"configurationVersion,omitempty"` + Signature string `json:"signature,omitempty"` + DisableHTTPTransforms bool `json:"disableHTTPTransforms,omitempty"` + DisableObfuscatedQUICTransforms bool `json:"disableObfuscatedQUICTransforms,omitempty"` + DisableOSSHTransforms bool `json:"disableOSSHTransforms,omitempty"` + DisableOSSHPrefix bool `json:"disableOSSHPrefix,omitempty"` + InproxySessionPublicKey string `json:"inproxySessionPublicKey,omitempty"` + InproxySessionRootObfuscationSecret string `json:"inproxySessionRootObfuscationSecret,omitempty"` + InproxySSHPort int `json:"inproxySSHPort,omitempty"` + InproxyOSSHPort int `json:"inproxyOSSHPort,omitempty"` + InproxyQUICPort int `json:"inproxyQUICPort,omitempty"` + InproxyMeekPort int `json:"inproxyMeekPort,omitempty"` + InproxyTlsOSSHPort int `json:"inproxyTlsOSSHPort,omitempty"` // These local fields are not expected to be present in downloaded server // entries. They are added by the client to record and report stats about @@ -426,6 +435,15 @@ func (fields ServerEntryFields) RemoveUnsignedFields() { delete(fields, "isLocalDerivedTag") } +// ToSignedFields checks for a signature and calls RemoveUnsignedFields. +func (fields ServerEntryFields) ToSignedFields() error { + if !fields.HasSignature() { + return errors.TraceNew("missing signature field") + } + fields.RemoveUnsignedFields() + return nil +} + // NewServerEntrySignatureKeyPair creates an ed25519 key pair for use in // server entry signing and verification. func NewServerEntrySignatureKeyPair() (string, string, error) { @@ -443,6 +461,17 @@ func NewServerEntrySignatureKeyPair() (string, string, error) { // GetCapability returns the server capability corresponding // to the tunnel protocol. func GetCapability(protocol string) string { + + // The "-OSSH" suffix drop is for legacy compatibility. Newer protocols, + // including in-proxy protocols, use the full protocol name as the + // capability. This avoids ambiguities such as in the case + // of "INPROXY-WEBRTC-OSSH", where a truncated "INPROXY-WEBRTC" is + // ambiguous. + + if TunnelProtocolUsesInproxy(protocol) { + return protocol + } + return strings.TrimSuffix(protocol, "-OSSH") } @@ -454,7 +483,7 @@ func GetTacticsCapability(protocol string) string { // hasCapability indicates if the server entry has the specified capability. // -// Any internal "PASSTHROUGH-v2 or "PASSTHROUGH" componant in the server +// Any internal "PASSTHROUGH-v2 or "PASSTHROUGH" component in the server // entry's capabilities is ignored. These PASSTHROUGH components are used to // mask protocols which are running the passthrough mechanisms from older // clients which do not implement the passthrough messages. Older clients @@ -466,6 +495,10 @@ func GetTacticsCapability(protocol string) string { // New clients must check SupportsOnlyQUICv1 before selecting a QUIC version; // for "QUICv1", this ensures that new clients also do not select gQUIC to // QUICv1-only servers. +// +// In-proxy tunnel protocols omit the "v1" and "PASSTHROUGH" suffixes. 
For +// in-proxy QUIC, gQUIC is never used; and for in-proxy HTTPS/TLS, clients +// always apply PASSTHROUGH-v2. func (serverEntry *ServerEntry) hasCapability(requiredCapability string) bool { for _, capability := range serverEntry.Capabilities { @@ -484,7 +517,9 @@ func (serverEntry *ServerEntry) hasCapability(requiredCapability string) bool { } // Special case: some capabilities may additionally support TLS-OSSH. - if requiredCapability == GetCapability(TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH) && capabilitySupportsTLSOSSH(originalCapability) { + // This does not apply to in-proxy TLS-OSSH. + if requiredCapability == GetCapability(TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH) && + capabilitySupportsTLSOSSH(originalCapability) { return true } } @@ -536,6 +571,9 @@ func (serverEntry *ServerEntry) ProtocolUsesLegacyPassthrough(protocol string) b // SupportsOnlyQUICv1 indicates that the QUIC-OSSH server supports only QUICv1 // and gQUIC versions should not be selected, as they will fail to connect // while sending atypical traffic to the server. +// +// SupportsOnlyQUICv1 strictly applies to QUIC-OSSH and not the in-proxy +// variant. func (serverEntry *ServerEntry) SupportsOnlyQUICv1() bool { quicCapability := GetCapability(TUNNEL_PROTOCOL_QUIC_OBFUSCATED_SSH) return common.Contains(serverEntry.Capabilities, quicCapability+"v1") && @@ -547,6 +585,7 @@ func (serverEntry *ServerEntry) SupportsOnlyQUICv1() bool { type ConditionallyEnabledComponents interface { QUICEnabled() bool RefractionNetworkingEnabled() bool + InproxyEnabled() bool } // TunnelProtocolPortLists is a map from tunnel protocol names (or "All") to a @@ -561,7 +600,8 @@ func (serverEntry *ServerEntry) GetSupportedProtocols( limitTunnelProtocols TunnelProtocols, limitTunnelDialPortNumbers TunnelProtocolPortLists, limitQUICVersions QUICVersions, - excludeIntensive bool) TunnelProtocols { + excludeIntensive bool, + excludeInproxy bool) TunnelProtocols { supportedProtocols := make(TunnelProtocols, 0) @@ -589,9 +629,22 @@ func (serverEntry *ServerEntry) GetSupportedProtocols( continue } - if (TunnelProtocolUsesQUIC(tunnelProtocol) && !conditionallyEnabled.QUICEnabled()) || + // While in-proxy protocols are TunnelProtocolIsResourceIntensive, + // there's an additional use case for excluding in-proxy protocols as + // controlled by InproxyTunnelProtocolSelectionProbability. + + if excludeInproxy && TunnelProtocolUsesInproxy(tunnelProtocol) { + continue + } + + if (TunnelProtocolUsesQUIC(tunnelProtocol) && + !conditionallyEnabled.QUICEnabled()) || + (TunnelProtocolUsesRefractionNetworking(tunnelProtocol) && - !conditionallyEnabled.RefractionNetworkingEnabled()) { + !conditionallyEnabled.RefractionNetworkingEnabled()) || + + (TunnelProtocolUsesInproxy(tunnelProtocol) && + !conditionallyEnabled.InproxyEnabled()) { continue } @@ -639,64 +692,215 @@ func (serverEntry *ServerEntry) GetSupportedProtocols( supportedProtocols = append(supportedProtocols, tunnelProtocol) } + return supportedProtocols } +var frontedMeekHTTPSDialPortNumber = int32(443) + +// SetFrontedMeekHTTPDialPortNumber sets the FRONTED-MEEK-OSSH dial port +// number, which defaults to 443. Overriding the port number enables running +// test servers where binding to port 443 is not possible. 
+func SetFrontedMeekHTTPDialPortNumber(port int) { + atomic.StoreInt32(&frontedMeekHTTPSDialPortNumber, int32(port)) +} + func (serverEntry *ServerEntry) GetDialPortNumber(tunnelProtocol string) (int, error) { if !serverEntry.SupportsProtocol(tunnelProtocol) { return 0, errors.TraceNew("protocol not supported") } - switch tunnelProtocol { + if !TunnelProtocolUsesInproxy(tunnelProtocol) { - case TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH: - if serverEntry.TlsOSSHPort == 0 { - // Special case: a server which supports UNFRONTED-MEEK-HTTPS-OSSH - // or UNFRONTED-MEEK-SESSION-TICKET-OSSH also supports TLS-OSSH - // over the same port. + switch tunnelProtocol { + + case TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH: + if serverEntry.TlsOSSHPort == 0 { + // Special case: a server which supports UNFRONTED-MEEK-HTTPS-OSSH + // or UNFRONTED-MEEK-SESSION-TICKET-OSSH also supports TLS-OSSH + // over the same port. + return serverEntry.MeekServerPort, nil + } + return serverEntry.TlsOSSHPort, nil + + case TUNNEL_PROTOCOL_SSH: + return serverEntry.SshPort, nil + + case TUNNEL_PROTOCOL_OBFUSCATED_SSH: + return serverEntry.SshObfuscatedPort, nil + + case TUNNEL_PROTOCOL_TAPDANCE_OBFUSCATED_SSH: + return serverEntry.SshObfuscatedTapDancePort, nil + + case TUNNEL_PROTOCOL_CONJURE_OBFUSCATED_SSH: + return serverEntry.SshObfuscatedConjurePort, nil + + case TUNNEL_PROTOCOL_QUIC_OBFUSCATED_SSH: + return serverEntry.SshObfuscatedQUICPort, nil + + case TUNNEL_PROTOCOL_FRONTED_MEEK, + TUNNEL_PROTOCOL_FRONTED_MEEK_QUIC_OBFUSCATED_SSH: + return int(atomic.LoadInt32(&frontedMeekHTTPSDialPortNumber)), nil + + case TUNNEL_PROTOCOL_FRONTED_MEEK_HTTP: + return 80, nil + + case TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS, + TUNNEL_PROTOCOL_UNFRONTED_MEEK_SESSION_TICKET, + TUNNEL_PROTOCOL_UNFRONTED_MEEK: return serverEntry.MeekServerPort, nil } - return serverEntry.TlsOSSHPort, nil - case TUNNEL_PROTOCOL_SSH: - return serverEntry.SshPort, nil + } else { - case TUNNEL_PROTOCOL_OBFUSCATED_SSH: - return serverEntry.SshObfuscatedPort, nil + // Distinct dial/listening ports are used for tunnel protocols when + // used as an in-proxy 2nd hop, as the server will require a relayed + // in-proxy broker report for in-proxy 2nd hops. 
- case TUNNEL_PROTOCOL_TAPDANCE_OBFUSCATED_SSH: - return serverEntry.SshObfuscatedTapDancePort, nil + switch TunnelProtocolMinusInproxy(tunnelProtocol) { - case TUNNEL_PROTOCOL_CONJURE_OBFUSCATED_SSH: - return serverEntry.SshObfuscatedConjurePort, nil + case TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH: + if serverEntry.InproxyTlsOSSHPort == 0 { + return serverEntry.InproxyMeekPort, nil + } + return serverEntry.InproxyTlsOSSHPort, nil - case TUNNEL_PROTOCOL_QUIC_OBFUSCATED_SSH: - return serverEntry.SshObfuscatedQUICPort, nil + case TUNNEL_PROTOCOL_SSH: + return serverEntry.InproxySSHPort, nil - case TUNNEL_PROTOCOL_FRONTED_MEEK, - TUNNEL_PROTOCOL_FRONTED_MEEK_QUIC_OBFUSCATED_SSH: - return int(atomic.LoadInt32(&frontedMeekHTTPSDialPortNumber)), nil + case TUNNEL_PROTOCOL_OBFUSCATED_SSH: + return serverEntry.InproxyOSSHPort, nil - case TUNNEL_PROTOCOL_FRONTED_MEEK_HTTP: - return 80, nil + case TUNNEL_PROTOCOL_QUIC_OBFUSCATED_SSH: + return serverEntry.InproxyQUICPort, nil + + case TUNNEL_PROTOCOL_FRONTED_MEEK, + TUNNEL_PROTOCOL_FRONTED_MEEK_QUIC_OBFUSCATED_SSH: + return int(atomic.LoadInt32(&frontedMeekHTTPSDialPortNumber)), nil + + case TUNNEL_PROTOCOL_FRONTED_MEEK_HTTP: + return 80, nil + + case TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS, + TUNNEL_PROTOCOL_UNFRONTED_MEEK_SESSION_TICKET, + TUNNEL_PROTOCOL_UNFRONTED_MEEK: + return serverEntry.InproxyMeekPort, nil + } - case TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS, - TUNNEL_PROTOCOL_UNFRONTED_MEEK_SESSION_TICKET, - TUNNEL_PROTOCOL_UNFRONTED_MEEK: - return serverEntry.MeekServerPort, nil } return 0, errors.TraceNew("unknown protocol") } -var frontedMeekHTTPSDialPortNumber = int32(443) +// GetTLSSessionCacheKeyAddress returns a network address (IP:port) that is +// suitable to use as a TLS session cache key. +// +// By default, TLS implementations, including crypto/tls and utls use SNI as a +// session cache key, but this is not a suitable key when SNI is manipulated. +// When SNI is not present, these implementations fall back to using the peer +// remote address, which is also not a suitable key in cases where there is a +// non-TLS-terminating temporary intermediary, such as an in-proxy proxy. +// +// The key is unique to the Psiphon server and tunnel protocol listener. For +// direct tunnel protocols, the key precisely maps TLS sessions to the +// corresponding TLS server. For indirect tunnel protocols, with an +// intermediate TLS server, the key is an approximate map which assumes the +// redials will mostly use the same intermediate TLS server. +// +// Do not use the GetTLSSessionCacheKeyAddress value for dialing. +func (serverEntry *ServerEntry) GetTLSSessionCacheKeyAddress(tunnelProtocol string) (string, error) { -// SetFrontedMeekHTTPDialPortNumber sets the FRONTED-MEEK-OSSH dial port -// number, which defaults to 443. Overriding the port number enables running -// test servers where binding to port 443 is not possible. -func SetFrontedMeekHTTPDialPortNumber(port int) { - atomic.StoreInt32(&frontedMeekHTTPSDialPortNumber, int32(port)) + port, err := serverEntry.GetDialPortNumber(tunnelProtocol) + if err != nil { + return "", errors.Trace(err) + } + + return net.JoinHostPort(serverEntry.IpAddress, strconv.Itoa(port)), nil +} + +// IsValidInproxyDialAddress indicates whether the dial destination +// network/host/port matches the dial parameters for any of the tunnel +// protocols supported by the server entry. +// +// Limitations: +// - TAPDANCE-OSSH and CONJURE-OSSH are not supported. 
+// - The host header is not considered in the case of fronted protocols. +func (serverEntry *ServerEntry) IsValidInproxyDialAddress( + networkProtocol string, dialHost string, dialPortNumber int) bool { + + // The TapDance and Conjure destination addresses are not included + // in the server entry, so TAPDANCE-OSSH and CONJURE-OSSH dial + // destinations cannot be validated for in-proxy use. + + for _, tunnelProtocol := range SupportedTunnelProtocols { + + if !TunnelProtocolUsesInproxy(tunnelProtocol) { + continue + } + + if !serverEntry.SupportsProtocol(tunnelProtocol) { + continue + } + + usesTCP := TunnelProtocolUsesTCP(tunnelProtocol) + if (usesTCP && networkProtocol != "tcp") || (!usesTCP && networkProtocol != "udp") { + continue + } + + tunnelPortNumber, err := serverEntry.GetDialPortNumber(tunnelProtocol) + if err != nil || tunnelPortNumber != dialPortNumber { + // Silently fail on error as the server entry should be well-formed. + continue + } + + if !TunnelProtocolUsesFrontedMeek(tunnelProtocol) { + + // For all direct protocols, the destination host must be the + // server IP address. + + if serverEntry.IpAddress != dialHost { + continue + } + + } else { + + // For fronted protocols, the destination host may be domain and + // must match either MeekFrontingAddressesRegex or + // MeekFrontingAddresses. As in psiphon.selectFrontingParameters, + // MeekFrontingAddressesRegex takes precedence when not empty. + // + // As the host header value is not checked here, additional + // measures must be taken to ensure the destination is a Psiphon server. + + if len(serverEntry.MeekFrontingAddressesRegex) > 0 { + + re, err := regexp.Compile(serverEntry.MeekFrontingAddressesRegex) + if err != nil { + continue + } + + // The entire dialHost string must match the regex. + re.Longest() + match := re.FindString(dialHost) + if match == "" || match != dialHost { + continue + } + + } else { + + if !common.Contains(serverEntry.MeekFrontingAddresses, dialHost) { + continue + } + } + } + + // When all of the checks pass for this protocol, the input is a valid + // dial destination. + return true + } + + return false } // GetSupportedTacticsProtocols returns a list of tunnel protocols, @@ -729,28 +933,14 @@ func (serverEntry *ServerEntry) SupportsSSHAPIRequests() bool { return serverEntry.hasCapability(CAPABILITY_SSH_API_REQUESTS) } -func (serverEntry *ServerEntry) GetUntunneledWebRequestPorts() []string { - ports := make([]string, 0) - if serverEntry.hasCapability(CAPABILITY_UNTUNNELED_WEB_API_REQUESTS) { - // Server-side configuration quirk: there's a port forward from - // port 443 to the web server, which we can try, except on servers - // running FRONTED_MEEK, which listens on port 443. 
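A brief usage sketch for IsValidInproxyDialAddress above, assumed to run with the protocol package imported; the address, port, and capability values are placeholders mirroring the TestIsValidInproxyDialAddress cases added later in this diff.

func sketchValidateInproxyDial() {

	entry := &protocol.ServerEntry{
		IpAddress:       "192.168.0.1",
		InproxyOSSHPort: 2,
		Capabilities:    []string{"INPROXY-WEBRTC-OSSH"},
	}

	// A TCP dial to the server's in-proxy OSSH port matches a supported
	// in-proxy tunnel protocol...
	ok := entry.IsValidInproxyDialAddress("tcp", "192.168.0.1", 2) // true

	// ...while a different network protocol, host, or port does not.
	ok = entry.IsValidInproxyDialAddress("udp", "192.168.0.1", 2) // false
	ok = entry.IsValidInproxyDialAddress("tcp", "192.168.0.1", 4) // false
	_ = ok
}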
- if !serverEntry.SupportsProtocol(TUNNEL_PROTOCOL_FRONTED_MEEK) { - ports = append(ports, "443") - } - ports = append(ports, serverEntry.WebServerPort) - } - return ports +func (serverEntry *ServerEntry) HasProviderID() bool { + return serverEntry.ProviderID != "" } func (serverEntry *ServerEntry) HasSignature() bool { return serverEntry.Signature != "" } -func (serverEntry *ServerEntry) HasProviderID() bool { - return serverEntry.ProviderID != "" -} - func (serverEntry *ServerEntry) GetDiagnosticID() string { return TagToDiagnosticID(serverEntry.Tag) } diff --git a/psiphon/common/protocol/serverEntry_test.go b/psiphon/common/protocol/serverEntry_test.go index 2e58b064d..96cf12a7b 100644 --- a/psiphon/common/protocol/serverEntry_test.go +++ b/psiphon/common/protocol/serverEntry_test.go @@ -295,3 +295,86 @@ func testServerEntryListSignatures(t *testing.T, setExplicitTag bool) { t.Fatalf("AddSignature unexpectedly succeeded") } } + +func TestIsValidInproxyDialAddress(t *testing.T) { + + serverEntry := &ServerEntry{ + IpAddress: "192.168.0.1", + InproxySSHPort: 1, + InproxyOSSHPort: 2, + InproxyQUICPort: 3, + Capabilities: []string{ + "handshake", + "INPROXY-WEBRTC-SSH", + "INPROXY-WEBRTC-OSSH", + "INPROXY-WEBRTC-QUIC-OSSH", + "INPROXY-WEBRTC-FRONTED-MEEK-OSSH", + }, + MeekFrontingAddressesRegex: "[ab]+", + MeekServerPort: 443, + } + + testCases := []struct { + description string + networkProtocol string + dialHost string + dialPortNumber int + isValid bool + }{ + { + "valid IP dial", + "tcp", "192.168.0.1", 1, + true, + }, + { + "valid domain dial", + "tcp", "aaabbbaaabbb", 443, + true, + }, + { + "valid UDP dial", + "udp", "192.168.0.1", 3, + true, + }, + { + "invalid network dial", + "udp", "192.168.0.1", 1, + false, + }, + { + "invalid IP dial", + "tcp", "192.168.0.2", 1, + false, + }, + { + "invalid domain dial", + "tcp", "aaabbbcccbbb", 443, + false, + }, + { + "invalid port dial", + "tcp", "192.168.0.1", 4, + false, + }, + { + "invalid domain port dial", + "tcp", "aaabbbaaabbb", 80, + false, + }, + { + "invalid domain newline dial", + "tcp", "aaabbbaaabbb\nccc", 443, + false, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.description, func(t *testing.T) { + if testCase.isValid != serverEntry.IsValidInproxyDialAddress( + testCase.networkProtocol, testCase.dialHost, testCase.dialPortNumber) { + + t.Errorf("unexpected IsValidInproxyDialAddress result") + } + }) + } +} diff --git a/psiphon/common/quic/obfuscator.go b/psiphon/common/quic/obfuscator.go index d0f4e4a2f..269ec3c55 100644 --- a/psiphon/common/quic/obfuscator.go +++ b/psiphon/common/quic/obfuscator.go @@ -31,6 +31,7 @@ import ( "sync/atomic" "time" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/crypto/Yawning/chacha20" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" @@ -106,6 +107,7 @@ const ( // introducing some risk of fragmentation and/or dropped packets. 
type ObfuscatedPacketConn struct { net.PacketConn + remoteAddr *net.UDPAddr isServer bool isIETFClient bool isDecoyClient bool @@ -135,7 +137,7 @@ func (p *peerMode) isStale() bool { func NewClientObfuscatedPacketConn( packetConn net.PacketConn, - isServer bool, + remoteAddr *net.UDPAddr, isIETFClient bool, isDecoyClient bool, obfuscationKey string, @@ -145,7 +147,8 @@ func NewClientObfuscatedPacketConn( return newObfuscatedPacketConn( packetConn, - isServer, + remoteAddr, + false, isIETFClient, isDecoyClient, obfuscationKey, @@ -156,7 +159,6 @@ func NewClientObfuscatedPacketConn( func NewServerObfuscatedPacketConn( packetConn net.PacketConn, - isServer bool, isIETFClient bool, isDecoyClient bool, obfuscationKey string, @@ -164,7 +166,8 @@ func NewServerObfuscatedPacketConn( return newObfuscatedPacketConn( packetConn, - isServer, + nil, + true, isIETFClient, isDecoyClient, obfuscationKey, @@ -176,6 +179,7 @@ func NewServerObfuscatedPacketConn( // newObfuscatedPacketConn creates a new ObfuscatedPacketConn. func newObfuscatedPacketConn( packetConn net.PacketConn, + remoteAddr *net.UDPAddr, isServer bool, isIETFClient bool, isDecoyClient bool, @@ -184,6 +188,13 @@ func newObfuscatedPacketConn( obfuscationNonceTransformerParameters *transforms.ObfuscatorSeedTransformerParameters, ) (*ObfuscatedPacketConn, error) { + // Store the specified remoteAddr, which is used to implement + // net.Conn.RemoteAddr, as the input packetConn may return a nil remote + // addr from ReadFrom. This must be set and is only set for clients. + if isServer != (remoteAddr == nil) { + return nil, errors.TraceNew("invalid remoteAddr") + } + // There is no replay of obfuscation "encryption", just padding. nonceSeed, err := prng.NewSeed() if err != nil { @@ -192,6 +203,7 @@ func newObfuscatedPacketConn( conn := &ObfuscatedPacketConn{ PacketConn: packetConn, + remoteAddr: remoteAddr, isServer: isServer, isIETFClient: isIETFClient, isDecoyClient: isDecoyClient, @@ -313,9 +325,9 @@ func (conn *ObfuscatedPacketConn) WriteTo(p []byte, addr net.Addr) (int, error) // for x/net/internal/socket.Message and quic-go uses this one type for both // IPv4 and IPv6 packets. // -// Read, Write, and RemoteAddr are present to satisfy the net.Conn interface, -// to which ObfuscatedPacketConn is converted internally, via quic-go, in -// x/net/ipv[4|6] for OOB manipulation. These functions do not need to be +// Read and Write are present to satisfy the net.Conn interface, to which +// ObfuscatedPacketConn is converted internally, via quic-go, in x/net/ipv +// [4|6] for OOB manipulation. These functions do not need to be // implemented. func (conn *ObfuscatedPacketConn) ReadMsgUDP(p, oob []byte) (int, int, int, *net.UDPAddr, error) { @@ -362,7 +374,22 @@ func (conn *ObfuscatedPacketConn) Write(_ []byte) (int, error) { } func (conn *ObfuscatedPacketConn) RemoteAddr() net.Addr { - return nil + return conn.remoteAddr +} + +// GetMetrics implements the common.MetricsSource interface. +func (conn *ObfuscatedPacketConn) GetMetrics() common.LogFields { + + logFields := make(common.LogFields) + + // Include metrics, such as inproxy and fragmentor metrics, from the + // underlying dial conn. 
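// Illustrative sketch (not part of the upstream diff): the constructor
// invariant added in newObfuscatedPacketConn above -- clients must supply
// remoteAddr, servers must not -- is expressed as isServer != (remoteAddr == nil).
// The toy validate function below is hypothetical and only shows that the
// expression rejects exactly the two mismatched combinations.
package main

import (
	"fmt"
	"net"
)

func validate(isServer bool, remoteAddr *net.UDPAddr) error {
	if isServer != (remoteAddr == nil) {
		return fmt.Errorf("invalid remoteAddr: isServer=%v, remoteAddr=%v", isServer, remoteAddr)
	}
	return nil
}

func main() {
	addr := &net.UDPAddr{IP: net.ParseIP("192.0.2.1"), Port: 443}
	fmt.Println(validate(false, addr)) // client with remoteAddr: ok
	fmt.Println(validate(true, nil))   // server without remoteAddr: ok
	fmt.Println(validate(false, nil))  // client missing remoteAddr: error
	fmt.Println(validate(true, addr))  // server with remoteAddr: error
}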
+ underlyingMetrics, ok := conn.PacketConn.(common.MetricsSource) + if ok { + logFields.Add(underlyingMetrics.GetMetrics()) + } + + return logFields } func (conn *ObfuscatedPacketConn) readPacketWithType( @@ -371,6 +398,13 @@ func (conn *ObfuscatedPacketConn) readPacketWithType( for { n, oobn, flags, addr, isIETF, err := conn.readPacket(p, oob) + // Use the remoteAddr specified in NewClientObfuscatedPacketConn when + // the underlying ReadFrom does not return a remote addr. This is the + // case with inproxy.ClientConn. + if addr == nil { + addr = conn.remoteAddr + } + // When enabled, and when a packet is received, sometimes immediately // respond with a decoy packet, which is entirely random. Sending a // small number of these packets early in the connection is intended @@ -812,7 +846,9 @@ func (conn *ObfuscatedPacketConn) writePacket( func getMaxPreDiscoveryPacketSize(addr net.Addr) int { maxPacketSize := MAX_PRE_DISCOVERY_PACKET_SIZE_IPV4 - if udpAddr, ok := addr.(*net.UDPAddr); ok && udpAddr.IP.To4() == nil { + if udpAddr, ok := addr.(*net.UDPAddr); ok && + udpAddr != nil && udpAddr.IP != nil && udpAddr.IP.To4() == nil { + maxPacketSize = MAX_PRE_DISCOVERY_PACKET_SIZE_IPV6 } return maxPacketSize diff --git a/psiphon/common/quic/quic.go b/psiphon/common/quic/quic.go index 994b62136..81fc7124b 100644 --- a/psiphon/common/quic/quic.go +++ b/psiphon/common/quic/quic.go @@ -144,7 +144,7 @@ func Listen( obfuscationKey string, enableGQUIC bool) (net.Listener, error) { - certificate, privateKey, err := common.GenerateWebServerCertificate( + certificate, privateKey, _, err := common.GenerateWebServerCertificate( values.GetHostName()) if err != nil { return nil, errors.Trace(err) @@ -180,7 +180,7 @@ func Listen( // timely shutdown. obfuscatedPacketConn, err := NewServerObfuscatedPacketConn( - udpConn, true, false, false, obfuscationKey, seed) + udpConn, false, false, obfuscationKey, seed) if err != nil { udpConn.Close() return nil, errors.Trace(err) @@ -219,8 +219,10 @@ func Listen( // The non-strict case where ok is true and logFields is not nil is // ignored, and nothing is logged in that scenario. + strictMode := false + ok, logFields := clientRandomHistory.AddNew( - false, remoteAddr.String(), "client-hello-random", clientHelloRandom) + strictMode, remoteAddr.String(), "client-hello-random", clientHelloRandom) if !ok && logFields != nil { irregularTunnelLogger( common.IPAddressFromAddr(remoteAddr), @@ -437,7 +439,7 @@ func Dial( if isObfuscated(quicVersion) { obfuscatedPacketConn, err := NewClientObfuscatedPacketConn( packetConn, - false, + remoteAddr, isIETFVersionNumber(versionNumber), isDecoy(quicVersion), obfuscationKey, @@ -686,6 +688,21 @@ func (conn *Conn) SetWriteDeadline(t time.Time) error { return conn.stream.SetWriteDeadline(t) } +// GetMetrics implements the common.MetricsSource interface. +func (conn *Conn) GetMetrics() common.LogFields { + + logFields := make(common.LogFields) + + // Include metrics, such as inproxy and fragmentor metrics, from the + // underlying dial conn. + underlyingMetrics, ok := conn.packetConn.(common.MetricsSource) + if ok { + logFields.Add(underlyingMetrics.GetMetrics()) + } + + return logFields +} + // QUICTransporter implements the psiphon.transporter interface, used in // psiphon.MeekConn for HTTP requests, which requires a RoundTripper and // CloseIdleConnections. 
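// Illustrative sketch (not part of the upstream diff): the GetMetrics methods
// added to ObfuscatedPacketConn and quic.Conn above follow the same delegation
// pattern -- check whether the wrapped conn implements common.MetricsSource
// and, if so, merge its log fields. The standalone types below are
// hypothetical stand-ins for common.LogFields and common.MetricsSource,
// showing only the type-assertion pass-through.
package main

import (
	"fmt"
	"net"
)

type LogFields map[string]interface{}

func (f LogFields) Add(other LogFields) {
	for k, v := range other {
		f[k] = v
	}
}

type MetricsSource interface {
	GetMetrics() LogFields
}

type wrapperConn struct {
	net.Conn // underlying conn; may or may not be a MetricsSource
}

func (c *wrapperConn) GetMetrics() LogFields {
	logFields := make(LogFields)
	if underlying, ok := c.Conn.(MetricsSource); ok {
		logFields.Add(underlying.GetMetrics())
	}
	return logFields
}

func main() {
	// A nil underlying conn is not a MetricsSource, so no fields are merged.
	fmt.Println((&wrapperConn{}).GetMetrics())
}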
diff --git a/psiphon/common/raceDetector_disabled.go b/psiphon/common/raceDetector_disabled.go new file mode 100644 index 000000000..33f3ddd41 --- /dev/null +++ b/psiphon/common/raceDetector_disabled.go @@ -0,0 +1,25 @@ +//go:build !race +// +build !race + +/* + * Copyright (c) 2024, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package common + +const IsRaceDetectorEnabled = false diff --git a/psiphon/common/raceDetector_enabled.go b/psiphon/common/raceDetector_enabled.go new file mode 100644 index 000000000..de59574b9 --- /dev/null +++ b/psiphon/common/raceDetector_enabled.go @@ -0,0 +1,25 @@ +//go:build race +// +build race + +/* + * Copyright (c) 2024, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package common + +const IsRaceDetectorEnabled = true diff --git a/psiphon/common/refraction/refraction.go b/psiphon/common/refraction/refraction.go index e77392f78..420ce20e0 100644 --- a/psiphon/common/refraction/refraction.go +++ b/psiphon/common/refraction/refraction.go @@ -866,7 +866,7 @@ type dialManager struct { runCtx context.Context stopRunning context.CancelFunc - conns *common.Conns + conns *common.Conns[net.Conn] } func newDialManager() *dialManager { @@ -874,7 +874,7 @@ func newDialManager() *dialManager { return &dialManager{ runCtx: runCtx, stopRunning: stopRunning, - conns: common.NewConns(), + conns: common.NewConns[net.Conn](), } } diff --git a/psiphon/common/resolver/resolver.go b/psiphon/common/resolver/resolver.go index e7daf3de7..887ad9c69 100644 --- a/psiphon/common/resolver/resolver.go +++ b/psiphon/common/resolver/resolver.go @@ -272,7 +272,7 @@ type resolverMetrics struct { responsesIPv6 int defaultResolves int defaultSuccesses int - peakInFlight int64 + peakInFlight int minRTT time.Duration maxRTT time.Duration } @@ -453,11 +453,15 @@ func (r *Resolver) MakeResolveParameters( // ResolveAddress splits the input host:port address, calls ResolveIP to // resolve the IP address of the host, selects an IP if there are multiple, // and returns a rejoined IP:port. +// +// IP address selection is random. When network input is set +// to "ip4"/"tcp4"/"udp4" or "ip6"/"tcp6"/"udp6", selection is limited to +// IPv4 or IPv6, respectively. 
func (r *Resolver) ResolveAddress( ctx context.Context, networkID string, params *ResolveParameters, - address string) (string, error) { + network, address string) (string, error) { hostname, port, err := net.SplitHostPort(address) if err != nil { @@ -469,7 +473,35 @@ func (r *Resolver) ResolveAddress( return "", errors.Trace(err) } - return net.JoinHostPort(IPs[prng.Intn(len(IPs))].String(), port), nil + // Don't shuffle or otherwise mutate the slice returned by ResolveIP. + permutedIndexes := prng.Perm(len(IPs)) + + index := 0 + + switch network { + case "ip4", "tcp4", "udp4": + index = -1 + for _, i := range permutedIndexes { + if IPs[i].To4() != nil { + index = i + break + } + } + case "ip6", "tcp6", "udp6": + index = -1 + for _, i := range permutedIndexes { + if IPs[i].To4() == nil { + index = i + break + } + } + } + + if index == -1 { + return "", errors.Tracef("no IP for network '%s'", network) + } + + return net.JoinHostPort(IPs[index].String(), port), nil } // ResolveIP resolves a domain name. @@ -495,11 +527,14 @@ func (r *Resolver) ResolveAddress( // often blocked or less common. Instead, ResolveIP makes a best effort to // evade plaintext UDP DNS interference by ignoring invalid responses and by // optionally applying protocol transforms that may evade blocking. +// +// Due to internal caching, the caller must not mutate returned net.IP slice +// or entries. func (r *Resolver) ResolveIP( ctx context.Context, networkID string, params *ResolveParameters, - hostname string) (x []net.IP, y error) { + hostname string) ([]net.IP, error) { // ResolveIP does _not_ lock r.mutex for the lifetime of the function, to // ensure many ResolveIP calls can run concurrently. @@ -588,6 +623,8 @@ func (r *Resolver) ResolveIP( // logic. IPs := r.getCache(hostname) if IPs != nil { + // TODO: it would be safer to make and return a copy of the cached + // slice, instead of depending on all callers to not mutate the slice. return IPs, nil } @@ -631,14 +668,15 @@ func (r *Resolver) ResolveIP( // Orchestrate the DNS requests - resolveCtx, cancelFunc := context.WithCancel(ctx) - defer cancelFunc() + resolveCtx, cancelFunc := context.WithCancelCause(ctx) + defer cancelFunc(nil) waitGroup := new(sync.WaitGroup) - conns := common.NewConns() + conns := common.NewConns[net.Conn]() type answer struct { - attempt int - IPs []net.IP - TTLs []time.Duration + attempt int + questionType resolverQuestionType + IPs []net.IP + TTLs []time.Duration } var maxAttempts int if params.PreferAlternateDNSServer { @@ -648,15 +686,32 @@ func (r *Resolver) ResolveIP( maxAttempts = len(servers) * params.AttemptsPerServer } answerChan := make(chan *answer, maxAttempts*2) - inFlight := int64(0) - awaitA := int32(1) - awaitAAAA := int32(1) - if !hasIPv6Route { - awaitAAAA = 0 - } + inFlight := 0 + awaitA := true + awaitAAAA := hasIPv6Route var result *answer var lastErr atomic.Value + trackResult := func(a *answer) { + + // A result is sent from every attempt goroutine that is launched, + // even in the case of an error, in which case the result is nil. + // Update the number of in-flight attempts as results are received. + // Mark no longer awaiting A or AAAA as long as there is a valid + // response, even if there are no IPs in the IPv6 case. 
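// Illustrative sketch (not part of the upstream diff): the network-constrained
// IP selection added to ResolveAddress above iterates a random permutation of
// indexes, so the cached slice returned by ResolveIP is never shuffled or
// otherwise mutated. This standalone helper reproduces that selection logic,
// using math/rand in place of the prng package.
package main

import (
	"fmt"
	"math/rand"
	"net"
)

func pickIP(network string, IPs []net.IP) (net.IP, error) {
	for _, i := range rand.Perm(len(IPs)) {
		switch network {
		case "ip4", "tcp4", "udp4":
			if IPs[i].To4() != nil {
				return IPs[i], nil
			}
		case "ip6", "tcp6", "udp6":
			if IPs[i].To4() == nil {
				return IPs[i], nil
			}
		default:
			// No network constraint: any randomly selected IP is acceptable.
			return IPs[i], nil
		}
	}
	return nil, fmt.Errorf("no IP for network '%s'", network)
}

func main() {
	IPs := []net.IP{net.ParseIP("192.0.2.1"), net.ParseIP("2001:db8::1")}
	ip, _ := pickIP("tcp6", IPs)
	fmt.Println(ip) // always the IPv6 address
}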
+ if inFlight > 0 { + inFlight -= 1 + } + if a != nil { + switch a.questionType { + case resolverQuestionTypeA: + awaitA = false + case resolverQuestionTypeAAAA: + awaitAAAA = false + } + } + } + stop := false for i := 0; !stop && i < maxAttempts; i++ { @@ -694,20 +749,28 @@ func (r *Resolver) ResolveIP( // correct, we must increment inFlight in this outer goroutine to // ensure the await logic sees either inFlight > 0 or an answer // in the channel. - r.updateMetricPeakInFlight(atomic.AddInt64(&inFlight, 1)) + inFlight += 1 + r.updateMetricPeakInFlight(inFlight) go func(attempt int, questionType resolverQuestionType, useProtocolTransform bool) { defer waitGroup.Done() - // We must decrement inFlight only after sending an answer and - // setting awaitA or awaitAAAA to ensure that the await logic - // in the outer goroutine will see inFlight 0 only once those - // operations are complete. - // - // We cannot wait and decrement inFlight when the outer - // goroutine receives answers, as no answer is sent in some - // cases, such as when the resolve fails due to NXDOMAIN. - defer atomic.AddInt64(&inFlight, -1) + // Always send a result back to the main loop, even if this + // attempt fails, so the main loop proceeds to the next + // iteration immediately. Nil is sent in failure cases. When + // the answer is not nil, it's already been sent. + var a *answer + defer func() { + if a == nil { + // The channel should have sufficient buffering for + // the send to never block; the default case is used + // to avoid a hang in the case of a bug. + select { + case answerChan <- a: + default: + } + } + }() // The request count metric counts the _intention_ to send // requests, as there's a possibility that newResolverConn or @@ -734,7 +797,8 @@ func (r *Resolver) ResolveIP( // request conns so that they can be closed, and any blocking // network I/O interrupted, below, if resolveCtx is done. if !conns.Add(conn) { - // Add fails when conns is already closed. + // Add fails when conns is already closed. Do not + // overwrite lastErr in this case. return } @@ -791,23 +855,29 @@ func (r *Resolver) ResolveIP( return } - if len(IPs) > 0 { - select { - case answerChan <- &answer{attempt: attempt, IPs: IPs, TTLs: TTLs}: - default: - } - } - - // Mark no longer awaiting A or AAAA as long as there is a - // valid response, even if there are no IPs in the IPv6 case. + // Update response stats. switch questionType { case resolverQuestionTypeA: r.updateMetricResponsesIPv4() - atomic.StoreInt32(&awaitA, 0) case resolverQuestionTypeAAAA: r.updateMetricResponsesIPv6() - atomic.StoreInt32(&awaitAAAA, 0) - default: + } + + // Send the answer back to the main loop. + if len(IPs) > 0 || questionType == resolverQuestionTypeAAAA { + a = &answer{ + attempt: attempt, + questionType: questionType, + IPs: IPs, + TTLs: TTLs} + + // The channel should have sufficient buffering for + // the send to never block; the default case is used + // to avoid a hang in the case of a bug. + select { + case answerChan <- a: + default: + } } }(i+1, questionType, useProtocolTransform) @@ -817,11 +887,14 @@ func (r *Resolver) ResolveIP( select { case result = <-answerChan: - // When the first answer, a response with valid IPs, arrives, exit - // the attempts loop. The following await branch may collect - // additional answers. - params.setFirstAttemptWithAnswer(result.attempt) - stop = true + trackResult(result) + if result != nil { + // When the first answer, a response with valid IPs, arrives, exit + // the attempts loop. 
The following await branch may collect + // additional answers. + params.setFirstAttemptWithAnswer(result.attempt) + stop = true + } case <-timer.C: // When requestTimeout arrives, loop around and launch the next // attempt; leave the existing requests running in case they @@ -832,7 +905,8 @@ func (r *Resolver) ResolveIP( // // Append the existing lastErr, which may convey useful // information to be reported in a failed_tunnel error message. - lastErr.Store(errors.Tracef("%v (lastErr: %v)", ctx.Err(), lastErr.Load())) + lastErr.Store(errors.Tracef( + "%v (lastErr: %v)", context.Cause(resolveCtx), lastErr.Load())) stop = true } } @@ -850,8 +924,11 @@ func (r *Resolver) ResolveIP( for loop := true; loop; { select { case nextAnswer := <-answerChan: - result.IPs = append(result.IPs, nextAnswer.IPs...) - result.TTLs = append(result.TTLs, nextAnswer.TTLs...) + trackResult(nextAnswer) + if nextAnswer != nil { + result.IPs = append(result.IPs, nextAnswer.IPs...) + result.TTLs = append(result.TTLs, nextAnswer.TTLs...) + } default: loop = false } @@ -867,8 +944,8 @@ func (r *Resolver) ResolveIP( // have an answer. if result != nil && resolveCtx.Err() == nil && - atomic.LoadInt64(&inFlight) > 0 && - (atomic.LoadInt32(&awaitA) != 0 || atomic.LoadInt32(&awaitAAAA) != 0) && + inFlight > 0 && + (awaitA || awaitAAAA) && params.AwaitTimeout > 0 { resetTimer(params.AwaitTimeout) @@ -878,8 +955,11 @@ func (r *Resolver) ResolveIP( stop := false select { case nextAnswer := <-answerChan: - result.IPs = append(result.IPs, nextAnswer.IPs...) - result.TTLs = append(result.TTLs, nextAnswer.TTLs...) + trackResult(nextAnswer) + if nextAnswer != nil { + result.IPs = append(result.IPs, nextAnswer.IPs...) + result.TTLs = append(result.TTLs, nextAnswer.TTLs...) + } case <-timer.C: timerDrained = true stop = true @@ -887,9 +967,8 @@ func (r *Resolver) ResolveIP( stop = true } - if stop || - atomic.LoadInt64(&inFlight) == 0 || - (atomic.LoadInt32(&awaitA) == 0 && atomic.LoadInt32(&awaitAAAA) == 0) { + if stop || inFlight == 0 || (!awaitA && !awaitAAAA) { + break } } @@ -900,13 +979,16 @@ func (r *Resolver) ResolveIP( } // Interrupt all workers. - cancelFunc() + cancelFunc(errors.TraceNew("resolve canceled")) conns.CloseAll() waitGroup.Wait() // When there's no answer, return the last error. if result == nil { err := lastErr.Load() + if err == nil { + err = context.Cause(resolveCtx) + } if err == nil { err = errors.TraceNew("unexpected missing error") } @@ -1060,6 +1142,13 @@ func (r *Resolver) updateNetworkState(networkID string) { // transparently). if updateIPv6Route { + // TODO: the HasIPv6Route callback provides hasRoutableIPv6Interface + // functionality on platforms where that internal implementation + // fails. In particular, "route ip+net: netlinkrib: permission + // denied" on Android; see Go issue 40569). This Android case can be + // fixed, and the callback retired, by sharing the workaround now + // implemented in inproxy.pionNetwork.Interfaces. 
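// Illustrative sketch (not part of the upstream diff): the resolver changes
// above replace context.WithCancel with context.WithCancelCause and report
// context.Cause in errors, so a cancellation carries a descriptive reason
// rather than the generic "context canceled". This standalone example shows
// the standard-library mechanism in isolation.
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())

	// Cancel with a specific cause, as ResolveIP now does with
	// "resolve canceled" before closing its workers.
	cancel(errors.New("resolve canceled"))

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context canceled
	fmt.Println(context.Cause(ctx)) // resolve canceled
}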
+ if r.networkConfig.HasIPv6Route != nil { r.hasIPv6Route = r.networkConfig.HasIPv6Route() @@ -1286,7 +1375,7 @@ func (r *Resolver) updateMetricDefaultResolver(success bool) { } } -func (r *Resolver) updateMetricPeakInFlight(inFlight int64) { +func (r *Resolver) updateMetricPeakInFlight(inFlight int) { r.mutex.Lock() defer r.mutex.Unlock() @@ -1324,7 +1413,9 @@ func hasRoutableIPv6Interface() (bool, error) { for _, in := range interfaces { if (in.Flags&net.FlagUp == 0) || - (in.Flags&(net.FlagLoopback|net.FlagPointToPoint)) != 0 { + // Note: don't exclude interfaces with the net.FlagPointToPoint + // flag, which is set for certain mobile networks + (in.Flags&net.FlagLoopback != 0) { continue } @@ -1451,7 +1542,7 @@ func performDNSQuery( // information about why a response was rejected. err := lastErr if err == nil { - err = errors.Trace(resolveCtx.Err()) + err = errors.Trace(context.Cause(resolveCtx)) } return nil, nil, RTT, err diff --git a/psiphon/common/resolver/resolver_test.go b/psiphon/common/resolver/resolver_test.go index 4ce824d91..9877263a8 100644 --- a/psiphon/common/resolver/resolver_test.go +++ b/psiphon/common/resolver/resolver_test.go @@ -87,7 +87,7 @@ func runTestMakeResolveParameters() error { if err != nil { return errors.Trace(err) } - _, err = params.Set("", false, paramValues) + _, err = params.Set("", 0, paramValues) if err != nil { return errors.Trace(err) } @@ -136,7 +136,7 @@ func runTestMakeResolveParameters() error { paramValues["DNSResolverPreresolvedIPAddressProbability"] = 0.0 - _, err = params.Set("", false, paramValues) + _, err = params.Set("", 0, paramValues) if err != nil { return errors.Trace(err) } @@ -167,7 +167,7 @@ func runTestMakeResolveParameters() error { paramValues["DNSResolverProtocolTransformProbability"] = 0.0 paramValues["DNSResolverIncludeEDNS0Probability"] = 0.0 - _, err = params.Set("", false, paramValues) + _, err = params.Set("", 0, paramValues) if err != nil { return errors.Trace(err) } @@ -505,7 +505,7 @@ func runTestResolver() error { domainAddress := net.JoinHostPort(exampleDomain, "443") - address, err := resolver.ResolveAddress(ctx, networkID, params, domainAddress) + address, err := resolver.ResolveAddress(ctx, networkID, params, "", domainAddress) if err != nil { return errors.Trace(err) } diff --git a/psiphon/common/tactics/tactics.go b/psiphon/common/tactics/tactics.go old mode 100755 new mode 100644 index a72bb7c4a..58c711eb2 --- a/psiphon/common/tactics/tactics.go +++ b/psiphon/common/tactics/tactics.go @@ -560,7 +560,8 @@ func (server *Server) Validate() error { applyParameters, filteredTactics.Parameters) } - _, err = params.Set("", false, applyParameters...) + _, err = params.Set( + "", parameters.ValidationServerSide, applyParameters...) if err != nil { return errors.Trace(err) } @@ -919,6 +920,7 @@ func (server *Server) GetTactics( var speedTestSamples []SpeedTestSample err := getJSONRequestParam(apiParams, SPEED_TEST_SAMPLES_PARAMETER_NAME, &speedTestSamples) + if err != nil { // TODO: log speed test parameter errors? // This API param is not explicitly validated elsewhere. @@ -980,7 +982,7 @@ func getJSONRequestParam(apiParams common.APIParameters, name string, value inte // Remarshal the parameter from common.APIParameters, as the initial API parameter // unmarshal will not have known the correct target type. 
I.e., instead of doing - // unmarhsal-into-struct, common.APIParameters will have an unmarshal-into-interface + // unmarshal-into-struct, common.APIParameters will have an unmarshal-into-interface // value as described here: https://golang.org/pkg/encoding/json/#Unmarshal. jsonValue, err := json.Marshal(apiParams[name]) @@ -1304,12 +1306,17 @@ func SetTacticsAPIParameters( return nil } -// HandleTacticsPayload updates the stored tactics with the given payload. -// If the payload has a new tag/tactics, this is stored and a new expiry -// time is set. If the payload has the same tag, the existing tactics are -// retained and the exipry is extended using the previous TTL. -// HandleTacticsPayload is called by the Psiphon client to handle the -// tactics payload in the handshake response. +// HandleTacticsPayload updates the stored tactics with the given payload. If +// the payload has a new tag/tactics, this is stored and a new expiry time is +// set. If the payload has the same tag, the existing tactics are retained, +// the expiry is extended using the previous TTL, and a nil record is +// rerturned. +// +// HandleTacticsPayload is called by the Psiphon client to handle the tactics +// payload in the API handshake and inproxy broker responses. As the Psiphon +// client has already called UseStoredTactics/FetchTactics and applied +// tactics, the nil record return value allows the caller to skip an +// unnecessary tactics parameters application. func HandleTacticsPayload( storer Storer, networkID string, @@ -1338,18 +1345,27 @@ func HandleTacticsPayload( return nil, errors.Trace(err) } - err = applyTacticsPayload(storer, networkID, record, payload) + newTactics, err := applyTacticsPayload(storer, networkID, record, payload) if err != nil { return nil, errors.Trace(err) } - // TODO: if tags match, just set an expiry record, not the whole tactics record? + // Store the tactics record, which may contain new tactics, and always + // contains an extended TTL. + // + // TODO: if tags match, just set an expiry record, not the whole tactics + // record? err = setStoredTacticsRecord(storer, networkID, record) if err != nil { return nil, errors.Trace(err) } + if !newTactics { + // Don't return a tactics record when the tactics have not changed. + record = nil + } + return record, nil } @@ -1515,7 +1531,7 @@ func FetchTactics( return nil, errors.Trace(err) } - err = applyTacticsPayload(storer, networkID, record, payload) + _, err = applyTacticsPayload(storer, networkID, record, payload) if err != nil { return nil, errors.Trace(err) } @@ -1679,41 +1695,64 @@ func applyTacticsPayload( storer Storer, networkID string, record *Record, - payload *Payload) error { + payload *Payload) (bool, error) { + + newTactics := false if payload.Tag == "" { - return errors.TraceNew("invalid tag") + return newTactics, errors.TraceNew("invalid tag") } // Replace the tactics data when the tags differ. if payload.Tag != record.Tag { + + // There is a potential race condition that may arise with multiple + // concurrent requests which may return tactics, such as in-proxy + // proxy announcements. In this scenario, an in-flight request + // matches the existing current tactics tag; then a concurrent + // request is sent while new tactics become available and its + // response returns new tactics and a new tag; the client applies the + // new tags and tactics; then, finally, the response for the first + // request arrives with a now apparently different tag -- the + // original tag -- but no tactics payload. 
In this case, simply fail + // the apply operation. + + // A nil payload.Tactics, of type json.RawMessage, can be serialized + // as the JSON "null". + if payload.Tactics == nil || + bytes.Equal(payload.Tactics, []byte("null")) { + return newTactics, errors.TraceNew("missing tactics") + } + record.Tag = payload.Tag record.Tactics = Tactics{} err := json.Unmarshal(payload.Tactics, &record.Tactics) if err != nil { - return errors.Trace(err) + return newTactics, errors.Trace(err) } + + newTactics = true } // Note: record.Tactics.TTL is validated by server ttl, err := time.ParseDuration(record.Tactics.TTL) if err != nil { - return errors.Trace(err) + return newTactics, errors.Trace(err) } if ttl <= 0 { - return errors.TraceNew("invalid TTL") + return newTactics, errors.TraceNew("invalid TTL") } if record.Tactics.Probability <= 0.0 { - return errors.TraceNew("invalid probability") + return newTactics, errors.TraceNew("invalid probability") } // Set or extend the expiry. record.Expiry = time.Now().UTC().Add(ttl) - return nil + return newTactics, nil } func setStoredTacticsRecord( diff --git a/psiphon/common/tactics/tactics_test.go b/psiphon/common/tactics/tactics_test.go old mode 100755 new mode 100644 index 11bf631a9..ff268d277 --- a/psiphon/common/tactics/tactics_test.go +++ b/psiphon/common/tactics/tactics_test.go @@ -304,8 +304,8 @@ func TestTactics(t *testing.T) { t.Fatalf("Unexpected probability: %f", r.Tactics.Probability) } - // skipOnError is true for Psiphon clients - counts, err := p.Set(r.Tag, true, r.Tactics.Parameters) + // ValidationSkipOnError is set for Psiphon clients + counts, err := p.Set(r.Tag, parameters.ValidationSkipOnError, r.Tactics.Parameters) if err != nil { t.Fatalf("Apply failed: %s", err) } @@ -541,8 +541,19 @@ func TestTactics(t *testing.T) { t.Fatalf("HandleTacticsPayload failed: %s", err) } - if handshakeTacticsRecord == nil { - t.Fatalf("expected tactics record") + // When tactic parameters are unchanged, HandleTacticsPayload returns nil, + // so that callers do not apply tactics unnecessarily. + // + // Check that nil is returned, but then directly load the record stored by + // HandleTacticsPayload in order to check metadata including the updated + // TTL. 
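// Illustrative sketch (not part of the upstream diff): as the test comment
// above notes, HandleTacticsPayload now returns a nil record when the payload
// tag is unchanged, so callers can skip reapplying parameters while the
// stored TTL is still extended. The toy below mirrors that caller pattern
// with hypothetical stand-in types; it is not the actual client code.
package main

import "fmt"

type Record struct{ Tag string }

// handleTacticsPayload stands in for tactics.HandleTacticsPayload: it returns
// a non-nil record only when the stored tactics actually changed.
func handleTacticsPayload(changed bool) (*Record, error) {
	if !changed {
		return nil, nil // tag unchanged; only the expiry was extended in storage
	}
	return &Record{Tag: "new-tag"}, nil
}

func main() {
	record, err := handleTacticsPayload(false)
	if err != nil {
		panic(err)
	}
	if record == nil {
		fmt.Println("tactics unchanged: skip reapplying parameters")
		return
	}
	fmt.Println("applying new tactics with tag", record.Tag)
}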
+ + if handshakeTacticsRecord != nil { + t.Fatalf("unexpected tactics record") + } + handshakeTacticsRecord, err = getStoredTacticsRecord(storer, networkID) + if err != nil { + t.Fatalf("getStoredTacticsRecord failed: %s", err) } if fetchTacticsRecord.Tag != handshakeTacticsRecord.Tag { @@ -1124,6 +1135,10 @@ func (l *testLogger) LogMetric(metric string, fields common.LogFields) { fmt.Printf("METRIC: %s: fields=%+v\n", metric, fields) } +func (l *testLogger) IsLogLevelDebug() bool { + return true +} + type testLoggerTrace struct { trace string fields common.LogFields diff --git a/psiphon/common/tun/tun.go b/psiphon/common/tun/tun.go index 394a7cf23..b3efb79ad 100644 --- a/psiphon/common/tun/tun.go +++ b/psiphon/common/tun/tun.go @@ -2377,7 +2377,7 @@ func processPacket( dataOffset := 0 if protocol == internetProtocolTCP { - if len(packet) < 33 { + if len(packet) < 38 { metrics.rejectedPacket(direction, packetRejectTCPProtocolLength) return false } @@ -2431,7 +2431,7 @@ func processPacket( dataOffset := 0 if protocol == internetProtocolTCP { - if len(packet) < 53 { + if len(packet) < 58 { metrics.rejectedPacket(direction, packetRejectTCPProtocolLength) return false } diff --git a/psiphon/common/tun/tun_test.go b/psiphon/common/tun/tun_test.go index efd8da2ae..3a95f438c 100644 --- a/psiphon/common/tun/tun_test.go +++ b/psiphon/common/tun/tun_test.go @@ -817,6 +817,10 @@ func (logger *testLogger) LogMetric(metric string, fields common.LogFields) { } } +func (l *testLogger) IsLogLevelDebug() bool { + return true +} + func (logger *testLogger) getLastPacketMetrics() common.LogFields { if logger.packetMetrics == nil { return nil diff --git a/psiphon/common/utils.go b/psiphon/common/utils.go index 7a91adda6..0fbc149f6 100644 --- a/psiphon/common/utils.go +++ b/psiphon/common/utils.go @@ -34,6 +34,7 @@ import ( "time" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/wildcard" ) @@ -238,3 +239,40 @@ func SleepWithContext(ctx context.Context, duration time.Duration) { case <-ctx.Done(): } } + +// SleepWithJitter returns after the specified duration, with random jitter +// applied, or once the input ctx is done, whichever is first. +func SleepWithJitter(ctx context.Context, duration time.Duration, jitter float64) { + timer := time.NewTimer(prng.JitterDuration(duration, jitter)) + defer timer.Stop() + select { + case <-ctx.Done(): + case <-timer.C: + } +} + +// ValueOrDefault returns the input value, or, when value is the zero value of +// its type, defaultValue. +func ValueOrDefault[T comparable](value, defaultValue T) T { + var zero T + if value == zero { + return defaultValue + } + return value +} + +// MergeContextCancel returns a context which has the properties of the 1st +// input content and merges in the cancellation signal of the 2nd context, so +// the returned context is cancelled when either input context is cancelled. 
+// +// See (and adapted from): https://pkg.go.dev/context#example-AfterFunc-Merge +func MergeContextCancel(ctx, cancelCtx context.Context) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancelCause(ctx) + stop := context.AfterFunc(cancelCtx, func() { + cancel(context.Cause(cancelCtx)) + }) + return ctx, func() { + stop() + cancel(context.Canceled) + } +} diff --git a/psiphon/common/values/init.go b/psiphon/common/values/init.go index 40063d207..328855b1e 100644 --- a/psiphon/common/values/init.go +++ b/psiphon/common/values/init.go @@ -1,3 +1,4 @@ +//go:build PSIPHON_INIT_COMMON_VALUES // +build PSIPHON_INIT_COMMON_VALUES /* diff --git a/psiphon/config.go b/psiphon/config.go index 821362420..b5e30e333 100755 --- a/psiphon/config.go +++ b/psiphon/config.go @@ -338,7 +338,7 @@ type Config struct { // (Windows VPN mode). DisableApi bool - // TargetApiProtocol specifies whether to force use of "ssh" or "web" API + // TargetAPIProtocol specifies whether to force use of "ssh" or "web" API // protocol. When blank, the default, the optimal API protocol is used. // Note that this capability check is not applied before the // "CandidateServers" count is emitted. @@ -346,7 +346,12 @@ type Config struct { // This parameter is intended for testing and debugging only. Not all // parameters are supported in the legacy "web" API protocol, including // speed test samples. - TargetApiProtocol string + TargetAPIProtocol string + + // TargetAPIProtocol specifies whether to use "json" or "cbor" API + // protocol parameter encodings. When blank, the default is to use "cbor" + // where supported. + TargetAPIEncoding string // RemoteServerListURLs is list of URLs which specify locations to fetch // out-of-band server entries. This facility is used when a tunnel cannot @@ -479,7 +484,7 @@ type Config struct { // distributed or displayed to users. Default is off. EmitDiagnosticNetworkParameters bool - // EmitBytesTransferred indicates whether to emit periodic notices showing + // EmitBytesTransferred indicates whether to emit frequent notices showing // bytes sent and received. EmitBytesTransferred bool @@ -614,6 +619,91 @@ type Config struct { // migrations are performed. MigrateUpgradeDownloadFilename string + // DisableTunnels disables establishing a client tunnel. Set + // DisableTunnels when running a stand-alone in-proxy proxy. + DisableTunnels bool + + // InproxyEnableProxy enables running an in-proxy proxy. + InproxyEnableProxy bool + + // InproxyProxySessionPrivateKey specifies a long-term in-proxy proxy + // private key and corresponding, derived proxy ID to use. If blank, an + // ephemeral key will be generated. + InproxyProxySessionPrivateKey string + + // InproxyMaxClients specifies the maximum number of in-proxy clients to + // be proxied concurrently. + InproxyMaxClients int + + // InproxyLimitUpstreamBytesPerSecond specifies the upstream byte transfer + // rate limit for each proxied client. When 0, there is no limit. + InproxyLimitUpstreamBytesPerSecond int + + // InproxyLimitDownstreamBytesPerSecond specifies the downstream byte + // transfer rate limit for each proxied client. When 0, there is no limit. + InproxyLimitDownstreamBytesPerSecond int + + // InproxyProxyPersonalCompartmentIDs specifies the personal compartment + // IDs used by an in-proxy proxy. Personal compartment IDs are + // distributed from proxy operators to client users out-of-band and + // provide a mechanism to allow only certain clients to use a proxy. 
+ // + // See InproxyClientPersonalCompartmentIDs comment for limitations. + InproxyProxyPersonalCompartmentIDs []string + + // InproxyClientPersonalCompartmentIDs specifies the personal compartment + // IDs used by an in-proxy client. Personal compartment IDs are + // distributed from proxy operators to client users out-of-band and + // provide a mechanism to ensure a client uses only a certain proxy for + // all tunnels connections. + // + // When InproxyClientPersonalCompartmentIDs is set, the client will use + // only in-proxy protocols, ensuring that all connections go through the + // proxy or proxies with the same personal compartment IDs. + // + // Limitations: + // + // - While fully functional, the personal pairing mode has a number of + // limitations that make the current implementation less suitable for + // large scale deployment. + // + // - Since the mode requires an in-proxy connection to a proxy, announcing + // with the corresponding personal compartment ID, not only must that + // proxy be available, but also a broker, and both the client and proxy + // must rendezvous at the same broker. + // + // - Currently, the client tunnel establishment algorithm does not launch + // an untunneled tactics request as long as there is a cached tactics + // with a valid TTL. The assumption, in regular mode, is that the + // cached tactics will suffice, and any new tactics will be obtained + // from any Psiphon server connection. Since broker specs are obtained + // solely from tactics, if brokers are removed, reconfigured, or even + // if the order is changed, personal mode may fail to connect until + // cached tactics expire. + // + // - In personal mode, clients and proxies use a simplistic approach to + // rendezvous: always select the first broker spec. This works, but is + // not robust in terms of load balancing, and fails if the first broker + // is unreachable or overloaded. Non-personal in-proxy dials can simply + // use any available broker. + // + // - The broker matching queues lack compartment ID indexing. For a + // handful of common compartment IDs, this is not expected to be an + // issue. For personal compartment IDs, this may lead to frequency + // near-full scans of the queues when looking for a match. + // + // - In personal mode, all establishment candidates must be in-proxy + // dials, all using the same broker. Many concurrent, fronted broker + // requests may result in CDN rate limiting, requiring some mechanism + // to delay or spread the requests, as is currently done only for + // batches of proxy announcements. + // + InproxyClientPersonalCompartmentIDs []string + + // EmitInproxyProxyActivity indicates whether to emit frequent notices + // showing proxy connection information and bytes transferred. + EmitInproxyProxyActivity bool + // // The following parameters are deprecated. // @@ -907,12 +997,68 @@ type Config struct { SteeringIPCacheMaxEntries *int SteeringIPProbability *float64 + // The following in-proxy fields are for testing purposes only. 
+ InproxyAllowProxy *bool + InproxyAllowClient *bool + InproxyTunnelProtocolSelectionProbability *float64 + InproxyBrokerSpecs parameters.InproxyBrokerSpecsValue + InproxyClientBrokerSpecs parameters.InproxyBrokerSpecsValue + InproxyProxyBrokerSpecs parameters.InproxyBrokerSpecsValue + InproxyReplayBrokerDialParametersTTLSeconds *int + InproxyReplayBrokerUpdateFrequencySeconds *int + InproxyReplayBrokerDialParametersProbability *float64 + InproxyReplayBrokerRetainFailedProbability *float64 + InproxyCommonCompartmentIDs parameters.InproxyCompartmentIDsValue + InproxyMaxCompartmentIDListLength *int + InproxyProxyAnnounceRequestTimeoutMilliseconds *int + InproxyProxyAnnounceDelayMilliseconds *int + InproxyProxyAnnounceDelayJitter *float64 + InproxyProxyAnswerRequestTimeoutMilliseconds *int + InproxyClientOfferRequestTimeoutMilliseconds *int + InproxyClientOfferRetryDelayMilliseconds *int + InproxyClientOfferRetryJitter *float64 + InproxyClientRelayedPacketRequestTimeoutMilliseconds *int + InproxyDTLSRandomizationProbability *float64 + InproxyDataChannelTrafficShapingProbability *float64 + InproxyDataChannelTrafficShapingParameters *parameters.InproxyDataChannelTrafficShapingParametersValue + InproxySTUNServerAddresses []string + InproxySTUNServerAddressesRFC5780 []string + InproxyProxySTUNServerAddresses []string + InproxyProxySTUNServerAddressesRFC5780 []string + InproxyClientSTUNServerAddresses []string + InproxyClientSTUNServerAddressesRFC5780 []string + InproxyClientDiscoverNATProbability *float64 + InproxyDisableSTUN *bool + InproxyDisablePortMapping *bool + InproxyDisableInboundForMobileNetworks *bool + InproxyDisableIPv6ICECandidates *bool + InproxyProxyDisableSTUN *bool + InproxyProxyDisablePortMapping *bool + InproxyProxyDisableInboundForMobileNetworks *bool + InproxyProxyDisableIPv6ICECandidates *bool + InproxyClientDisableSTUN *bool + InproxyClientDisablePortMapping *bool + InproxyClientDisableInboundForMobileNetworks *bool + InproxyClientDisableIPv6ICECandidates *bool + InproxyProxyDiscoverNATTimeoutMilliseconds *int + InproxyClientDiscoverNATTimeoutMilliseconds *int + InproxyWebRTCAnswerTimeoutMilliseconds *int + InproxyProxyWebRTCAwaitDataChannelTimeoutMilliseconds *int + InproxyClientWebRTCAwaitDataChannelTimeoutMilliseconds *int + InproxyProxyDestinationDialTimeoutMilliseconds *int + InproxyPsiphonAPIRequestTimeoutMilliseconds *int + InproxyProxyTotalActivityNoticePeriodMilliseconds *int + + InproxySkipAwaitFullyConnected bool + InproxyEnableWebRTCDebugLogging bool + // params is the active parameters.Parameters with defaults, config values, // and, optionally, tactics applied. // // New tactics must be applied by calling Config.SetParameters; calling // params.Set directly will fail to add config values. - params *parameters.Parameters + paramsMutex sync.Mutex + params *parameters.Parameters dialParametersHash []byte @@ -931,6 +1077,17 @@ type Config struct { committed bool loadTimestamp string + + tacticsAppliedReceiversMutex sync.Mutex + tacticsAppliedReceivers []TacticsAppliedReceiver +} + +// TacticsAppliedReceiver specifies the interface for a component that is +// signaled when tactics are applied. TacticsApplied is invoked when any +// tactics are applied after initial start up, and then whenever new tactics +// are received and applied while running. 
+type TacticsAppliedReceiver interface { + TacticsApplied() error } // Config field which specifies if notice files should be used and at which @@ -1200,11 +1357,16 @@ func (config *Config) Commit(migrateFromLegacyFields bool) error { return errors.Tracef("invalid client version: %s", err) } - if !common.Contains( - []string{"", protocol.PSIPHON_SSH_API_PROTOCOL, protocol.PSIPHON_WEB_API_PROTOCOL}, - config.TargetApiProtocol) { + if config.TargetAPIProtocol != "" && + !protocol.PsiphonAPIProtocolIsValid(config.TargetAPIProtocol) { + + return errors.TraceNew("invalid TargetAPIProtocol") + } + + if config.TargetAPIEncoding != "" && + !protocol.PsiphonAPIEncodingIsValid(config.TargetAPIEncoding) { - return errors.TraceNew("invalid TargetApiProtocol") + return errors.TraceNew("invalid TargetAPIEncoding") } if !config.DisableRemoteServerListFetcher { @@ -1240,6 +1402,21 @@ func (config *Config) Commit(migrateFromLegacyFields bool) error { } } + if config.ObfuscatedSSHAlgorithms != nil && + len(config.ObfuscatedSSHAlgorithms) != 4 { + // TODO: validate each algorithm? + return errors.TraceNew("invalid ObfuscatedSSHAlgorithms") + } + + if !config.DisableTunnels && config.InproxyEnableProxy && + common.ContainsAny( + config.InproxyProxyPersonalCompartmentIDs, + config.InproxyClientPersonalCompartmentIDs) { + + // Don't allow an in-proxy client and proxy run in the same app to match. + return errors.TraceNew("invalid overlapping personal compartment IDs") + } + // This constraint is expected by logic in Controller.runTunnels(). if config.PacketTunnelTunFileDescriptor > 0 && config.TunnelPoolSize != 1 { @@ -1263,20 +1440,16 @@ func (config *Config) Commit(migrateFromLegacyFields bool) error { return errors.TraceNew("invalid SessionID") } + config.paramsMutex.Lock() config.params, err = parameters.NewParameters( func(err error) { NoticeWarning("Parameters getValue failed: %s", err) }) + config.paramsMutex.Unlock() if err != nil { return errors.Trace(err) } - if config.ObfuscatedSSHAlgorithms != nil && - len(config.ObfuscatedSSHAlgorithms) != 4 { - // TODO: validate each algorithm? - return errors.TraceNew("invalid ObfuscatedSSHAlgorithms") - } - // parametersParameters.Set will validate the config fields applied to // parametersParameters. @@ -1404,6 +1577,9 @@ func (config *Config) Commit(migrateFromLegacyFields bool) error { // GetParameters returns the current parameters.Parameters. func (config *Config) GetParameters() *parameters.Parameters { + config.paramsMutex.Lock() + defer config.paramsMutex.Unlock() + return config.params } @@ -1427,15 +1603,27 @@ func (config *Config) SetParameters(tag string, skipOnError bool, applyParameter setParameters = append(setParameters, applyParameters) } - counts, err := config.params.Set(tag, skipOnError, setParameters...) + // Don't hold the lock on config.paramsMutex when signalling + // GetTacticsAppliedReceivers, or else GetParameters will deadlock. + // Releasing the lock early here also ensures we don't hold the lock when + // posting notices. + + config.paramsMutex.Lock() + validationFlags := 0 + if skipOnError { + validationFlags |= parameters.ValidationSkipOnError + } + counts, err := config.params.Set(tag, validationFlags, setParameters...) if err != nil { + config.paramsMutex.Unlock() return errors.Trace(err) } + p := config.params.Get() + config.paramsMutex.Unlock() NoticeInfo("applied %v parameters with tag '%s'", counts, tag) // Emit certain individual parameter values for quick reference in diagnostics. 
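// Illustrative sketch (not part of the upstream diff): SetParameters above
// deliberately releases paramsMutex before posting notices and signalling
// receivers, to avoid deadlocking against GetParameters. The self-contained
// toy below demonstrates that lock-scoping pattern; the names are
// hypothetical.
package main

import (
	"fmt"
	"sync"
)

type holder struct {
	mutex sync.Mutex
	value int
}

func (h *holder) set(v int, onApplied func(int)) {
	h.mutex.Lock()
	h.value = v
	snapshot := h.value
	h.mutex.Unlock()

	// Invoke callbacks outside the lock: a callback that reads the holder
	// (analogous to GetParameters) would otherwise deadlock.
	onApplied(snapshot)
}

func (h *holder) get() int {
	h.mutex.Lock()
	defer h.mutex.Unlock()
	return h.value
}

func main() {
	h := &holder{}
	h.set(42, func(v int) {
		fmt.Println("applied:", v, "read back:", h.get())
	})
}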
- p := config.params.Get() NoticeInfo( "NetworkLatencyMultiplier Min/Max/Lambda: %f/%f/%f", p.Float(parameters.NetworkLatencyMultiplierMin), @@ -1447,6 +1635,21 @@ func (config *Config) SetParameters(tag string, skipOnError bool, applyParameter // Emit these now, as notices. if p.WeightedCoinFlip(parameters.ApplicationParametersProbability) { NoticeApplicationParameters(p.KeyValues(parameters.ApplicationParameters)) + } else { + // The front end may persist Application Parameters, so clear any previously + // persisted values. + NoticeApplicationParameters(parameters.KeyValues{}) + } + + // Signal all registered TacticsAppliedReceivers that new tactics have + // been applied. Each receiver is responsible for checking if its + // individual tactics parameters have actually changed. + for _, receiver := range config.GetTacticsAppliedReceivers() { + err := receiver.TacticsApplied() + if err != nil { + NoticeError("TacticsApplied failed: %v", err) + // Log and continue running. + } } return nil @@ -1466,6 +1669,21 @@ func (config *Config) GetResolver() *resolver.Resolver { return config.resolver } +// SetTacticsAppliedReceivers registers the list of TacticsAppliedReceivers. +func (config *Config) SetTacticsAppliedReceivers(receivers []TacticsAppliedReceiver) { + config.tacticsAppliedReceiversMutex.Lock() + defer config.tacticsAppliedReceiversMutex.Unlock() + config.tacticsAppliedReceivers = receivers +} + +// GetTacticsAppliedReceivers gets the list of registered +// TacticsAppliedReceivers. +func (config *Config) GetTacticsAppliedReceivers() []TacticsAppliedReceiver { + config.tacticsAppliedReceiversMutex.Lock() + defer config.tacticsAppliedReceiversMutex.Unlock() + return config.tacticsAppliedReceivers +} + // SetDynamicConfig sets the current client sponsor ID and authorizations. // Invalid values for sponsor ID are ignored. The caller must not modify the // input authorizations slice. 
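// Illustrative sketch (not part of the upstream diff): components react to
// newly applied tactics by implementing TacticsAppliedReceiver and registering
// via SetTacticsAppliedReceivers, as added above. The toy below mirrors that
// flow with local stand-in types; it is not the actual psiphon code.
package main

import "fmt"

type TacticsAppliedReceiver interface {
	TacticsApplied() error
}

type brokerClientManager struct{ rebuilds int }

// TacticsApplied is invoked after any tactics application; the receiver is
// responsible for checking whether its own parameters actually changed.
func (m *brokerClientManager) TacticsApplied() error {
	m.rebuilds++
	return nil
}

func main() {
	receivers := []TacticsAppliedReceiver{&brokerClientManager{}}

	// Analogous to Config.SetParameters signalling each registered receiver
	// after new tactics are applied; errors are logged and not fatal.
	for _, receiver := range receivers {
		if err := receiver.TacticsApplied(); err != nil {
			fmt.Println("TacticsApplied failed:", err)
		}
	}
}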
@@ -2157,6 +2375,206 @@ func (config *Config) makeConfigParameters() map[string]interface{} { applyParameters[parameters.SteeringIPProbability] = *config.SteeringIPProbability } + if config.InproxyAllowProxy != nil { + applyParameters[parameters.InproxyAllowProxy] = *config.InproxyAllowProxy + } + + if config.InproxyAllowClient != nil { + applyParameters[parameters.InproxyAllowClient] = *config.InproxyAllowClient + } + + if config.InproxyTunnelProtocolSelectionProbability != nil { + applyParameters[parameters.InproxyTunnelProtocolSelectionProbability] = *config.InproxyTunnelProtocolSelectionProbability + } + + if len(config.InproxyBrokerSpecs) > 0 { + applyParameters[parameters.InproxyBrokerSpecs] = config.InproxyBrokerSpecs + } + + if len(config.InproxyProxyBrokerSpecs) > 0 { + applyParameters[parameters.InproxyProxyBrokerSpecs] = config.InproxyProxyBrokerSpecs + } + + if len(config.InproxyClientBrokerSpecs) > 0 { + applyParameters[parameters.InproxyClientBrokerSpecs] = config.InproxyClientBrokerSpecs + } + + if config.InproxyReplayBrokerDialParametersTTLSeconds != nil { + applyParameters[parameters.InproxyReplayBrokerDialParametersTTL] = fmt.Sprintf("%ds", *config.InproxyReplayBrokerDialParametersTTLSeconds) + } + + if config.InproxyReplayBrokerUpdateFrequencySeconds != nil { + applyParameters[parameters.InproxyReplayBrokerUpdateFrequency] = fmt.Sprintf("%ds", *config.InproxyReplayBrokerUpdateFrequencySeconds) + } + + if config.InproxyReplayBrokerDialParametersProbability != nil { + applyParameters[parameters.InproxyReplayBrokerDialParametersProbability] = *config.InproxyReplayBrokerDialParametersProbability + } + + if config.InproxyReplayBrokerRetainFailedProbability != nil { + applyParameters[parameters.InproxyReplayBrokerRetainFailedProbability] = *config.InproxyReplayBrokerRetainFailedProbability + } + + if len(config.InproxyCommonCompartmentIDs) > 0 { + applyParameters[parameters.InproxyCommonCompartmentIDs] = config.InproxyCommonCompartmentIDs + } + + if config.InproxyMaxCompartmentIDListLength != nil { + applyParameters[parameters.InproxyMaxCompartmentIDListLength] = *config.InproxyMaxCompartmentIDListLength + } + + if config.InproxyProxyAnnounceRequestTimeoutMilliseconds != nil { + applyParameters[parameters.InproxyProxyAnnounceRequestTimeout] = fmt.Sprintf("%dms", *config.InproxyProxyAnnounceRequestTimeoutMilliseconds) + } + + if config.InproxyProxyAnnounceDelayMilliseconds != nil { + applyParameters[parameters.InproxyProxyAnnounceDelay] = fmt.Sprintf("%dms", *config.InproxyProxyAnnounceDelayMilliseconds) + } + + if config.InproxyProxyAnnounceDelayJitter != nil { + applyParameters[parameters.InproxyProxyAnnounceDelayJitter] = *config.InproxyProxyAnnounceDelayJitter + } + + if config.InproxyProxyAnswerRequestTimeoutMilliseconds != nil { + applyParameters[parameters.InproxyProxyAnswerRequestTimeout] = fmt.Sprintf("%dms", *config.InproxyProxyAnswerRequestTimeoutMilliseconds) + } + + if config.InproxyClientOfferRequestTimeoutMilliseconds != nil { + applyParameters[parameters.InproxyClientOfferRequestTimeout] = fmt.Sprintf("%dms", *config.InproxyClientOfferRequestTimeoutMilliseconds) + } + + if config.InproxyClientOfferRetryDelayMilliseconds != nil { + applyParameters[parameters.InproxyClientOfferRetryDelay] = fmt.Sprintf("%dms", *config.InproxyClientOfferRetryDelayMilliseconds) + } + + if config.InproxyClientOfferRetryJitter != nil { + applyParameters[parameters.InproxyClientOfferRetryJitter] = *config.InproxyClientOfferRetryJitter + } + + if 
config.InproxyClientRelayedPacketRequestTimeoutMilliseconds != nil { + applyParameters[parameters.InproxyClientRelayedPacketRequestTimeout] = fmt.Sprintf("%dms", *config.InproxyClientRelayedPacketRequestTimeoutMilliseconds) + } + + if config.InproxyDTLSRandomizationProbability != nil { + applyParameters[parameters.InproxyDTLSRandomizationProbability] = *config.InproxyDTLSRandomizationProbability + } + + if config.InproxyDataChannelTrafficShapingProbability != nil { + applyParameters[parameters.InproxyDataChannelTrafficShapingProbability] = *config.InproxyDataChannelTrafficShapingProbability + } + + if config.InproxyDataChannelTrafficShapingParameters != nil { + applyParameters[parameters.InproxyDataChannelTrafficShapingParameters] = *config.InproxyDataChannelTrafficShapingParameters + } + + if len(config.InproxySTUNServerAddresses) > 0 { + applyParameters[parameters.InproxySTUNServerAddresses] = config.InproxySTUNServerAddresses + } + + if len(config.InproxySTUNServerAddressesRFC5780) > 0 { + applyParameters[parameters.InproxySTUNServerAddressesRFC5780] = config.InproxySTUNServerAddressesRFC5780 + } + + if len(config.InproxyProxySTUNServerAddresses) > 0 { + applyParameters[parameters.InproxyProxySTUNServerAddresses] = config.InproxyProxySTUNServerAddresses + } + + if len(config.InproxyProxySTUNServerAddressesRFC5780) > 0 { + applyParameters[parameters.InproxyProxySTUNServerAddressesRFC5780] = config.InproxyProxySTUNServerAddressesRFC5780 + } + + if len(config.InproxyClientSTUNServerAddresses) > 0 { + applyParameters[parameters.InproxyClientSTUNServerAddresses] = config.InproxyClientSTUNServerAddresses + } + + if len(config.InproxyClientSTUNServerAddressesRFC5780) > 0 { + applyParameters[parameters.InproxyClientSTUNServerAddressesRFC5780] = config.InproxyClientSTUNServerAddressesRFC5780 + } + + if config.InproxyClientDiscoverNATProbability != nil { + applyParameters[parameters.InproxyClientDiscoverNATProbability] = *config.InproxyClientDiscoverNATProbability + } + + if config.InproxyDisableSTUN != nil { + applyParameters[parameters.InproxyDisableSTUN] = *config.InproxyDisableSTUN + } + + if config.InproxyDisablePortMapping != nil { + applyParameters[parameters.InproxyDisablePortMapping] = *config.InproxyDisablePortMapping + } + + if config.InproxyDisableInboundForMobileNetworks != nil { + applyParameters[parameters.InproxyDisableInboundForMobileNetworks] = *config.InproxyDisableInboundForMobileNetworks + } + + if config.InproxyDisableIPv6ICECandidates != nil { + applyParameters[parameters.InproxyDisableIPv6ICECandidates] = *config.InproxyDisableIPv6ICECandidates + } + + if config.InproxyProxyDisableSTUN != nil { + applyParameters[parameters.InproxyProxyDisableSTUN] = *config.InproxyProxyDisableSTUN + } + + if config.InproxyProxyDisablePortMapping != nil { + applyParameters[parameters.InproxyProxyDisablePortMapping] = *config.InproxyProxyDisablePortMapping + } + + if config.InproxyProxyDisableInboundForMobileNetworks != nil { + applyParameters[parameters.InproxyProxyDisableInboundForMobileNetworks] = *config.InproxyProxyDisableInboundForMobileNetworks + } + + if config.InproxyProxyDisableIPv6ICECandidates != nil { + applyParameters[parameters.InproxyProxyDisableIPv6ICECandidates] = *config.InproxyProxyDisableIPv6ICECandidates + } + + if config.InproxyClientDisableSTUN != nil { + applyParameters[parameters.InproxyClientDisableSTUN] = *config.InproxyClientDisableSTUN + } + + if config.InproxyClientDisablePortMapping != nil { + applyParameters[parameters.InproxyClientDisablePortMapping] = 
*config.InproxyClientDisablePortMapping + } + + if config.InproxyClientDisableInboundForMobileNetworks != nil { + applyParameters[parameters.InproxyClientDisableInboundForMobileNetworks] = *config.InproxyClientDisableInboundForMobileNetworks + } + + if config.InproxyClientDisableIPv6ICECandidates != nil { + applyParameters[parameters.InproxyClientDisableIPv6ICECandidates] = *config.InproxyClientDisableIPv6ICECandidates + } + + if config.InproxyProxyDiscoverNATTimeoutMilliseconds != nil { + applyParameters[parameters.InproxyProxyDiscoverNATTimeout] = fmt.Sprintf("%dms", *config.InproxyProxyDiscoverNATTimeoutMilliseconds) + } + + if config.InproxyClientDiscoverNATTimeoutMilliseconds != nil { + applyParameters[parameters.InproxyClientDiscoverNATTimeout] = fmt.Sprintf("%dms", *config.InproxyClientDiscoverNATTimeoutMilliseconds) + } + + if config.InproxyWebRTCAnswerTimeoutMilliseconds != nil { + applyParameters[parameters.InproxyWebRTCAnswerTimeout] = fmt.Sprintf("%dms", *config.InproxyWebRTCAnswerTimeoutMilliseconds) + } + + if config.InproxyProxyWebRTCAwaitDataChannelTimeoutMilliseconds != nil { + applyParameters[parameters.InproxyProxyWebRTCAwaitDataChannelTimeout] = fmt.Sprintf("%dms", *config.InproxyProxyWebRTCAwaitDataChannelTimeoutMilliseconds) + } + + if config.InproxyClientWebRTCAwaitDataChannelTimeoutMilliseconds != nil { + applyParameters[parameters.InproxyClientWebRTCAwaitDataChannelTimeout] = fmt.Sprintf("%dms", *config.InproxyClientWebRTCAwaitDataChannelTimeoutMilliseconds) + } + + if config.InproxyProxyDestinationDialTimeoutMilliseconds != nil { + applyParameters[parameters.InproxyProxyDestinationDialTimeout] = fmt.Sprintf("%dms", *config.InproxyProxyDestinationDialTimeoutMilliseconds) + } + + if config.InproxyPsiphonAPIRequestTimeoutMilliseconds != nil { + applyParameters[parameters.InproxyPsiphonAPIRequestTimeout] = fmt.Sprintf("%dms", *config.InproxyPsiphonAPIRequestTimeoutMilliseconds) + } + + if config.InproxyProxyTotalActivityNoticePeriodMilliseconds != nil { + applyParameters[parameters.InproxyProxyTotalActivityNoticePeriod] = fmt.Sprintf("%dms", *config.InproxyProxyTotalActivityNoticePeriodMilliseconds) + } + // When adding new config dial parameters that may override tactics, also // update setDialParametersHash. @@ -2757,6 +3175,195 @@ func (config *Config) setDialParametersHash() { // Steering IPs are ephemeral and not replayed, so steering IP parameters // are excluded here. 
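// Illustrative sketch (not part of the upstream diff): each config field
// folded into the dial parameters hash just below follows the same pattern --
// write the field name, then its value -- so that changing any
// replay-relevant field changes the hash and invalidates replay. The
// standalone equivalent below uses SHA-256 as an arbitrary stand-in; the
// actual hash function used by setDialParametersHash is not shown in this
// diff.
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

func main() {
	p := 0.5
	probability := &p

	hash := sha256.New()
	if probability != nil {
		hash.Write([]byte("InproxyTunnelProtocolSelectionProbability"))
		binary.Write(hash, binary.LittleEndian, *probability)
	}
	fmt.Printf("%x\n", hash.Sum(nil))
}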
+ if config.InproxyTunnelProtocolSelectionProbability != nil { + hash.Write([]byte("InproxyTunnelProtocolSelectionProbability")) + binary.Write(hash, binary.LittleEndian, *config.InproxyTunnelProtocolSelectionProbability) + } + if len(config.InproxyBrokerSpecs) > 0 { + hash.Write([]byte("InproxyBrokerSpecs")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyBrokerSpecs))) + } + if len(config.InproxyProxyBrokerSpecs) > 0 { + hash.Write([]byte("InproxyProxyBrokerSpecs")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyProxyBrokerSpecs))) + } + if len(config.InproxyClientBrokerSpecs) > 0 { + hash.Write([]byte("InproxyClientBrokerSpecs")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyClientBrokerSpecs))) + } + if config.InproxyReplayBrokerDialParametersTTLSeconds != nil { + hash.Write([]byte("InproxyReplayBrokerDialParametersTTLSeconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyReplayBrokerDialParametersTTLSeconds)) + } + if config.InproxyReplayBrokerUpdateFrequencySeconds != nil { + hash.Write([]byte("InproxyReplayBrokerUpdateFrequencySeconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyReplayBrokerUpdateFrequencySeconds)) + } + if config.InproxyReplayBrokerDialParametersProbability != nil { + hash.Write([]byte("InproxyReplayBrokerDialParametersProbability")) + binary.Write(hash, binary.LittleEndian, *config.InproxyReplayBrokerDialParametersProbability) + } + if config.InproxyReplayBrokerRetainFailedProbability != nil { + hash.Write([]byte("InproxyReplayBrokerRetainFailedProbability")) + binary.Write(hash, binary.LittleEndian, *config.InproxyReplayBrokerRetainFailedProbability) + } + if len(config.InproxyCommonCompartmentIDs) > 0 { + hash.Write([]byte("InproxyCommonCompartmentIDs")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyCommonCompartmentIDs))) + } + if config.InproxyMaxCompartmentIDListLength != nil { + hash.Write([]byte("InproxyMaxCompartmentIDListLength")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyMaxCompartmentIDListLength)) + } + if config.InproxyProxyAnnounceRequestTimeoutMilliseconds != nil { + hash.Write([]byte("InproxyProxyAnnounceRequestTimeoutMilliseconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyProxyAnnounceRequestTimeoutMilliseconds)) + } + if config.InproxyProxyAnnounceDelayMilliseconds != nil { + hash.Write([]byte("InproxyProxyAnnounceDelayMilliseconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyProxyAnnounceDelayMilliseconds)) + } + if config.InproxyProxyAnnounceDelayJitter != nil { + hash.Write([]byte("InproxyProxyAnnounceDelayJitter")) + binary.Write(hash, binary.LittleEndian, *config.InproxyProxyAnnounceDelayJitter) + } + if config.InproxyProxyAnswerRequestTimeoutMilliseconds != nil { + hash.Write([]byte("InproxyProxyAnswerRequestTimeoutMilliseconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyProxyAnswerRequestTimeoutMilliseconds)) + } + if config.InproxyClientOfferRequestTimeoutMilliseconds != nil { + hash.Write([]byte("InproxyClientOfferRequestTimeoutMilliseconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyClientOfferRequestTimeoutMilliseconds)) + } + if config.InproxyClientOfferRetryDelayMilliseconds != nil { + hash.Write([]byte("InproxyClientOfferRetryDelayMilliseconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyClientOfferRetryDelayMilliseconds)) + } + if config.InproxyClientOfferRetryJitter != nil { + hash.Write([]byte("InproxyClientOfferRetryJitter")) + 
binary.Write(hash, binary.LittleEndian, *config.InproxyClientOfferRetryJitter) + } + if config.InproxyClientRelayedPacketRequestTimeoutMilliseconds != nil { + hash.Write([]byte("InproxyClientRelayedPacketRequestTimeoutMilliseconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyClientRelayedPacketRequestTimeoutMilliseconds)) + } + if config.InproxyDTLSRandomizationProbability != nil { + hash.Write([]byte("InproxyDTLSRandomizationProbability")) + binary.Write(hash, binary.LittleEndian, *config.InproxyDTLSRandomizationProbability) + } + if config.InproxyDataChannelTrafficShapingProbability != nil { + hash.Write([]byte("InproxyDataChannelTrafficShapingProbability")) + binary.Write(hash, binary.LittleEndian, *config.InproxyDataChannelTrafficShapingProbability) + } + if config.InproxyDataChannelTrafficShapingParameters != nil { + hash.Write([]byte("InproxyDataChannelTrafficShapingParameters")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyDataChannelTrafficShapingParameters))) + } + if config.InproxySTUNServerAddresses != nil { + hash.Write([]byte("InproxySTUNServerAddresses")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyProxySTUNServerAddresses))) + } + if config.InproxySTUNServerAddressesRFC5780 != nil { + hash.Write([]byte("InproxySTUNServerAddressesRFC5780")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyProxySTUNServerAddressesRFC5780))) + } + if config.InproxyProxySTUNServerAddresses != nil { + hash.Write([]byte("InproxyProxySTUNServerAddresses")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyProxySTUNServerAddresses))) + } + if config.InproxyProxySTUNServerAddressesRFC5780 != nil { + hash.Write([]byte("InproxyProxySTUNServerAddressesRFC5780")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyProxySTUNServerAddressesRFC5780))) + } + if config.InproxyClientSTUNServerAddresses != nil { + hash.Write([]byte("InproxyClientSTUNServerAddresses")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyClientSTUNServerAddresses))) + } + if config.InproxyClientSTUNServerAddressesRFC5780 != nil { + hash.Write([]byte("InproxyClientSTUNServerAddressesRFC5780")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyClientSTUNServerAddressesRFC5780))) + } + if config.InproxyClientDiscoverNATProbability != nil { + hash.Write([]byte("InproxyClientDiscoverNATProbability")) + binary.Write(hash, binary.LittleEndian, *config.InproxyClientDiscoverNATProbability) + } + if config.InproxyDisableSTUN != nil { + hash.Write([]byte("InproxyDisableSTUN")) + binary.Write(hash, binary.LittleEndian, *config.InproxyDisableSTUN) + } + if config.InproxyDisablePortMapping != nil { + hash.Write([]byte("InproxyDisablePortMapping")) + binary.Write(hash, binary.LittleEndian, *config.InproxyDisablePortMapping) + } + if config.InproxyDisableInboundForMobileNetworks != nil { + hash.Write([]byte("InproxyDisableInboundForMobileNetworks")) + binary.Write(hash, binary.LittleEndian, *config.InproxyDisableInboundForMobileNetworks) + } + if config.InproxyDisableIPv6ICECandidates != nil { + hash.Write([]byte("InproxyDisableIPv6ICECandidates")) + binary.Write(hash, binary.LittleEndian, *config.InproxyDisableIPv6ICECandidates) + } + if config.InproxyProxyDisableSTUN != nil { + hash.Write([]byte("InproxyProxyDisableSTUN")) + binary.Write(hash, binary.LittleEndian, *config.InproxyProxyDisableSTUN) + } + if config.InproxyProxyDisablePortMapping != nil { + hash.Write([]byte("InproxyProxyDisablePortMapping")) + binary.Write(hash, binary.LittleEndian, *config.InproxyProxyDisablePortMapping) + } + if 
config.InproxyProxyDisableInboundForMobileNetworks != nil { + hash.Write([]byte("InproxyProxyDisableInboundForMobileNetworks")) + binary.Write(hash, binary.LittleEndian, *config.InproxyProxyDisableInboundForMobileNetworks) + } + if config.InproxyProxyDisableIPv6ICECandidates != nil { + hash.Write([]byte("InproxyProxyDisableIPv6ICECandidates")) + binary.Write(hash, binary.LittleEndian, *config.InproxyProxyDisableIPv6ICECandidates) + } + if config.InproxyClientDisableSTUN != nil { + hash.Write([]byte("InproxyClientDisableSTUN")) + binary.Write(hash, binary.LittleEndian, *config.InproxyClientDisableSTUN) + } + if config.InproxyClientDisablePortMapping != nil { + hash.Write([]byte("InproxyClientDisablePortMapping")) + binary.Write(hash, binary.LittleEndian, *config.InproxyClientDisablePortMapping) + } + if config.InproxyClientDisableInboundForMobileNetworks != nil { + hash.Write([]byte("InproxyClientDisableInboundForMobileNetworks")) + binary.Write(hash, binary.LittleEndian, *config.InproxyClientDisableInboundForMobileNetworks) + } + if config.InproxyClientDisableIPv6ICECandidates != nil { + hash.Write([]byte("InproxyClientDisableIPv6ICECandidates")) + binary.Write(hash, binary.LittleEndian, *config.InproxyClientDisableIPv6ICECandidates) + } + if config.InproxyProxyDiscoverNATTimeoutMilliseconds != nil { + hash.Write([]byte("InproxyProxyDiscoverNATTimeoutMilliseconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyProxyDiscoverNATTimeoutMilliseconds)) + } + if config.InproxyClientDiscoverNATTimeoutMilliseconds != nil { + hash.Write([]byte("InproxyClientDiscoverNATTimeoutMilliseconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyClientDiscoverNATTimeoutMilliseconds)) + } + if config.InproxyWebRTCAnswerTimeoutMilliseconds != nil { + hash.Write([]byte("InproxyWebRTCAnswerTimeoutMilliseconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyWebRTCAnswerTimeoutMilliseconds)) + } + if config.InproxyProxyWebRTCAwaitDataChannelTimeoutMilliseconds != nil { + hash.Write([]byte("InproxyProxyWebRTCAwaitDataChannelTimeoutMilliseconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyProxyWebRTCAwaitDataChannelTimeoutMilliseconds)) + } + if config.InproxyClientWebRTCAwaitDataChannelTimeoutMilliseconds != nil { + hash.Write([]byte("InproxyClientWebRTCAwaitDataChannelTimeoutMilliseconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyClientWebRTCAwaitDataChannelTimeoutMilliseconds)) + } + if config.InproxyProxyDestinationDialTimeoutMilliseconds != nil { + hash.Write([]byte("InproxyProxyDestinationDialTimeoutMilliseconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyProxyDestinationDialTimeoutMilliseconds)) + } + if config.InproxyPsiphonAPIRequestTimeoutMilliseconds != nil { + hash.Write([]byte("InproxyPsiphonAPIRequestTimeoutMilliseconds")) + binary.Write(hash, binary.LittleEndian, int64(*config.InproxyPsiphonAPIRequestTimeoutMilliseconds)) + } + config.dialParametersHash = hash.Sum(nil) } diff --git a/psiphon/controller.go b/psiphon/controller.go index 47e354437..9d8997cd1 100755 --- a/psiphon/controller.go +++ b/psiphon/controller.go @@ -25,6 +25,7 @@ package psiphon import ( "context" + "encoding/json" "fmt" "math/rand" "net" @@ -35,10 +36,12 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" 
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/resolver" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tactics" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tun" lrucache "github.com/cognusion/go-cache-lru" ) @@ -88,6 +91,11 @@ type Controller struct { staggerMutex sync.Mutex resolver *resolver.Resolver steeringIPCache *lrucache.Cache + inproxyProxyBrokerClientManager *InproxyBrokerClientManager + inproxyClientBrokerClientManager *InproxyBrokerClientManager + inproxyNATStateManager *InproxyNATStateManager + inproxyHandleTacticsMutex sync.Mutex + inproxyLastStoredTactics time.Time } // NewController initializes a new controller. @@ -193,7 +201,7 @@ func NewController(config *Config) (controller *Controller, err error) { packetTunnelTransport := NewPacketTunnelTransport() packetTunnelClient, err := tun.NewClient(&tun.ClientConfig{ - Logger: NoticeCommonLogger(), + Logger: NoticeCommonLogger(false), TunFileDescriptor: config.PacketTunnelTunFileDescriptor, TransparentDNSIPv4Address: config.PacketTunnelTransparentDNSIPv4Address, TransparentDNSIPv6Address: config.PacketTunnelTransparentDNSIPv6Address, @@ -207,6 +215,49 @@ func NewController(config *Config) (controller *Controller, err error) { controller.packetTunnelTransport = packetTunnelTransport } + // Initialize shared in-proxy broker clients to be used for all in-proxy + // client dials and in-proxy proxy operations. + // + // Using shared broker connections minimizes the overhead of establishing + // broker connections at the start of an in-proxy dial or operation. By + // design, established broker connections will be retained for up to the + // entire lifetime of the controller run, so past the end of client + // tunnel establishment. + // + // No network operations are performed by NewInproxyBrokerClientManager or + // NewInproxyNATStateManager; each manager operates on demand, when + // in-proxy dials or operations are invoked. + // + // The controller run may include client tunnel establishment, in-proxy + // proxy operations, or both. + // + // Due to the inproxy.InitiatorSessions.NewRoundTrip waitToShareSession + // application-level round trip limitation, there is one broker client + // manager for each of the client and proxy cases, so that neither + // initially blocks while trying to share the others session. + // + // One NAT state manager is shared between both the in-proxy client and + // proxy. While each may have different network discovery policies, any + // discovered network state is valid and useful for both consumers. + + // Both broker client and NAT state managers may require resets and update + // when tactics change. 
+ var tacticAppliedReceivers []TacticsAppliedReceiver + + isProxy := false + controller.inproxyClientBrokerClientManager = NewInproxyBrokerClientManager(config, isProxy) + tacticAppliedReceivers = append(tacticAppliedReceivers, controller.inproxyClientBrokerClientManager) + controller.inproxyNATStateManager = NewInproxyNATStateManager(config) + tacticAppliedReceivers = append(tacticAppliedReceivers, controller.inproxyNATStateManager) + + if config.InproxyEnableProxy { + isProxy = true + controller.inproxyProxyBrokerClientManager = NewInproxyBrokerClientManager(config, isProxy) + tacticAppliedReceivers = append(tacticAppliedReceivers, controller.inproxyProxyBrokerClientManager) + } + + controller.config.SetTacticsAppliedReceivers(tacticAppliedReceivers) + return controller, nil } @@ -266,62 +317,78 @@ func (controller *Controller) Run(ctx context.Context) { listenIP = IPv4Address.String() } - if !controller.config.DisableLocalSocksProxy { - socksProxy, err := NewSocksProxy(controller.config, controller, listenIP) - if err != nil { - NoticeError("error initializing local SOCKS proxy: %v", errors.Trace(err)) - return + // The controller run may include client tunnel establishment, in-proxy + // proxy operations, or both. Local tactics are shared between both modes + // and both modes can fetch tactics. + // + // Limitation: the upgrade downloader is not enabled when client tunnel + // establishment is disabled; upgrade version information is not + // currently distributed to in-proxy proxies + + if !controller.config.DisableTunnels { + + if !controller.config.DisableLocalSocksProxy { + socksProxy, err := NewSocksProxy(controller.config, controller, listenIP) + if err != nil { + NoticeError("error initializing local SOCKS proxy: %v", errors.Trace(err)) + return + } + defer socksProxy.Close() } - defer socksProxy.Close() - } - if !controller.config.DisableLocalHTTPProxy { - httpProxy, err := NewHttpProxy(controller.config, controller, listenIP) - if err != nil { - NoticeError("error initializing local HTTP proxy: %v", errors.Trace(err)) - return + if !controller.config.DisableLocalHTTPProxy { + httpProxy, err := NewHttpProxy(controller.config, controller, listenIP) + if err != nil { + NoticeError("error initializing local HTTP proxy: %v", errors.Trace(err)) + return + } + defer httpProxy.Close() } - defer httpProxy.Close() - } - if !controller.config.DisableRemoteServerListFetcher { + if !controller.config.DisableRemoteServerListFetcher { - if controller.config.RemoteServerListURLs != nil { - controller.runWaitGroup.Add(1) - go controller.remoteServerListFetcher( - "common", - FetchCommonRemoteServerList, - controller.signalFetchCommonRemoteServerList) + if controller.config.RemoteServerListURLs != nil { + controller.runWaitGroup.Add(1) + go controller.remoteServerListFetcher( + "common", + FetchCommonRemoteServerList, + controller.signalFetchCommonRemoteServerList) + } + + if controller.config.ObfuscatedServerListRootURLs != nil { + controller.runWaitGroup.Add(1) + go controller.remoteServerListFetcher( + "obfuscated", + FetchObfuscatedServerLists, + controller.signalFetchObfuscatedServerLists) + } } - if controller.config.ObfuscatedServerListRootURLs != nil { + if controller.config.EnableUpgradeDownload { controller.runWaitGroup.Add(1) - go controller.remoteServerListFetcher( - "obfuscated", - FetchObfuscatedServerLists, - controller.signalFetchObfuscatedServerLists) + go controller.upgradeDownloader() } - } - if controller.config.EnableUpgradeDownload { controller.runWaitGroup.Add(1) - go 
controller.upgradeDownloader() - } + go controller.serverEntriesReporter() - controller.runWaitGroup.Add(1) - go controller.serverEntriesReporter() + controller.runWaitGroup.Add(1) + go controller.connectedReporter() - controller.runWaitGroup.Add(1) - go controller.connectedReporter() + controller.runWaitGroup.Add(1) + go controller.establishTunnelWatcher() - controller.runWaitGroup.Add(1) - go controller.establishTunnelWatcher() + controller.runWaitGroup.Add(1) + go controller.runTunnels() - controller.runWaitGroup.Add(1) - go controller.runTunnels() + if controller.packetTunnelClient != nil { + controller.packetTunnelClient.Start() + } + } - if controller.packetTunnelClient != nil { - controller.packetTunnelClient.Start() + if controller.config.InproxyEnableProxy { + controller.runWaitGroup.Add(1) + go controller.runInproxyProxy() } // Wait while running @@ -1155,6 +1222,29 @@ func (controller *Controller) isFullyEstablished() bool { return len(controller.tunnels) >= controller.tunnelPoolSize } +// awaitFullyEstablished blocks until isFullyEstablished is true or the +// controller run ends. +func (controller *Controller) awaitFullyEstablished() bool { + + // TODO: don't poll, add a signal + + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for { + if controller.isFullyEstablished() { + return true + } + + select { + case <-ticker.C: + // Check isFullyEstablished again + case <-controller.runCtx.Done(): + return false + } + } +} + // numTunnels returns the number of active and outstanding tunnels. // Oustanding is the number of tunnels required to fill the pool of // active tunnels. @@ -1316,18 +1406,22 @@ func (controller *Controller) Dial( // When the countries do not match, the server establishes a port forward, as // it does for all port forwards in non-split tunnel mode. There is no // additional round trip for tunneled port forwards. - - splitTunnelHost, _, err := net.SplitHostPort(remoteAddr) - if err != nil { - return nil, errors.Trace(err) - } + // + // Each destination includes a host and port. Since there are special + // cases where the server performs transparent redirection for specific + // host:port combinations, including UDPInterceptUdpgwServerAddress, the + // classification can differ for the same host but different ports and so + // the classification is cached using the full address, host:port, as the + // key. While this results in additional classification round trips for + // destinations with the same domain but differing ports, in practise + // most destinations use only port 443. untunneledCache := controller.untunneledSplitTunnelClassifications - // If the destination hostname is in the untunneled split tunnel - // classifications cache, skip the round trip to the server and do the - // direct, untunneled dial immediately. - _, cachedUntunneled := untunneledCache.Get(splitTunnelHost) + // If the destination is in the untunneled split tunnel classifications + // cache, skip the round trip to the server and do the direct, untunneled + // dial immediately. + _, cachedUntunneled := untunneledCache.Get(remoteAddr) if !cachedUntunneled { @@ -1339,9 +1433,9 @@ func (controller *Controller) Dial( if !splitTunnel { - // Clear any cached untunneled classification entry for this destination - // hostname, as the server is now classifying it as tunneled. - untunneledCache.Delete(splitTunnelHost) + // Clear any cached untunneled classification entry for this + // destination, as the server is now classifying it as tunneled. 
+ untunneledCache.Delete(remoteAddr) return tunneledConn, nil } @@ -1349,10 +1443,10 @@ func (controller *Controller) Dial( // The server has indicated that the client should make a direct, // untunneled dial. Cache the classification to avoid this round trip in // the immediate future. - untunneledCache.Add(splitTunnelHost, true, lrucache.DefaultExpiration) + untunneledCache.Add(remoteAddr, true, lrucache.DefaultExpiration) } - NoticeUntunneled(splitTunnelHost) + NoticeUntunneled(remoteAddr) untunneledConn, err := controller.DirectDial(remoteAddr) if err != nil { @@ -1428,7 +1522,8 @@ func (p *protocolSelectionConstraints) isInitialCandidate( p.initialLimitTunnelProtocols, p.limitTunnelDialPortNumbers, p.limitQUICVersions, - excludeIntensive)) > 0 + excludeIntensive, + false)) > 0 } func (p *protocolSelectionConstraints) isCandidate( @@ -1441,7 +1536,8 @@ func (p *protocolSelectionConstraints) isCandidate( p.limitTunnelProtocols, p.limitTunnelDialPortNumbers, p.limitQUICVersions, - excludeIntensive)) > 0 + excludeIntensive, + false)) > 0 } func (p *protocolSelectionConstraints) canReplay( @@ -1455,13 +1551,15 @@ func (p *protocolSelectionConstraints) canReplay( } return common.Contains( - p.supportedProtocols(connectTunnelCount, excludeIntensive, serverEntry), + p.supportedProtocols( + connectTunnelCount, excludeIntensive, false, serverEntry), replayProtocol) } func (p *protocolSelectionConstraints) supportedProtocols( connectTunnelCount int, excludeIntensive bool, + excludeInproxy bool, serverEntry *protocol.ServerEntry) []string { limitTunnelProtocols := p.limitTunnelProtocols @@ -1478,15 +1576,18 @@ func (p *protocolSelectionConstraints) supportedProtocols( limitTunnelProtocols, p.limitTunnelDialPortNumbers, p.limitQUICVersions, - excludeIntensive) + excludeIntensive, + excludeInproxy) } func (p *protocolSelectionConstraints) selectProtocol( connectTunnelCount int, excludeIntensive bool, + excludeInproxy bool, serverEntry *protocol.ServerEntry) (string, bool) { - candidateProtocols := p.supportedProtocols(connectTunnelCount, excludeIntensive, serverEntry) + candidateProtocols := p.supportedProtocols( + connectTunnelCount, excludeIntensive, excludeInproxy, serverEntry) if len(candidateProtocols) == 0 { return "", false @@ -1501,7 +1602,6 @@ func (p *protocolSelectionConstraints) selectProtocol( index := prng.Intn(len(candidateProtocols)) return candidateProtocols[index], true - } type candidateServerEntry struct { @@ -1637,13 +1737,41 @@ func (controller *Controller) launchEstablishing() { initialLimitTunnelProtocols: p.TunnelProtocols(parameters.InitialLimitTunnelProtocols), initialLimitTunnelProtocolsCandidateCount: p.Int(parameters.InitialLimitTunnelProtocolsCandidateCount), limitTunnelProtocols: p.TunnelProtocols(parameters.LimitTunnelProtocols), - limitTunnelDialPortNumbers: protocol.TunnelProtocolPortLists( p.TunnelProtocolPortLists(parameters.LimitTunnelDialPortNumbers)), replayCandidateCount: p.Int(parameters.ReplayCandidateCount), } + // Adjust protocol limits for in-proxy personal proxy mode. In this mode, + // the client will make connections only through a proxy with the + // corresponding personal compartment ID, so non-in-proxy tunnel + // protocols are disabled. + + if len(controller.config.InproxyClientPersonalCompartmentIDs) > 0 { + + if len(controller.protocolSelectionConstraints.initialLimitTunnelProtocols) > 0 { + controller.protocolSelectionConstraints.initialLimitTunnelProtocols = + controller.protocolSelectionConstraints. 
+ initialLimitTunnelProtocols.OnlyInproxyTunnelProtocols() + } + + if len(controller.protocolSelectionConstraints.limitTunnelProtocols) > 0 { + controller.protocolSelectionConstraints.limitTunnelProtocols = + controller.protocolSelectionConstraints. + limitTunnelProtocols.OnlyInproxyTunnelProtocols() + } + + // This covers two cases: if there was no limitTunnelProtocols to + // start, then limit to any in-proxy tunnel protocol; or, if there + // was a limit but OnlyInproxyTunnelProtocols evaluates to an empty + // list, also set the limit to any in-proxy tunnel protocol. + if len(controller.protocolSelectionConstraints.limitTunnelProtocols) == 0 { + controller.protocolSelectionConstraints.limitTunnelProtocols = + protocol.InproxyTunnelProtocols + } + } + // ConnectionWorkerPoolSize may be set by tactics. workerPoolSize := p.Int(parameters.ConnectionWorkerPoolSize) @@ -1936,7 +2064,7 @@ loop: break } - if controller.config.TargetApiProtocol == protocol.PSIPHON_SSH_API_PROTOCOL && + if controller.config.TargetAPIProtocol == protocol.PSIPHON_API_PROTOCOL_SSH && !serverEntry.SupportsSSHAPIRequests() { continue } @@ -2146,8 +2274,12 @@ loop: // intensive. In this case, a StaggerConnectionWorkersMilliseconds // delay may still be incurred. - limitIntensiveConnectionWorkers := controller.config.GetParameters().Get().Int( - parameters.LimitIntensiveConnectionWorkers) + p := controller.config.GetParameters().Get() + limitIntensiveConnectionWorkers := p.Int(parameters.LimitIntensiveConnectionWorkers) + inproxySelectionProbability := p.Float(parameters.InproxyTunnelProtocolSelectionProbability) + staggerPeriod := p.Duration(parameters.StaggerConnectionWorkersPeriod) + staggerJitter := p.Float(parameters.StaggerConnectionWorkersJitter) + p.Close() controller.concurrentEstablishTunnelsMutex.Lock() @@ -2166,9 +2298,18 @@ loop: } selectProtocol := func(serverEntry *protocol.ServerEntry) (string, bool) { + + // The in-proxy protocol selection probability allows for + // tuning/limiting in-proxy usage independent of + // LimitTunnelProtocol targeting. + + onlyInproxy := len(controller.config.InproxyClientPersonalCompartmentIDs) > 0 + includeInproxy := onlyInproxy || prng.FlipWeightedCoin(inproxySelectionProbability) + return controller.protocolSelectionConstraints.selectProtocol( controller.establishConnectTunnelCount, excludeIntensive, + !includeInproxy, serverEntry) } @@ -2200,6 +2341,8 @@ loop: canReplay, selectProtocol, candidateServerEntry.serverEntry, + controller.inproxyClientBrokerClientManager, + controller.inproxyNATStateManager, false, controller.establishConnectTunnelCount, int(atomic.LoadInt32(&controller.establishedTunnelsCount))) @@ -2258,11 +2401,6 @@ loop: // The stagger is applied when establishConnectTunnelCount > 0 -- that // is, for all but the first dial. - p := controller.config.GetParameters().Get() - staggerPeriod := p.Duration(parameters.StaggerConnectionWorkersPeriod) - staggerJitter := p.Float(parameters.StaggerConnectionWorkersJitter) - p.Close() - if establishConnectTunnelCount > 0 && staggerPeriod != 0 { controller.staggerMutex.Lock() timer := time.NewTimer(prng.JitterDuration(staggerPeriod, staggerJitter)) @@ -2358,3 +2496,374 @@ func (controller *Controller) isStopEstablishing() bool { } return false } + +func (controller *Controller) runInproxyProxy() { + defer controller.runWaitGroup.Done() + + if !controller.config.DisableTactics { + + // Obtain and apply tactics before connecting to the broker and + // announcing proxies. 
+ + if controller.config.DisableTunnels { + + // When not running client tunnel establishment, perform an OOB tactics + // fetch, if required, here. + + GetTactics(controller.runCtx, controller.config) + + } else if !controller.config.InproxySkipAwaitFullyConnected { + + // When running client tunnel establishment, await establishment + // as this guarantees fresh tactics from either an OOB request or + // a handshake response. + // + // While it may be possible to proceed sooner, using cached + // tactics, waiting until establishment is complete avoids + // potential races between tactics updates. + + if !controller.awaitFullyEstablished() { + // Controller is shutting down + return + } + + } else { + + // InproxySkipAwaitFullyConnected is a special case to support + // server/server_test, where a client must be its own proxy; in + // this case, awaitFullyEstablished will block forever. + // inproxyAwaitBrokerSpecs simply waits until any broker specs + // become available, which is sufficient for the test but is not + // as robust as awaiting fresh tactics. + + isProxy := true + if !controller.inproxyAwaitBrokerSpecs(isProxy) { + // Controller is shutting down + return + } + } + } + + // Don't announce proxies if tactics indicates it won't be allowed. This + // is also enforced on the broker; this client-side check cuts down on + // load from well-behaved proxies. + + p := controller.config.GetParameters().Get() + allowProxy := p.Bool(parameters.InproxyAllowProxy) + activityNoticePeriod := p.Duration(parameters.InproxyProxyTotalActivityNoticePeriod) + p.Close() + + // Don't announce proxies when running on an incompatible network, such as + // a non-Psiphon VPN. + + compatibleNetwork := IsInproxyCompatibleNetworkType(controller.config.GetNetworkID()) + + // Using an upstream proxy is also an incompatible case. + + useUpstreamProxy := controller.config.UseUpstreamProxy() + + if !allowProxy || !compatibleNetwork || useUpstreamProxy || !inproxy.Enabled() { + if !allowProxy { + NoticeError("inproxy proxy: not allowed") + } + if !compatibleNetwork { + NoticeError("inproxy proxy: not run due to incompatible network") + } + if useUpstreamProxy { + NoticeError("inproxy proxy: not run due to upstream proxy configuration") + } + if !inproxy.Enabled() { + NoticeError("inproxy proxy: inproxy implementation is not enabled") + } + if controller.config.DisableTunnels { + // Signal failure -- and shutdown -- only if running in proxy-only mode. If also + // running a tunnel, keep running without proxies. + controller.SignalComponentFailure() + } + return + } + + // The debugLogging flag is passed to both NoticeCommonLogger and the + // inproxy package; skipping debug logs in the inproxy package, + // before calling into the notice logger, avoids unnecessary allocations + // and formatting when debug logging is off. + debugLogging := controller.config.InproxyEnableWebRTCDebugLogging + + var lastActivityNotice time.Time + var lastActivityConnectingClients, lastActivityConnectedClients int32 + var lastActivityConnectingClientsTotal, lastActivityConnectedClientsTotal int32 + var activityTotalBytesUp, activityTotalBytesDown int64 + activityUpdater := func( + connectingClients int32, + connectedClients int32, + bytesUp int64, + bytesDown int64, + _ time.Duration) { + + // This emit logic mirrors the logic for NoticeBytesTransferred and + // NoticeTotalBytesTransferred in tunnel.operateTunnel.
+ + // InproxyProxyActivity frequently emits bytes transferred since the + // last notice, when not idle; in addition to the current number of + // connecting and connected clients, whenever that changes. This + // frequent notice is excluded from diagnostics and is for UI + // activity display. + + if controller.config.EmitInproxyProxyActivity && + (bytesUp > 0 || bytesDown > 0) || + connectingClients != lastActivityConnectingClients || + connectedClients != lastActivityConnectedClients { + + NoticeInproxyProxyActivity( + connectingClients, connectedClients, bytesUp, bytesDown) + + lastActivityConnectingClients = connectingClients + lastActivityConnectedClients = connectedClients + } + + activityTotalBytesUp += bytesUp + activityTotalBytesDown += bytesDown + + // InproxyProxyTotalActivity periodically emits total bytes + // transferred since starting; in addition to the current number of + // connecting and connected clients, whenever that changes. This + // notice is for diagnostics. + + if lastActivityNotice.Add(activityNoticePeriod).Before(time.Now()) || + connectingClients != lastActivityConnectingClientsTotal || + connectedClients != lastActivityConnectedClientsTotal { + + NoticeInproxyProxyTotalActivity( + connectingClients, connectedClients, + activityTotalBytesUp, activityTotalBytesDown) + lastActivityNotice = time.Now() + + lastActivityConnectingClientsTotal = connectingClients + lastActivityConnectedClientsTotal = connectedClients + } + } + + config := &inproxy.ProxyConfig{ + Logger: NoticeCommonLogger(debugLogging), + EnableWebRTCDebugLogging: debugLogging, + WaitForNetworkConnectivity: controller.inproxyWaitForNetworkConnectivity, + GetBrokerClient: controller.inproxyGetProxyBrokerClient, + GetBaseAPIParameters: controller.inproxyGetProxyAPIParameters, + MakeWebRTCDialCoordinator: controller.inproxyMakeProxyWebRTCDialCoordinator, + HandleTacticsPayload: controller.inproxyHandleProxyTacticsPayload, + MaxClients: controller.config.InproxyMaxClients, + LimitUpstreamBytesPerSecond: controller.config.InproxyLimitUpstreamBytesPerSecond, + LimitDownstreamBytesPerSecond: controller.config.InproxyLimitDownstreamBytesPerSecond, + + OperatorMessageHandler: func(messageJSON string) { + NoticeInproxyOperatorMessage(messageJSON) + }, + + ActivityUpdater: activityUpdater, + } + + proxy, err := inproxy.NewProxy(config) + if err != nil { + NoticeError("inproxy.NewProxy failed: %v", errors.Trace(err)) + controller.SignalComponentFailure() + return + } + + NoticeInfo("inproxy proxy: running") + + proxy.Run(controller.runCtx) + + // Emit one last NoticeInproxyProxyTotalActivity with the final byte counts. 
+ NoticeInproxyProxyTotalActivity( + lastActivityConnectingClients, lastActivityConnectedClients, + activityTotalBytesUp, activityTotalBytesDown) + + NoticeInfo("inproxy proxy: stopped") +} + +func (controller *Controller) inproxyAwaitBrokerSpecs(isProxy bool) bool { + + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + for { + p := controller.config.GetParameters().Get() + var brokerSpecs parameters.InproxyBrokerSpecsValue + if isProxy { + brokerSpecs = p.InproxyBrokerSpecs( + parameters.InproxyProxyBrokerSpecs, parameters.InproxyBrokerSpecs) + } else { + brokerSpecs = p.InproxyBrokerSpecs( + parameters.InproxyClientBrokerSpecs, parameters.InproxyBrokerSpecs) + } + p.Close() + + if len(brokerSpecs) > 0 { + return true + } + + select { + case <-ticker.C: + // Check for broker specs again + case <-controller.runCtx.Done(): + return false + } + } +} + +func (controller *Controller) inproxyWaitForNetworkConnectivity() bool { + return WaitForNetworkConnectivity( + controller.runCtx, + controller.config.NetworkConnectivityChecker) +} + +// inproxyGetProxyBrokerClient returns the broker client shared by all proxy +// operations. +func (controller *Controller) inproxyGetProxyBrokerClient() (*inproxy.BrokerClient, error) { + + brokerClient, _, err := controller.inproxyProxyBrokerClientManager.GetBrokerClient( + controller.config.GetNetworkID()) + if err != nil { + return nil, errors.Trace(err) + } + return brokerClient, nil +} + +func (controller *Controller) inproxyGetProxyAPIParameters() ( + common.APIParameters, string, error) { + + // TODO: include broker fronting dial parameters to be logged by the + // broker. + params := getBaseAPIParameters( + baseParametersNoDialParameters, true, controller.config, nil) + + if controller.config.DisableTactics { + return params, "", nil + } + + // Add the stored tactics tag, so that the broker can return new tactics if + // available. + // + // The active network ID is recorded and returned, and is rechecked for + // consistency when storing any new tactics returned from the broker; + // other tactics fetches have this same check. + + networkID := controller.config.GetNetworkID() + + err := tactics.SetTacticsAPIParameters( + GetTacticsStorer(controller.config), networkID, params) + if err != nil { + return nil, "", errors.Trace(err) + } + + return params, networkID, nil +} + +func (controller *Controller) inproxyMakeProxyWebRTCDialCoordinator() ( + inproxy.WebRTCDialCoordinator, error) { + + // nil is passed in for both InproxySTUNDialParameters and + // InproxyWebRTCDialParameters, so those parameters will be newly + // auto-generated for each client/proxy connection attempt. Unlike the + // in-proxy client, there is currently no replay of STUN or WebRTC dial + // parameters. + + isProxy := true + webRTCDialInstance, err := NewInproxyWebRTCDialInstance( + controller.config, + controller.config.GetNetworkID(), + isProxy, + controller.inproxyNATStateManager, + nil, + nil) + if err != nil { + return nil, errors.Trace(err) + } + + return webRTCDialInstance, nil +} +
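// inproxyHandleProxyTacticsPayload, below, throttles TTL-only tactics stores
// to at most once per minute. A compact sketch of that style of throttle,
// assuming only the standard time package:
//
//	type minuteThrottle struct {
//		last time.Time
//	}
//
//	// allow reports whether an action may proceed now, permitting it at
//	// most once per minute.
//	func (t *minuteThrottle) allow(now time.Time) bool {
//		if now.Sub(t.last) < time.Minute {
//			return false
//		}
//		t.last = now
//		return true
//	}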
+// inproxyHandleProxyTacticsPayload handles new tactics returned by the +// broker to the proxy and returns true when tactics have changed. +// +// inproxyHandleProxyTacticsPayload duplicates some tactics-handling code from +// doHandshakeRequest. +func (controller *Controller) inproxyHandleProxyTacticsPayload( + networkID string, tacticsPayload []byte) bool { + + if controller.config.DisableTactics { + return false + } + + if controller.config.GetNetworkID() != networkID { + // Ignore the tactics if the network ID has changed. + return false + } + + var payload *tactics.Payload + err := json.Unmarshal(tacticsPayload, &payload) + if err != nil { + NoticeError("unmarshal tactics payload failed: %v", errors.Trace(err)) + return false + } + + if payload == nil { + // See "null" comment in doHandshakeRequest. + return false + } + + // The in-proxy proxy implementation arranges for the first ProxyAnnounce + // request to get a head start in case there are new tactics available + // from the broker. Additional requests are also staggered. + // + // It can still happen that concurrent in-flight ProxyAnnounce requests + // receive duplicate new-tactics responses. + // + // TODO: detect this case and avoid resetting the broker client and NAT + // state managers more than necessary. + + // Serialize processing of tactics from ProxyAnnounce responses. + controller.inproxyHandleTacticsMutex.Lock() + defer controller.inproxyHandleTacticsMutex.Unlock() + + // When tactics are unchanged, the broker, as in the handshake case, + // returns a tactics payload, but without new tactics. As in the + // handshake case, HandleTacticsPayload is called in order to extend the + // TTL of the locally cached, unchanged tactics. Due to the potential + // high frequency and concurrency of ProxyAnnounce requests vs. + // handshakes, a limit is added to update the data store's tactics TTL no + // more than once per minute. + + appliedNewTactics := payload.Tactics != nil + now := time.Now() + if !appliedNewTactics && now.Sub(controller.inproxyLastStoredTactics) < 1*time.Minute { + // Skip TTL-only disk write. + return false + } + controller.inproxyLastStoredTactics = now + + tacticsRecord, err := tactics.HandleTacticsPayload( + GetTacticsStorer(controller.config), networkID, payload) + if err != nil { + NoticeError("HandleTacticsPayload failed: %v", errors.Trace(err)) + return false + } + + if tacticsRecord != nil && + prng.FlipWeightedCoin(tacticsRecord.Tactics.Probability) { + + // SetParameters signals registered components, including broker + // client and NAT state managers, that must reset upon tactics changes.
+ + err := controller.config.SetParameters( + tacticsRecord.Tag, true, tacticsRecord.Tactics.Parameters) + if err != nil { + NoticeInfo("apply inproxy broker tactics failed: %s", err) + return false + } + } else { + appliedNewTactics = false + } + + return appliedNewTactics +} diff --git a/psiphon/controller_test.go b/psiphon/controller_test.go index cee0e75ae..bd9e8c916 100644 --- a/psiphon/controller_test.go +++ b/psiphon/controller_test.go @@ -85,119 +85,64 @@ func TestMain(m *testing.M) { func TestUntunneledUpgradeDownload(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: true, - protocol: "", - clientIsLatestVersion: false, - disableUntunneledUpgrade: false, - disableEstablishing: true, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, + expectNoServerEntries: true, + protocol: "", + disableEstablishing: true, }) } func TestUntunneledResumableUpgradeDownload(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: true, - protocol: "", - clientIsLatestVersion: false, - disableUntunneledUpgrade: false, - disableEstablishing: true, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: true, - transformHostNames: false, - useFragmentor: false, + expectNoServerEntries: true, + protocol: "", + disableEstablishing: true, + disruptNetwork: true, }) } func TestUntunneledUpgradeClientIsLatestVersion(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: true, - protocol: "", - clientIsLatestVersion: true, - disableUntunneledUpgrade: false, - disableEstablishing: true, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, + expectNoServerEntries: true, + protocol: "", + clientIsLatestVersion: true, + disableEstablishing: true, }) } func TestUntunneledResumableFetchRemoteServerList(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: true, - protocol: "", - clientIsLatestVersion: true, - disableUntunneledUpgrade: false, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: true, - transformHostNames: false, - useFragmentor: false, + expectNoServerEntries: true, + protocol: "", + clientIsLatestVersion: true, + disruptNetwork: true, }) } func TestTunneledUpgradeClientIsLatestVersion(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: "", clientIsLatestVersion: true, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, }) } func TestSSH(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_SSH, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, }) } func TestObfuscatedSSH(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: 
false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, }) } @@ -207,102 +152,50 @@ func TestTLS(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, }) } func TestUnfrontedMeek(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, }) } func TestUnfrontedMeekWithTransformer(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK, - clientIsLatestVersion: true, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, transformHostNames: true, - useFragmentor: false, }) } func TestFrontedMeek(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_FRONTED_MEEK, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, }) } func TestFrontedMeekWithTransformer(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_FRONTED_MEEK, - clientIsLatestVersion: true, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, transformHostNames: true, - useFragmentor: false, }) } func TestFrontedMeekHTTP(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_FRONTED_MEEK_HTTP, - clientIsLatestVersion: true, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, }) } @@ -311,116 +204,62 @@ func TestUnfrontedMeekHTTPS(t *testing.T) { &controllerRunConfig{ expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, }) } func TestUnfrontedMeekHTTPSWithTransformer(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, - protocol: protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS, - clientIsLatestVersion: true, - disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: true, - useFragmentor: false, + protocol: protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS, + clientIsLatestVersion: true, + transformHostNames: true, }) } func 
TestDisabledApi(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: "", clientIsLatestVersion: true, disableUntunneledUpgrade: true, - disableEstablishing: false, disableApi: true, tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, }) } func TestObfuscatedSSHWithUpstreamProxy(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, useUpstreamProxy: true, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, }) } func TestUnfrontedMeekWithUpstreamProxy(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, useUpstreamProxy: true, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, }) } func TestUnfrontedMeekHTTPSWithUpstreamProxy(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, useUpstreamProxy: true, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, }) } func TestObfuscatedSSHFragmentor(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, useFragmentor: true, }) } @@ -428,16 +267,8 @@ func TestObfuscatedSSHFragmentor(t *testing.T) { func TestFrontedMeekFragmentor(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_FRONTED_MEEK, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, useFragmentor: true, }) } @@ -448,17 +279,8 @@ func TestQUIC(t *testing.T) { } controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_QUIC_OBFUSCATED_SSH, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, }) } @@ -471,34 +293,69 @@ func TestFrontedQUIC(t *testing.T) { } controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_FRONTED_MEEK_QUIC_OBFUSCATED_SSH, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, - tunnelPoolSize: 1, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, + }) +} + +func TestInproxyOSSH(t *testing.T) { + + t.Skipf("temporarily disabled") + + controllerRun(t, + &controllerRunConfig{ + protocol: "INPROXY-WEBRTC-OSSH", 
+ disableUntunneledUpgrade: true, + }) +} + +func TestInproxyQUICOSSH(t *testing.T) { + + t.Skipf("temporarily disabled") + + controllerRun(t, + &controllerRunConfig{ + protocol: "INPROXY-WEBRTC-QUIC-OSSH", + disableUntunneledUpgrade: true, + }) +} + +func TestInproxyUnfrontedMeekHTTPS(t *testing.T) { + + t.Skipf("temporarily disabled") + + controllerRun(t, + &controllerRunConfig{ + protocol: "INPROXY-WEBRTC-UNFRONTED-MEEK-HTTPS-OSSH", + disableUntunneledUpgrade: true, + }) +} + +func TestInproxyTLSOSSH(t *testing.T) { + + t.Skipf("temporarily disabled") + + controllerRun(t, + &controllerRunConfig{ + protocol: "INPROXY-WEBRTC-TLS-OSSH", + disableUntunneledUpgrade: true, }) } func TestTunnelPool(t *testing.T) { controllerRun(t, &controllerRunConfig{ - expectNoServerEntries: false, protocol: protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH, - clientIsLatestVersion: false, disableUntunneledUpgrade: true, - disableEstablishing: false, - disableApi: false, tunnelPoolSize: 2, - useUpstreamProxy: false, - disruptNetwork: false, - transformHostNames: false, - useFragmentor: false, + }) +} + +func TestLegacyAPIEncoding(t *testing.T) { + controllerRun(t, + &controllerRunConfig{ + protocol: protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH, + useLegacyAPIEncoding: true, }) } @@ -514,6 +371,7 @@ type controllerRunConfig struct { disruptNetwork bool transformHostNames bool useFragmentor bool + useLegacyAPIEncoding bool } func controllerRun(t *testing.T, runConfig *controllerRunConfig) { @@ -533,7 +391,10 @@ func controllerRun(t *testing.T, runConfig *controllerRunConfig) { // Note: a successful tactics request may modify config parameters. var modifyConfig map[string]interface{} - json.Unmarshal(configJSON, &modifyConfig) + err = json.Unmarshal(configJSON, &modifyConfig) + if err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } modifyConfig["DataRootDirectory"] = testDataDirName @@ -574,6 +435,10 @@ func controllerRun(t *testing.T, runConfig *controllerRunConfig) { modifyConfig["ObfuscatedSSHMaxPadding"] = 8192 } + if runConfig.useLegacyAPIEncoding { + modifyConfig["TargetAPIEncoding"] = protocol.PSIPHON_API_ENCODING_JSON + } + configJSON, _ = json.Marshal(modifyConfig) config, err := LoadConfig(configJSON) diff --git a/psiphon/dataStore.go b/psiphon/dataStore.go index 0f596c3f0..60a2678c2 100644 --- a/psiphon/dataStore.go +++ b/psiphon/dataStore.go @@ -23,6 +23,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "math" "os" @@ -49,9 +50,11 @@ var ( datastoreTacticsBucket = []byte("tactics") datastoreSpeedTestSamplesBucket = []byte("speedTestSamples") datastoreDialParametersBucket = []byte("dialParameters") + datastoreNetworkReplayParametersBucket = []byte("networkReplayParameters") datastoreLastConnectedKey = "lastConnected" datastoreLastServerEntryFilterKey = []byte("lastServerEntryFilter") datastoreAffinityServerEntryIDKey = []byte("affinityServerEntryID") + datastoreInproxyCommonCompartmentIDsKey = []byte("inproxyCommonCompartmentIDs") datastorePersistentStatTypeRemoteServerList = string(datastoreRemoteServerListStatsBucket) datastorePersistentStatTypeFailedTunnel = string(datastoreFailedTunnelStatsBucket) datastoreServerEntryFetchGCThreshold = 10 @@ -691,13 +694,14 @@ func newTargetServerEntryIterator(config *Config, isTactics bool) (bool, *Server if len(limitTunnelProtocols) > 0 { // At the ServerEntryIterator level, only limitTunnelProtocols is applied; - // excludeIntensive is handled higher up. + // excludeIntensive and excludeInproxt are handled higher up. 
if len(serverEntry.GetSupportedProtocols( conditionallyEnabledComponents{}, config.UseUpstreamProxy(), limitTunnelProtocols, limitTunnelDialPortNumbers, limitQUICVersions, + false, false)) == 0 { return false, nil, errors.Tracef( "TargetServerEntry does not support LimitTunnelProtocols: %v", limitTunnelProtocols) @@ -2053,6 +2057,235 @@ func GetAffinityServerEntryAndDialParameters( return serverEntryFields, dialParams, nil } +// GetSignedServerEntryFields loads, from the datastore, the raw JSON server +// entry fields for the specified server entry. +// +// The protocol.ServerEntryFields returned by GetSignedServerEntryFields will +// include all fields required to verify the server entry signature, +// including new fields added after the current client version, which do not +// get unmarshaled into protocol.ServerEntry. +func GetSignedServerEntryFields(ipAddress string) (protocol.ServerEntryFields, error) { + + var serverEntryFields protocol.ServerEntryFields + + err := datastoreView(func(tx *datastoreTx) error { + + serverEntries := tx.bucket(datastoreServerEntriesBucket) + + key := []byte(ipAddress) + + serverEntryRecord := serverEntries.get(key) + if serverEntryRecord == nil { + return errors.TraceNew("server entry not found") + } + + err := json.Unmarshal( + serverEntryRecord, + &serverEntryFields) + if err != nil { + return errors.Trace(err) + } + + return nil + }) + if err != nil { + return nil, errors.Trace(err) + } + + err = serverEntryFields.ToSignedFields() + if err != nil { + return nil, errors.Trace(err) + } + + return serverEntryFields, nil +} + +// StoreInproxyCommonCompartmentIDs stores a list of in-proxy common +// compartment IDs. Clients obtain common compartment IDs from tactics; +// persisting the IDs enables a scheme whereby existing clients may continue +// to use common compartment IDs, and access the related in-proxy proxy +// matches, even after the compartment IDs are de-listed from tactics. +// +// The caller is responsible for merging new and existing compartment IDs into +// the input list, and trimming the length of the list appropriately. +func StoreInproxyCommonCompartmentIDs(compartmentIDs []string) error { + + value, err := json.Marshal(compartmentIDs) + if err != nil { + return errors.Trace(err) + } + + err = setBucketValue( + datastoreKeyValueBucket, + datastoreInproxyCommonCompartmentIDsKey, + value) + return errors.Trace(err) +} + +// LoadInproxyCommonCompartmentIDs returns the list of known, persisted +// in-proxy common compartment IDs. LoadInproxyCommonCompartmentIDs will +// return nil, nil when there is no stored list. +func LoadInproxyCommonCompartmentIDs() ([]string, error) { + + var compartmentIDs []string + + err := getBucketValue( + datastoreKeyValueBucket, + datastoreInproxyCommonCompartmentIDsKey, + func(value []byte) error { + if value == nil { + return nil + } + + // Note: unlike with server entries, this record is not deleted + // when the unmarshal fails, as the caller should proceed with + // any common compartment IDs available with tactics; and + // subsequently call StoreInproxyCommonCompartmentIDs, writing + // over this record. 
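// The caller-side merge-and-trim responsibility described for
// StoreInproxyCommonCompartmentIDs might look roughly like this sketch; the
// variable names and maxLength handling are assumptions for illustration.
//
//	newIDs := tacticsCompartmentIDs
//	existingIDs, _ := LoadInproxyCommonCompartmentIDs()
//	merged := append([]string{}, newIDs...)
//	for _, id := range existingIDs {
//		if !common.Contains(merged, id) {
//			merged = append(merged, id)
//		}
//	}
//	if len(merged) > maxLength {
//		merged = merged[:maxLength]
//	}
//	_ = StoreInproxyCommonCompartmentIDs(merged)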
+ + err := json.Unmarshal(value, &compartmentIDs) + if err != nil { + return errors.Trace(err) + } + + return nil + }) + if err != nil { + return nil, errors.Trace(err) + } + + return compartmentIDs, nil +} + +// makeNetworkReplayParametersKey creates a unique key for the replay +// parameters which reflects the network ID context; the replay data type, R; +// and the replay ID, which uniquely identifies the object that is replayed +// (for example, an in-proxy broker public key, uniquely identifying a +// broker). +func makeNetworkReplayParametersKey[R any](networkID, replayID string) []byte { + + // A pointer to an R is used instead of stack (or heap) allocating a full + // R object. As a result, the %T will include a '*' prefix, and this is + // removed by the [1:]. + // + // Fields are delimited using 0 bytes, which aren't expected to occur in + // the field string values. + + var t *R + key := append(append([]byte(nil), []byte(networkID)...), 0) + key = append(append(key, []byte(fmt.Sprintf("%T", t)[1:])...), 0) + key = append(key, []byte(replayID)...) + return key +} + +// SetNetworkReplayParameters stores replay parameters associated with the +// specified context and object. +// +// Limitation: unlike server dial parameters, the datastore does not prune +// replay records. +func SetNetworkReplayParameters[R any](networkID, replayID string, replayParams *R) error { + + key := makeNetworkReplayParametersKey[R](networkID, replayID) + + data, err := json.Marshal(replayParams) + if err != nil { + return errors.Trace(err) + } + + return setBucketValue(datastoreNetworkReplayParametersBucket, key, data) +} + +// ShuffleAndGetNetworkReplayParameters takes a list of candidate objects and +// selects one. The candidates are considered in random order. The first +// candidate with a valid replay record is returned, along with its replay +// parameters. The caller provides isValidReplay which should indicate if +// replay parameters remain valid; the caller should check for expiry and +// changes to the underlying tactics. When no valid replay parameters are +// found, ShuffleAndGetNetworkReplayParameters returns a candidate and nil +// replay parameters. +func ShuffleAndGetNetworkReplayParameters[C, R any]( + networkID string, + replayEnabled bool, + candidates []*C, + getReplayID func(*C) string, + isValidReplay func(*C, *R) bool) (*C, *R, error) { + + if len(candidates) < 1 { + return nil, nil, errors.TraceNew("no candidates") + } + + // Don't shuffle or otherwise mutate the candidates slice, which may be a + // tactics parameter. + permutedIndexes := prng.Perm(len(candidates)) + + candidate := candidates[permutedIndexes[0]] + var replay *R + + if !replayEnabled { + // If replay is disabled, return the first post-shuffle candidate with + // nil replay parameters. + return candidate, replay, nil + } + + err := datastoreUpdate(func(tx *datastoreTx) error { + + bucket := tx.bucket(datastoreNetworkReplayParametersBucket) + + for _, i := range permutedIndexes { + c := candidates[i] + key := makeNetworkReplayParametersKey[R](networkID, getReplayID(c)) + value := bucket.get(key) + if value == nil { + continue + } + var r *R + err := json.Unmarshal(value, &r) + if err != nil { + + // Delete the record. This avoids continually checking it. + // Note that the deletes performed here won't prune records + // for old candidates which are no longer passed in to + // ShuffleAndGetNetworkReplayParameters.
+ NoticeWarning( + "ShuffleAndGetNetworkReplayParameters: unmarshal failed: %s", + errors.Trace(err)) + _ = bucket.delete(key) + continue + } + if isValidReplay(c, r) { + candidate = c + replay = r + return nil + } else { + + // Delete the record if it's no longer valid due to expiry or + // tactics changes. This avoids continually checking it. + _ = bucket.delete(key) + continue + } + } + + // No valid replay parameters were found, so candidates[0] and a nil + // replay will be returned. + return nil + }) + if err != nil { + return nil, nil, errors.Trace(err) + } + + return candidate, replay, nil + +} + +// DeleteNetworkReplayParameters deletes the replay record associated with the +// specified context and object. +func DeleteNetworkReplayParameters[R any](networkID, replayID string) error { + + key := makeNetworkReplayParametersKey[R](networkID, replayID) + + return deleteBucketValue(datastoreNetworkReplayParametersBucket, key) +} + func setBucketValue(bucket, key, value []byte) error { err := datastoreUpdate(func(tx *datastoreTx) error { diff --git a/psiphon/dataStoreRecovery_test.go b/psiphon/dataStoreRecovery_test.go index 604bd4d85..6e0092e4e 100644 --- a/psiphon/dataStoreRecovery_test.go +++ b/psiphon/dataStoreRecovery_test.go @@ -55,8 +55,8 @@ func TestBoltResiliency(t *testing.T) { clientConfigJSON := ` { "ClientPlatform" : "", - "ClientVersion" : "0", - "SponsorId" : "0", + "ClientVersion" : "0000000000000000", + "SponsorId" : "0000000000000000", "PropagationChannelId" : "0", "ConnectionWorkerPoolSize" : 10, "EstablishTunnelTimeoutSeconds" : 1, diff --git a/psiphon/dataStore_bolt.go b/psiphon/dataStore_bolt.go index 79150835d..0aabb5016 100644 --- a/psiphon/dataStore_bolt.go +++ b/psiphon/dataStore_bolt.go @@ -169,6 +169,7 @@ func tryDatastoreOpenDB( datastoreTacticsBucket, datastoreSpeedTestSamplesBucket, datastoreDialParametersBucket, + datastoreNetworkReplayParametersBucket, } for _, bucket := range requiredBuckets { _, err := tx.CreateBucketIfNotExists(bucket) diff --git a/psiphon/dialParameters.go b/psiphon/dialParameters.go index c89779f45..4d45e324d 100644 --- a/psiphon/dialParameters.go +++ b/psiphon/dialParameters.go @@ -28,13 +28,13 @@ import ( "net" "net/http" "strconv" - "strings" "sync/atomic" "time" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/fragmentor" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/obfuscator" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" @@ -109,7 +109,6 @@ type DialParameters struct { MeekVerifyPins []string MeekHostHeader string MeekObfuscatorPaddingSeed *prng.Seed - MeekTLSPaddingSize int MeekResolvedIPAddress atomic.Value `json:"-"` TLSOSSHTransformedSNIServerName bool @@ -165,6 +164,15 @@ type DialParameters struct { steeringIPCache *lrucache.Cache `json:"-"` steeringIPCacheKey string `json:"-"` + inproxyDialInitialized bool `json:"-"` + inproxyBrokerClient *inproxy.BrokerClient `json:"-"` + inproxyBrokerDialParameters *InproxyBrokerDialParameters `json:"-"` + inproxyPackedSignedServerEntry []byte `json:"-"` + inproxyNATStateManager *InproxyNATStateManager `json:"-"` + InproxySTUNDialParameters *InproxySTUNDialParameters + InproxyWebRTCDialParameters *InproxyWebRTCDialParameters + inproxyConn atomic.Value 
`json:"-"` + dialConfig *DialConfig `json:"-"` + meekConfig *MeekConfig `json:"-"` } @@ -193,10 +201,17 @@ func MakeDialParameters( canReplay func(serverEntry *protocol.ServerEntry, replayProtocol string) bool, selectProtocol func(serverEntry *protocol.ServerEntry) (string, bool), serverEntry *protocol.ServerEntry, + inproxyClientBrokerClientManager *InproxyBrokerClientManager, + inproxyClientNATStateManager *InproxyNATStateManager, isTactics bool, candidateNumber int, establishedTunnelsCount int) (*DialParameters, error) { + // Note: a subset of this code is duplicated in + // MakeInproxyBrokerDialParameters and makeFrontedHTTPClient, and all + // functions need to be updated when, e.g., new TLS obfuscation + // parameters are added. + networkID := config.GetNetworkID() p := config.GetParameters().Get() @@ -224,6 +239,8 @@ func MakeDialParameters( replayHTTPTransformerParameters := p.Bool(parameters.ReplayHTTPTransformerParameters) replayOSSHSeedTransformerParameters := p.Bool(parameters.ReplayOSSHSeedTransformerParameters) replayOSSHPrefix := p.Bool(parameters.ReplayOSSHPrefix) + replayInproxySTUN := p.Bool(parameters.ReplayInproxySTUN) + replayInproxyWebRTC := p.Bool(parameters.ReplayInproxyWebRTC) // Check for existing dial parameters for this server/network ID. @@ -280,7 +297,8 @@ func MakeDialParameters( // ReplayIgnoreChangedConfigState is set. One case is the call // below to fragmentor.NewUpstreamConfig, made when initializing // dialParams.dialConfig. - (!replayIgnoreChangedConfigState && !bytes.Equal(dialParams.LastUsedConfigStateHash, configStateHash)) || + (!replayIgnoreChangedConfigState && + !bytes.Equal(dialParams.LastUsedConfigStateHash, configStateHash)) || // Replay is disabled when the server entry has changed. !bytes.Equal(dialParams.LastUsedServerEntryHash, serverEntryHash) || @@ -290,6 +308,15 @@ func MakeDialParameters( (dialParams.QUICVersion != "" && !common.Contains(protocol.SupportedQUICVersions, dialParams.QUICVersion)) || + // Prioritize adjusting use of 3rd party infrastructure -- public + // STUN servers -- over replay, even with IgnoreChangedConfigState set. + (dialParams.ConjureSTUNServerAddress != "" && + !common.Contains( + p.Strings(parameters.ConjureSTUNServerAddresses), + dialParams.ConjureSTUNServerAddress)) || + (dialParams.InproxySTUNDialParameters != nil && + !dialParams.InproxySTUNDialParameters.IsValidClientReplay(p)) || + // Legacy clients use ConjureAPIRegistrarURL with // gotapdance.tapdance.APIRegistrar and new clients use // ConjureAPIRegistrarBidirectionalURL with @@ -436,6 +463,15 @@ func MakeDialParameters( dialParams.TunnelProtocol = selectedProtocol } + if isTactics && !protocol.TunnelProtocolSupportsTactics(dialParams.TunnelProtocol) { + + NoticeSkipServerEntry( + "protocol does not support tactics request: %s", + dialParams.TunnelProtocol) + + return nil, nil + } + // Skip this candidate when the clients tactics restrict usage of the // provider ID. See the corresponding server-side enforcement comments in // server.TacticsListener.accept. @@ -460,6 +496,9 @@ func MakeDialParameters( // Skip this candidate when the clients tactics restrict usage of the // fronting provider ID. See the corresponding server-side enforcement // comments in server.MeekServer.getSessionOrEndpoint. + // + // RestrictFrontingProviderIDs applies only to fronted meek tunnels, where + // all traffic is relayed through a fronting provider.
if protocol.TunnelProtocolUsesFrontedMeek(dialParams.TunnelProtocol) && common.Contains( p.Strings(parameters.RestrictFrontingProviderIDs), @@ -515,7 +554,7 @@ func MakeDialParameters( if (!isReplay || !replayBPF) && ClientBPFEnabled() && - protocol.TunnelProtocolUsesTCP(dialParams.TunnelProtocol) { + protocol.TunnelProtocolMayUseClientBPF(dialParams.TunnelProtocol) { if p.WeightedCoinFlip(parameters.BPFClientTCPProbability) { dialParams.BPFProgramName = "" @@ -603,7 +642,10 @@ func MakeDialParameters( dialParams.ConjureAPIRegistrarBidirectionalURL = apiURL frontingSpecs := p.FrontingSpecs(parameters.ConjureAPIRegistrarFrontingSpecs) + + var frontingTransport string dialParams.FrontingProviderID, + frontingTransport, dialParams.MeekFrontingDialAddress, dialParams.MeekSNIServerName, dialParams.MeekVerifyServerName, @@ -614,6 +656,10 @@ func MakeDialParameters( return nil, errors.Trace(err) } + if frontingTransport != protocol.FRONTING_TRANSPORT_HTTPS { + return nil, errors.TraceNew("unsupported fronting transport") + } + if config.DisableSystemRootCAs { return nil, errors.TraceNew("TLS certificates must be verified in Conjure API registration") } @@ -779,7 +825,8 @@ func MakeDialParameters( protocol.TunnelProtocolUsesQUIC(dialParams.TunnelProtocol) { isFronted := protocol.TunnelProtocolUsesFrontedMeekQUIC(dialParams.TunnelProtocol) - dialParams.QUICVersion = selectQUICVersion(isFronted, serverEntry, p) + isInproxy := protocol.TunnelProtocolUsesInproxy(dialParams.TunnelProtocol) + dialParams.QUICVersion = selectQUICVersion(isFronted, isInproxy, serverEntry, p) // Due to potential tactics configurations, it may be that no QUIC // version is selected. Abort immediately, with no error, as in the @@ -859,9 +906,19 @@ func MakeDialParameters( // dialParams.ResolveParameters must be nil when the dial address is an IP // address to ensure that no DNS dial parameters are reported in metrics // or diagnostics when when no domain is resolved. + // + // No resolve parameters are initialized for in-proxy dials; broker and + // STUN domain resolves use distinct ResolveParameters; and the proxy, + // not the client, resolves any 2nd hop dial address domain. + // + // Limitation: DNSResolverPreresolvedIPAddressCIDRs could be applied by + // the in-proxy client, and relayed to the proxy, enabling a preresolved + // dial by the proxy, but this is currently not compatible with broker + // dial destination verification. useResolver := (protocol.TunnelProtocolUsesFrontedMeek(dialParams.TunnelProtocol) || dialParams.ConjureAPIRegistration) && + !protocol.TunnelProtocolUsesInproxy(dialParams.TunnelProtocol) && net.ParseIP(dialParams.MeekFrontingDialAddress) == nil if (!isReplay || !replayResolveParameters) && useResolver { @@ -916,7 +973,12 @@ func MakeDialParameters( // OSSH prefix and seed transform are applied only to the OSSH tunnel protocol, // and not to any other protocol layered over OSSH. - if dialParams.TunnelProtocol == protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH { + if protocol.TunnelProtocolIsObfuscatedSSH(dialParams.TunnelProtocol) { + + // Limitation: in the case of in-proxy OSSH, the client will get and + // apply tactics based on its geolocation, but any OSSH prefix is + // visible on the wire only after the 2nd hop. Configuring an OSSH + // prefix based on the in-proxy proxy geolocation would be preferable. 
if serverEntry.DisableOSSHTransforms { @@ -988,7 +1050,8 @@ func MakeDialParameters( isFronted := protocol.TunnelProtocolUsesFrontedMeek(dialParams.TunnelProtocol) - params, err := makeHTTPTransformerParameters(config.GetParameters().Get(), serverEntry.FrontingProviderID, isFronted) + params, err := makeHTTPTransformerParameters( + config.GetParameters().Get(), serverEntry.FrontingProviderID, isFronted) if err != nil { return nil, errors.Trace(err) } @@ -1001,6 +1064,112 @@ func MakeDialParameters( } } + // In-proxy dial configuration + + // For untunneled tactics requests, meek servers running in-proxy tunnel + // protocols may be used, but the actual in-proxy 1st hop dial is skipped + // and the meek server is used directly. + if !isTactics && protocol.TunnelProtocolUsesInproxy(dialParams.TunnelProtocol) { + + // Check for incompatible networks, such as running under a + // non-Psiphon VPN. While this check could be made before calling + // MakeDialParameters, such as in selectProtocol during iteration, + // checking here uses the network ID obtained in MakeDialParameters, + // and the logged warning is useful for diagnostics. + if !IsInproxyCompatibleNetworkType(dialParams.NetworkID) { + return nil, errors.TraceNew("inproxy protocols skipped on incompatible network") + } + + // inproxyDialInitialized indicates that the inproxy dial was wired + // up, and this isn't an untunneled tactics request (isTactics). + dialParams.inproxyDialInitialized = true + + // Store a reference to the current, shared in-proxy broker client. + // + // The broker client has its own, independent replay scheme and its + // own dial parameters which are reported for metrics. + dialParams.inproxyBrokerClient, + dialParams.inproxyBrokerDialParameters, + err = inproxyClientBrokerClientManager.GetBrokerClient(networkID) + if err != nil { + return nil, errors.Trace(err) + } + + // Load the signed server entry to be presented to the broker as proof + // that the in-proxy destination is a Psiphon server. The original + // JSON server entry fields are loaded from the local data store + // (or from config.TargetServerEntry), since the signature may + // include fields, added after this client version, which are in the + // JSON but not in the protocol.ServerEntry. + + var serverEntryFields protocol.ServerEntryFields + if serverEntry.LocalSource == protocol.SERVER_ENTRY_SOURCE_TARGET { + + serverEntryFields, err = protocol.DecodeServerEntryFields( + config.TargetServerEntry, "", protocol.SERVER_ENTRY_SOURCE_TARGET) + if err != nil { + return nil, errors.Trace(err) + } + if serverEntryFields.GetIPAddress() != serverEntry.IpAddress { + return nil, errors.TraceNew("unexpected TargetServerEntry") + } + err = serverEntryFields.ToSignedFields() + if err != nil { + return nil, errors.Trace(err) + } + + } else { + + serverEntryFields, err = GetSignedServerEntryFields(serverEntry.IpAddress) + if err != nil { + return nil, errors.Trace(err) + } + } + + // Verify the server entry signature locally to avoid a doomed broker + // round trip. + // + // Limitation: the broker still checks signatures, but it won't get to + // log an error in this case. 
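+ // + // After local verification, the signed fields are packed and + // CBOR-encoded below into inproxyPackedSignedServerEntry, which is later + // presented to the broker as proof that the in-proxy destination is a + // Psiphon server.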
+ err = serverEntryFields.VerifySignature(config.ServerEntrySignaturePublicKey) + if err != nil { + return nil, errors.Trace(err) + } + + packedServerEntryFields, err := protocol.EncodePackedServerEntryFields(serverEntryFields) + if err != nil { + return nil, errors.Trace(err) + } + dialParams.inproxyPackedSignedServerEntry, err = protocol.CBOREncoding.Marshal(packedServerEntryFields) + if err != nil { + return nil, errors.Trace(err) + } + + dialParams.inproxyNATStateManager = inproxyClientNATStateManager + + if !isReplay || !replayInproxySTUN { + + isProxy := false + dialParams.InproxySTUNDialParameters, err = MakeInproxySTUNDialParameters(config, p, isProxy) + if err != nil { + return nil, errors.Trace(err) + } + } else if dialParams.InproxySTUNDialParameters != nil { + dialParams.InproxySTUNDialParameters.Prepare() + } + + if !isReplay || !replayInproxyWebRTC { + + dialParams.InproxyWebRTCDialParameters, err = MakeInproxyWebRTCDialParameters(p) + if err != nil { + return nil, errors.Trace(err) + } + } + + // dialParams.inproxyConn is left uninitialized until after the dial, + // and until then Load will return nil. + } + // Set dial address fields. This portion of configuration is // deterministic, given the parameters established or replayed so far. @@ -1011,7 +1180,7 @@ func MakeDialParameters( dialParams.DialPortNumber = strconv.Itoa(dialPortNumber) - switch dialParams.TunnelProtocol { + switch protocol.TunnelProtocolMinusInproxy(dialParams.TunnelProtocol) { case protocol.TUNNEL_PROTOCOL_SSH, protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH, @@ -1105,7 +1274,7 @@ func MakeDialParameters( } } - // Initialize/replay User-Agent header for HTTP upstream proxy and meek protocols. + // Initialize upstream proxy. if config.UseUpstreamProxy() { // Note: UpstreamProxyURL will be validated in the dial @@ -1115,6 +1284,8 @@ func MakeDialParameters( } } + // Initialize/replay User-Agent header for HTTP upstream proxy and meek protocols. + dialCustomHeaders := makeDialCustomHeaders(config, p) if protocol.TunnelProtocolUsesMeek(dialParams.TunnelProtocol) || @@ -1126,6 +1297,9 @@ func MakeDialParameters( } if dialParams.SelectedUserAgent { + + // Limitation: if config.CustomHeaders adds a User-Agent between + // replays, it may be ignored due to replaying a selected User-Agent. dialCustomHeaders.Set("User-Agent", dialParams.UserAgent) } @@ -1347,6 +1521,18 @@ func MakeDialParameters( } } + if !isTactics && + protocol.TunnelProtocolUsesInproxy(dialParams.TunnelProtocol) && + protocol.TunnelProtocolUsesTCP(dialParams.TunnelProtocol) { + + // Set DialConfig.CustomDialer to redirect all underlying TCP dials to use + // in-proxy as a 1st hop. Since QUIC doesn't use DialConfig or have its + // own CustomDialer, QUIC is handled with an explicit special case in + // dialTunnel. + + dialParams.dialConfig.CustomDialer = makeInproxyTCPDialer(config, dialParams) + } + return dialParams, nil } @@ -1354,8 +1540,15 @@ func (dialParams *DialParameters) GetDialConfig() *DialConfig { return dialParams.dialConfig } +func (dialParams *DialParameters) GetMeekConfig() *MeekConfig { + return dialParams.meekConfig +} + func (dialParams *DialParameters) GetTLSOSSHConfig(config *Config) *TLSTunnelConfig { + // TLSTunnelConfig isn't pre-created in MakeDialParameters to avoid holding a long + // term reference to TLSTunnelConfig.Parameters. 
+ return &TLSTunnelConfig{ CustomTLSConfig: &CustomTLSConfig{ Parameters: config.GetParameters(), @@ -1381,30 +1574,41 @@ func (dialParams *DialParameters) GetTLSOSSHConfig(config *Config) *TLSTunnelCon } } -func (dialParams *DialParameters) GetMeekConfig() *MeekConfig { - return dialParams.meekConfig -} - -// GetNetworkType returns a network type name, suitable for metrics, which is -// derived from the network ID. func (dialParams *DialParameters) GetNetworkType() string { + return GetNetworkType(dialParams.NetworkID) +} - // Unlike the logic in loggingNetworkIDGetter.GetNetworkID, we don't take the - // arbitrary text before the first "-" since some platforms without network - // detection support stub in random values to enable tactics. Instead we - // check for and use the common network type prefixes currently used in - // NetworkIDGetter implementations. +func (dialParams *DialParameters) GetTLSVersionForMetrics() string { + return getTLSVersionForMetrics(dialParams.TLSVersion, dialParams.NoDefaultTLSSessionID) +} - if strings.HasPrefix(dialParams.NetworkID, "VPN") { - return "VPN" +func getTLSVersionForMetrics(tlsVersion string, noDefaultTLSSessionID bool) string { + version := tlsVersion + if noDefaultTLSSessionID { + version += "-no_def_id" } - if strings.HasPrefix(dialParams.NetworkID, "WIFI") { - return "WIFI" + return version +} + +func (dialParams *DialParameters) GetInproxyMetrics() common.LogFields { + inproxyMetrics := common.LogFields{} + + if !dialParams.inproxyDialInitialized { + // This was an untunneled tactics request using an in-proxy meek + // server, no there was no in-proxy dial or dial parameters. + return inproxyMetrics } - if strings.HasPrefix(dialParams.NetworkID, "MOBILE") { - return "MOBILE" + + for _, metrics := range []common.LogFields{ + dialParams.inproxyBrokerDialParameters.GetMetrics(), + dialParams.InproxySTUNDialParameters.GetMetrics(), + dialParams.InproxyWebRTCDialParameters.GetMetrics(), + } { + for name, value := range metrics { + inproxyMetrics[name] = value + } } - return "UNKNOWN" + return inproxyMetrics } func (dialParams *DialParameters) Succeeded() { @@ -1457,18 +1661,6 @@ func (dialParams *DialParameters) Failed(config *Config) { } } -func (dialParams *DialParameters) GetTLSVersionForMetrics() string { - return getTLSVersionForMetrics(dialParams.TLSVersion, dialParams.NoDefaultTLSSessionID) -} - -func getTLSVersionForMetrics(tlsVersion string, noDefaultTLSSessionID bool) string { - version := tlsVersion - if noDefaultTLSSessionID { - version += "-no_def_id" - } - return version -} - // ExchangedDialParameters represents the subset of DialParameters that is // shared in a client-to-client exchange of server connection info. // @@ -1644,6 +1836,7 @@ func selectFrontingParameters( func selectQUICVersion( isFronted bool, + isInproxy bool, serverEntry *protocol.ServerEntry, p parameters.ParametersAccessor) string { @@ -1670,8 +1863,11 @@ func selectQUICVersion( quicVersions := make([]string, 0) // Don't use gQUIC versions when the server entry specifies QUICv1-only. + // + // SupportedQUICVersions is specific to QUIC-OSSH and does not apply to + // in-proxy variants; all in-proxy QUIC is QUICv1-only. 
supportedQUICVersions := protocol.SupportedQUICVersions - if serverEntry.SupportsOnlyQUICv1() { + if isInproxy || serverEntry.SupportsOnlyQUICv1() { supportedQUICVersions = protocol.SupportedQUICv1Versions } diff --git a/psiphon/dialParameters_test.go b/psiphon/dialParameters_test.go index a05e95809..05df430e1 100644 --- a/psiphon/dialParameters_test.go +++ b/psiphon/dialParameters_test.go @@ -151,7 +151,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { upstreamProxyErrorCallback := func(_ error) {} dialParams, err := MakeDialParameters( - clientConfig, steeringIPCache, upstreamProxyErrorCallback, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, upstreamProxyErrorCallback, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -275,7 +275,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { dialParams.Failed(clientConfig) dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -291,7 +291,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { testNetworkID = prng.HexString(8) dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -309,7 +309,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { dialParams.Succeeded() replayDialParams, err := MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -421,7 +421,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { } dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -440,7 +440,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { } dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -456,7 +456,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { time.Sleep(1 * time.Second) dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -472,7 +472,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { serverEntries[0].ConfigurationVersion += 1 dialParams, err = 
MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -496,7 +496,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { } dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -504,7 +504,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { dialParams.Succeeded() replayDialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -537,7 +537,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { } dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if protocol.TunnelProtocolUsesFrontedMeek(tunnelProtocol) { if err == nil { @@ -567,7 +567,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { } dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if protocol.TunnelProtocolIsDirect(tunnelProtocol) { if err == nil { @@ -595,7 +595,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { // Test: steering IP used in non-replay case dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -613,7 +613,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { setCacheEntry("127.0.0.1") dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -641,7 +641,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { dialParams.Succeeded() dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -659,7 +659,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { setCacheEntry("127.0.0.2") dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, 
false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -675,7 +675,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { steeringIPCache.Flush() dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -685,7 +685,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { setCacheEntry("127.0.0.3") dialParams, err = MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntries[0], nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -727,7 +727,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { if i%10 == 0 { dialParams, err := MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntry, false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntry, nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -757,7 +757,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { } dialParams, err := MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntry, false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntry, nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -780,7 +780,7 @@ func runDialParametersAndReplay(t *testing.T, tunnelProtocol string) { } dialParams, err := MakeDialParameters( - clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntry, false, 0, 0) + clientConfig, steeringIPCache, nil, canReplay, selectProtocol, serverEntry, nil, nil, false, 0, 0) if err != nil { t.Fatalf("MakeDialParameters failed: %s", err) } @@ -851,7 +851,7 @@ func TestLimitTunnelDialPortNumbers(t *testing.T) { } selectProtocol := func(serverEntry *protocol.ServerEntry) (string, bool) { - return constraints.selectProtocol(0, false, serverEntry) + return constraints.selectProtocol(0, false, false, serverEntry) } for _, tunnelProtocol := range protocol.SupportedTunnelProtocols { @@ -1004,7 +1004,7 @@ func TestMakeHTTPTransformerParameters(t *testing.T) { t.Fatalf("parameters.NewParameters failed %v", err) } - _, err = params.Set("", false, tt.paramValues) + _, err = params.Set("", 0, tt.paramValues) if err != nil { t.Fatalf("params.Set failed %v", err) } @@ -1076,7 +1076,7 @@ func TestMakeOSSHObfuscatorSeedTranformerParameters(t *testing.T) { t.Fatalf("parameters.NewParameters failed: %v", err) } - _, err = params.Set("", false, tt.paramValues) + _, err = params.Set("", 0, tt.paramValues) if err != nil { t.Fatalf("params.Set failed: %v", err) } diff --git a/psiphon/exchange_test.go b/psiphon/exchange_test.go index 72f76b1f1..0dfb6334e 100644 --- a/psiphon/exchange_test.go +++ b/psiphon/exchange_test.go @@ -63,8 +63,8 @@ func TestServerEntryExchange(t *testing.T) { configJSONTemplate := ` { - "SponsorId" : "0", - "PropagationChannelId" : "0", + "SponsorId" : "0000000000000000", + "PropagationChannelId" : "0000000000000000", "ServerEntrySignaturePublicKey" : "%s", "ExchangeObfuscationKey" : "%s", "NetworkID" : "%s" @@ -190,6 +190,8 @@ func TestServerEntryExchange(t 
*testing.T) { canReplay, selectProtocol, serverEntry, + nil, + nil, false, 0, 0) diff --git a/psiphon/feedback.go b/psiphon/feedback.go index e6884f549..202c94afd 100644 --- a/psiphon/feedback.go +++ b/psiphon/feedback.go @@ -204,12 +204,14 @@ func SendFeedback(ctx context.Context, config *Config, diagnostics, uploadPath s feedbackUploadTimeout) defer cancelFunc() + payloadSecure := true client, _, err := MakeUntunneledHTTPClient( feedbackUploadCtx, config, untunneledDialConfig, uploadURL.SkipVerify, config.DisableSystemRootCAs, + payloadSecure, uploadURL.FrontingSpecs, func(frontingProviderID string) { NoticeInfo( diff --git a/psiphon/httpProxy.go b/psiphon/httpProxy.go index cedf9c03d..0ba3aea96 100644 --- a/psiphon/httpProxy.go +++ b/psiphon/httpProxy.go @@ -74,7 +74,6 @@ import ( // // Origin URLs must include the scheme prefix ("http://" or "https://") and must be // URL encoded. -// type HttpProxy struct { config *Config tunneler Tunneler @@ -86,7 +85,7 @@ type HttpProxy struct { urlProxyDirectRelay *http.Transport urlProxyDirectClient *http.Client responseHeaderTimeout time.Duration - openConns *common.Conns + openConns *common.Conns[net.Conn] stopListeningBroadcast chan struct{} listenIP string listenPort int @@ -173,7 +172,7 @@ func NewHttpProxy( urlProxyDirectRelay: urlProxyDirectRelay, urlProxyDirectClient: urlProxyDirectClient, responseHeaderTimeout: responseHeaderTimeout, - openConns: common.NewConns(), + openConns: common.NewConns[net.Conn](), stopListeningBroadcast: make(chan struct{}), listenIP: proxyIP, listenPort: proxyPort, @@ -226,7 +225,6 @@ func (proxy *HttpProxy) Close() { // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// func (proxy *HttpProxy) ServeHTTP(responseWriter http.ResponseWriter, request *http.Request) { if request.Method == "CONNECT" { conn := hijack(responseWriter) diff --git a/psiphon/inproxy.go b/psiphon/inproxy.go new file mode 100644 index 000000000..5455d4fc6 --- /dev/null +++ b/psiphon/inproxy.go @@ -0,0 +1,2375 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package psiphon + +import ( + "bytes" + "context" + "encoding/binary" + std_errors "errors" + "fmt" + "io" + "net" + "net/http" + "net/netip" + "strconv" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/fragmentor" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/resolver" + "github.com/cespare/xxhash" + "golang.org/x/net/bpf" +) + +// InproxyBrokerClientManager manages an InproxyBrokerClientInstance, an +// in-proxy broker client, and its associated broker dial parameters, which +// may be shared by multiple client dials or proxy instances. There is no +// explicit close operation for the managed InproxyBrokerClientInstance. +// +// Once used, the current InproxyBrokerClientInstance and its broker client are +// left actively connected to the broker, to minimize transport round trips +// for additional requests. +// +// The InproxyBrokerClientManager and its components implement a replay system +// for broker client dials. As one broker client is shared across multiple +// client in-proxy dials, the broker dial parameters are replayed +// independently from tunnel dial parameters. +// +// The NewInproxyBrokerClientInstance layer provides a fixed association +// between a broker client and its broker dial parameters, ensuring that +// in-proxy success/failure callbacks reference the correct replay parameters +// when setting or clearing replay. +// +// A new InproxyBrokerClientInstance, including the broker dial parameters and +// broker client, is instantiated when the active network ID changes, using +// tactics for the new network. +type InproxyBrokerClientManager struct { + config *Config + isProxy bool + + mutex sync.Mutex + networkID string + brokerClientInstance *InproxyBrokerClientInstance +} + +// NewInproxyBrokerClientManager creates a new InproxyBrokerClientManager. +// NewInproxyBrokerClientManager does not perform any network operations; the +// managed InproxyBrokerClientInstance is initialized when used for a round +// trip. +func NewInproxyBrokerClientManager( + config *Config, isProxy bool) *InproxyBrokerClientManager { + + b := &InproxyBrokerClientManager{ + config: config, + isProxy: isProxy, + } + + // b.brokerClientInstance is initialized on demand, when getBrokerClient + // is called. + + return b +} + +// TacticsApplied implements the TacticsAppliedReceiver interface, and is +// called when tactics have changed, which triggers a broker client reset in +// order to apply potentially changed parameters. +func (b *InproxyBrokerClientManager) TacticsApplied() error { + b.mutex.Lock() + defer b.mutex.Unlock() + + // Don't reset when not yet initialized; b.brokerClientInstance is + // initialized only on demand. + if b.brokerClientInstance == nil { + return nil + } + + // TODO: as a future enhancement, don't reset when the tactics + // brokerSpecs.Hash() is unchanged? + + return errors.Trace(b.reset()) +} + +// GetBrokerClient returns the current, shared broker client and its +// corresponding dial parameters (for metrics logging).
If there is no +// current broker client, or if the network ID differs from the network ID +// associated with the previous broker client, a new broker client is +// initialized. +func (b *InproxyBrokerClientManager) GetBrokerClient( + networkID string) (*inproxy.BrokerClient, *InproxyBrokerDialParameters, error) { + + b.mutex.Lock() + defer b.mutex.Unlock() + + if b.brokerClientInstance == nil || b.networkID != networkID { + err := b.reset() + if err != nil { + return nil, nil, errors.Trace(err) + } + } + + // The b.brokerClientInstance.brokerClient is wired up to refer back to + // b.brokerClientInstance.brokerDialParams/roundTripper, etc. + + return b.brokerClientInstance.brokerClient, + b.brokerClientInstance.brokerDialParams, + nil +} + +func (b *InproxyBrokerClientManager) resetBrokerClientOnRoundTripperFailed( + brokerClientInstance *InproxyBrokerClientInstance) error { + + b.mutex.Lock() + defer b.mutex.Unlock() + + if b.brokerClientInstance != brokerClientInstance { + // Ignore the reset if the signal comes from the non-current + // brokerClientInstance, which may occur when multiple in-flight + // round trips fail in close proximity. + return nil + } + + return errors.Trace(b.reset()) +} + +func (b *InproxyBrokerClientManager) reset() error { + + // Assumes b.mutex lock is held. + + if b.brokerClientInstance != nil { + + // Close the existing broker client. This will close all underlying + // network connections, interrupting any in-flight requests. This + // close is invoked in the resetBrokerClientOnRoundTripperFailed + // case, where it's expected that the round tripper has permanently + // failed. + + b.brokerClientInstance.Close() + } + + // Any existing broker client is removed, even if + // NewInproxyBrokerClientInstance fails. This ensures, for example, that + // an existing broker client is removed when its spec is no longer + // available in tactics. + b.networkID = "" + b.brokerClientInstance = nil + + networkID := b.config.GetNetworkID() + + brokerClientInstance, err := NewInproxyBrokerClientInstance( + b.config, b, networkID, b.isProxy) + if err != nil { + return errors.Trace(err) + } + + b.networkID = networkID + b.brokerClientInstance = brokerClientInstance + + return nil +} + +// InproxyBrokerClientInstance pairs an inproxy.BrokerClient instance with an +// implementation of the inproxy.BrokerDialCoordinator interface and the +// associated, underlying broker dial parameters. InproxyBrokerClientInstance +// implements broker client dial replay. +type InproxyBrokerClientInstance struct { + config *Config + brokerClientManager *InproxyBrokerClientManager + networkID string + brokerClientPrivateKey inproxy.SessionPrivateKey + brokerClient *inproxy.BrokerClient + brokerPublicKey inproxy.SessionPublicKey + brokerRootObfuscationSecret inproxy.ObfuscationSecret + brokerDialParams *InproxyBrokerDialParameters + replayEnabled bool + isReplay bool + roundTripper *InproxyBrokerRoundTripper + personalCompartmentIDs []inproxy.ID + commonCompartmentIDs []inproxy.ID + sessionHandshakeTimeout time.Duration + announceRequestTimeout time.Duration + announceDelay time.Duration + announceDelayJitter float64 + answerRequestTimeout time.Duration + offerRequestTimeout time.Duration + offerRetryDelay time.Duration + offerRetryJitter float64 + relayedPacketRequestTimeout time.Duration + replayRetainFailedProbability float64 + replayUpdateFrequency time.Duration + + mutex sync.Mutex + lastStoreReplay time.Time +} + +// NewInproxyBrokerClientInstance creates a new InproxyBrokerClientInstance.
+// NewInproxyBrokerClientInstance does not perform any network operations; the +// new InproxyBrokerClientInstance is initialized when used for a round +// trip. +func NewInproxyBrokerClientInstance( + config *Config, + brokerClientManager *InproxyBrokerClientManager, + networkID string, + isProxy bool) (*InproxyBrokerClientInstance, error) { + + p := config.GetParameters().Get() + defer p.Close() + + // Select common or personal compartment IDs. + + commonCompartmentIDs, personalCompartmentIDs, err := prepareCompartmentIDs(config, p, isProxy) + if err != nil { + return nil, errors.Trace(err) + } + + // Select the broker to use, optionally favoring brokers with replay + // data. + + var brokerSpecs parameters.InproxyBrokerSpecsValue + if isProxy { + brokerSpecs = p.InproxyBrokerSpecs( + parameters.InproxyProxyBrokerSpecs, parameters.InproxyBrokerSpecs) + } else { + brokerSpecs = p.InproxyBrokerSpecs( + parameters.InproxyClientBrokerSpecs, parameters.InproxyBrokerSpecs) + } + if len(brokerSpecs) == 0 { + return nil, errors.TraceNew("no broker specs") + } + + // To ensure personal compartment ID client/proxy rendezvous at the same + // broker, simply pick the first configured broker. + // + // Limitations: there's no failover or load balancing for the personal + // compartment ID case; and this logic assumes that the broker spec + // tactics are the same for the client and proxy. + + if len(personalCompartmentIDs) > 0 { + brokerSpecs = brokerSpecs[:1] + } + + now := time.Now() + + // Prefer a broker with replay data. + + // Replay is disabled when the TTL, InproxyReplayBrokerDialParametersTTL, + // is 0. + ttl := p.Duration(parameters.InproxyReplayBrokerDialParametersTTL) + + replayEnabled := ttl > 0 && + !config.DisableReplay && + prng.FlipWeightedCoin(p.Float(parameters.InproxyReplayBrokerDialParametersProbability)) + + brokerSpec, brokerDialParams, err := + ShuffleAndGetNetworkReplayParameters[parameters.InproxyBrokerSpec, InproxyBrokerDialParameters]( + networkID, + replayEnabled, + brokerSpecs, + func(spec *parameters.InproxyBrokerSpec) string { return spec.BrokerPublicKey }, + func(spec *parameters.InproxyBrokerSpec, dialParams *InproxyBrokerDialParameters) bool { + return dialParams.LastUsedTimestamp.After(now.Add(-ttl)) && + bytes.Equal(dialParams.LastUsedBrokerSpecHash, hashBrokerSpec(spec)) + }) + if err != nil { + NoticeWarning("ShuffleAndGetNetworkReplayParameters failed: %v", errors.Trace(err)) + + // When there's an error, try to continue, using a random broker spec + // and no replay dial parameters. + brokerSpec = brokerSpecs[prng.Intn(len(brokerSpecs)-1)] + } + + // Generate new broker dial parameters if not replaying. Later, isReplay + // is used to report the replay metric. + + isReplay := brokerDialParams != nil + + if !isReplay { + brokerDialParams, err = MakeInproxyBrokerDialParameters(config, p, networkID, brokerSpec) + if err != nil { + return nil, errors.Trace(err) + } + } else { + brokerDialParams.brokerSpec = brokerSpec + err := brokerDialParams.prepareDialConfigs(config, p, networkID, true, nil) + if err != nil { + return nil, errors.Trace(err) + } + } + + // Load broker key material.
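+ // + // The broker session public key and root obfuscation secret are parsed + // from the selected broker spec (distributed via tactics); a parse + // failure aborts creation of this instance.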
+ + brokerPublicKey, err := inproxy.SessionPublicKeyFromString(brokerSpec.BrokerPublicKey) + if err != nil { + return nil, errors.Trace(err) + } + brokerRootObfuscationSecret, err := inproxy.ObfuscationSecretFromString(brokerSpec.BrokerRootObfuscationSecret) + if err != nil { + return nil, errors.Trace(err) + } + + roundTripper := NewInproxyBrokerRoundTripper(p, brokerDialParams) + + // Clients always generate an ephemeral session key pair. Proxies may opt + // to use a long-lived key pair for proxied traffic attribution. + + var brokerClientPrivateKey inproxy.SessionPrivateKey + if isProxy && config.InproxyProxySessionPrivateKey != "" { + brokerClientPrivateKey, err = inproxy.SessionPrivateKeyFromString(config.InproxyProxySessionPrivateKey) + if err != nil { + return nil, errors.Trace(err) + } + } else { + brokerClientPrivateKey, err = inproxy.GenerateSessionPrivateKey() + if err != nil { + return nil, errors.Trace(err) + } + } + + // InproxyBrokerClientInstance implements the + // inproxy.BrokerDialCoordinator interface and passes itself to + // inproxy.NewBrokerClient in order to provide the round tripper, key + // material, compartment IDs, timeouts, and other configuration to the + // in-proxy broker client. + // + // Timeouts are not replayed, but snapshots are stored in the + // InproxyBrokerClientInstance for efficient lookup. + + b := &InproxyBrokerClientInstance{ + config: config, + brokerClientManager: brokerClientManager, + networkID: networkID, + brokerClientPrivateKey: brokerClientPrivateKey, + brokerPublicKey: brokerPublicKey, + brokerRootObfuscationSecret: brokerRootObfuscationSecret, + brokerDialParams: brokerDialParams, + replayEnabled: replayEnabled, + isReplay: isReplay, + roundTripper: roundTripper, + personalCompartmentIDs: personalCompartmentIDs, + commonCompartmentIDs: commonCompartmentIDs, + + sessionHandshakeTimeout: p.Duration(parameters.InproxySessionHandshakeRoundTripTimeout), + announceRequestTimeout: p.Duration(parameters.InproxyProxyAnnounceRequestTimeout), + announceDelay: p.Duration(parameters.InproxyProxyAnnounceDelay), + announceDelayJitter: p.Float(parameters.InproxyProxyAnnounceDelayJitter), + answerRequestTimeout: p.Duration(parameters.InproxyProxyAnswerRequestTimeout), + offerRequestTimeout: p.Duration(parameters.InproxyClientOfferRequestTimeout), + offerRetryDelay: p.Duration(parameters.InproxyClientOfferRetryDelay), + offerRetryJitter: p.Float(parameters.InproxyClientOfferRetryJitter), + relayedPacketRequestTimeout: p.Duration(parameters.InproxyClientRelayedPacketRequestTimeout), + replayRetainFailedProbability: p.Float(parameters.InproxyReplayBrokerRetainFailedProbability), + replayUpdateFrequency: p.Duration(parameters.InproxyReplayBrokerUpdateFrequency), + } + + // Initialize broker client. This will start with a fresh broker session. + // + // When resetBrokerClientOnRoundTripperFailed is invoked due to a failure + // at the transport level -- TLS or domain fronting -- + // NewInproxyBrokerClientInstance is invoked, resetting both the broker + // client round tripper and the broker session. As a future enhancement, + // consider distinguishing between transport and session errors and + // retaining a valid established session when only the transport needs to + // be reset/retried. 
+ + b.brokerClient, err = inproxy.NewBrokerClient(b) + if err != nil { + return nil, errors.Trace(err) + } + + return b, nil +} + +func prepareCompartmentIDs( + config *Config, + p parameters.ParametersAccessor, + isProxy bool) ([]inproxy.ID, []inproxy.ID, error) { + + // Personal compartment IDs are loaded from the tunnel-core config; these + // are set by the external app based on user input/configuration of IDs + // generated by or obtained from personal proxies. Both clients and + // proxies send personal compartment IDs to the in-proxy broker. For + // clients, when personal compartment IDs are configured, no common + // compartment IDs are prepared, ensuring matches with only proxies that + // supply the corresponding personal compartment IDs. + // + // Common compartment IDs are obtained from tactics and merged with + // previously learned IDs stored in the local datastore. When new IDs are + // obtained from tactics, the merged list is written back to the + // datastore. This allows for schemes where common compartment IDs are + // distributed to sets of clients, then removed from distibution, and + // still used to match proxies to those sets of clients. Only clients + // send common compartment IDs to the in-proxy broker. Proxies are + // automatically assigned to common compartments by the broker. + // + // Maximum compartment ID list lengths are enforced to ensure broker + // request sizes don't grow unbounded. + // + // Limitation: currently, in max length trimming, new common compartment + // IDs take precedence over older IDs. + + maxCompartmentIDListLength := p.Int(parameters.InproxyMaxCompartmentIDListLength) + + configPersonalCompartmentIDs := config.InproxyProxyPersonalCompartmentIDs + if !isProxy { + configPersonalCompartmentIDs = config.InproxyClientPersonalCompartmentIDs + } + personalCompartmentIDs, err := inproxy.IDsFromStrings(configPersonalCompartmentIDs) + if err != nil { + return nil, nil, errors.Trace(err) + } + + if len(personalCompartmentIDs) > maxCompartmentIDListLength { + + // Trim the list. It's not expected that user-configured personal + // compartment ID lists will exceed the max length. + // + // TODO: shuffle before trimming? Prioritize previous matches? + + personalCompartmentIDs = personalCompartmentIDs[:maxCompartmentIDListLength] + } + + var commonCompartmentIDs []inproxy.ID + if !isProxy && len(personalCompartmentIDs) == 0 { + + tacticsCommonCompartmentIDs := p.InproxyCompartmentIDs(parameters.InproxyCommonCompartmentIDs) + + knownCommonCompartmentIDs, err := LoadInproxyCommonCompartmentIDs() + if err != nil { + NoticeWarning("LoadInproxyCommonCompartmentIDs failed: %v", errors.Trace(err)) + // Continue with only the tactics common compartment IDs. + } + + newCompartmentIDs := make([]string, 0, len(tacticsCommonCompartmentIDs)) + + for _, compartmentID := range tacticsCommonCompartmentIDs { + // TODO: faster lookup? + if !common.Contains(knownCommonCompartmentIDs, compartmentID) { + newCompartmentIDs = append(newCompartmentIDs, compartmentID) + } + } + + if len(newCompartmentIDs) > 0 { + newCompartmentIDs = append(newCompartmentIDs, knownCommonCompartmentIDs...) + + // Locally store more than InproxyMaxCompartmentIDListLength known + // common compartment IDs, in case the request limit parameter is + // increased in the future. + // maxPersistedCommonCompartmentIDListLength still limits the + // length of the list to cap local memory and disk impact. 
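+ // + // (As a rough estimate, assuming 32-byte compartment IDs, 500 IDs is + // on the order of 16KB, per the inline note below.)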
+ + maxPersistedCommonCompartmentIDListLength := 500 // ~16K + if maxCompartmentIDListLength > maxPersistedCommonCompartmentIDListLength { + maxPersistedCommonCompartmentIDListLength = maxCompartmentIDListLength + } + + if len(newCompartmentIDs) > maxPersistedCommonCompartmentIDListLength { + newCompartmentIDs = newCompartmentIDs[:maxPersistedCommonCompartmentIDListLength] + } + + err := StoreInproxyCommonCompartmentIDs(newCompartmentIDs) + if err != nil { + NoticeWarning("StoreInproxyCommonCompartmentIDs failed: %v", errors.Trace(err)) + // Continue without persisting new common compartment IDs. + } + + knownCommonCompartmentIDs = newCompartmentIDs + } + + commonCompartmentIDs, err = inproxy.IDsFromStrings(knownCommonCompartmentIDs) + if err != nil { + return nil, nil, errors.Trace(err) + } + + if len(commonCompartmentIDs) > maxCompartmentIDListLength { + // TODO: shuffle before trimming? Prioritize previous matches? + commonCompartmentIDs = commonCompartmentIDs[:maxCompartmentIDListLength] + } + } + + return commonCompartmentIDs, personalCompartmentIDs, nil +} + +// Close closes the broker client round tripped, including closing all +// underlying network connections, which will interrupt any in-flight round +// trips. +func (b *InproxyBrokerClientInstance) Close() error { + err := b.roundTripper.Close() + return errors.Trace(err) +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) NetworkID() string { + return b.networkID +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) NetworkType() inproxy.NetworkType { + return getInproxyNetworkType(GetNetworkType(b.networkID)) +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) CommonCompartmentIDs() []inproxy.ID { + return b.commonCompartmentIDs +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) PersonalCompartmentIDs() []inproxy.ID { + return b.personalCompartmentIDs +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) BrokerClientPrivateKey() inproxy.SessionPrivateKey { + return b.brokerClientPrivateKey +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) BrokerPublicKey() inproxy.SessionPublicKey { + return b.brokerPublicKey +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) BrokerRootObfuscationSecret() inproxy.ObfuscationSecret { + return b.brokerRootObfuscationSecret +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) BrokerClientRoundTripper() (inproxy.RoundTripper, error) { + + // Returns the same round tripper for the lifetime of the + // inproxy.BrokerDialCoordinator, ensuring all requests for one in-proxy + // dial or proxy relay use the same broker, as is necessary due to the + // broker state for the proxy announce/answer, client broker/server + // relay, etc. + + return b.roundTripper, nil +} + +// Implements the inproxy.BrokerDialCoordinator interface. 
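+// +// BrokerClientRoundTripperSucceeded extends the broker dial parameters +// replay TTL (when replay is enabled) and verifies/extends the resolver +// cache entry for any resolved fronting domain.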
+func (b *InproxyBrokerClientInstance) BrokerClientRoundTripperSucceeded(roundTripper inproxy.RoundTripper) { + b.mutex.Lock() + defer b.mutex.Unlock() + + if rt, ok := roundTripper.(*InproxyBrokerRoundTripper); !ok || rt != b.roundTripper { + // Passing in the round tripper obtained from BrokerClientRoundTripper + // is just used for sanity check in this implementation, since each + // InproxyBrokerClientInstance has exactly one round tripper. + NoticeError("BrokerClientRoundTripperSucceeded: roundTripper instance mismatch") + return + } + + // Set replay or extend the broker dial parameters replay TTL after a + // success. With tunnel dial parameters, the replay TTL is extended after + // every successful tunnel connection. Since there are potentially more + // and more frequent broker round trips one tunnel dial, the TTL is only + // extended after some target duration has elapsed, to avoid excessive + // datastore writes. + + now := time.Now() + if b.replayEnabled && now.Sub(b.lastStoreReplay) > b.replayUpdateFrequency { + b.brokerDialParams.LastUsedTimestamp = time.Now() + + err := SetNetworkReplayParameters[InproxyBrokerDialParameters]( + b.networkID, b.brokerDialParams.brokerSpec.BrokerPublicKey, b.brokerDialParams) + if err != nil { + NoticeWarning("StoreBrokerDialParameters failed: %v", errors.Trace(err)) + // Continue without persisting replay changes. + } else { + b.lastStoreReplay = now + } + } + + // Verify/extend the resolver cache entry for any resolved domain after a + // success. + // + // Limitation: currently this re-extends regardless of how long ago the DNS + // resolve happened. + + resolver := b.config.GetResolver() + if resolver != nil { + resolver.VerifyCacheExtension(b.brokerDialParams.FrontingDialAddress) + } +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) BrokerClientRoundTripperFailed(roundTripper inproxy.RoundTripper) { + b.mutex.Lock() + defer b.mutex.Unlock() + + if rt, ok := roundTripper.(*InproxyBrokerRoundTripper); !ok || rt != b.roundTripper { + // Passing in the round tripper obtained from BrokerClientRoundTripper + // is just used for sanity check in this implementation, since each + // InproxyBrokerClientInstance has exactly one round tripper. + NoticeError("BrokerClientRoundTripperFailed: roundTripper instance mismatch") + return + } + + // Delete any persistent replay dial parameters. Unlike with the success + // case, consecutive, repeated deletes shouldn't write to storage, so + // they are not avoided. + + if b.replayEnabled && + !prng.FlipWeightedCoin(b.replayRetainFailedProbability) { + + // Limitation: there's a race condition with multiple + // InproxyBrokerClientInstances writing to the replay datastore for + // the same broker, such as in the case where there's a dual-mode + // in-proxy client and proxy; this delete could potentially clobber a + // concurrent fresh replay store after a success. + // + // TODO: add an additional storage key distinguisher for each instance? + + err := DeleteNetworkReplayParameters[InproxyBrokerDialParameters]( + b.networkID, b.brokerDialParams.brokerSpec.BrokerPublicKey) + if err != nil { + NoticeWarning("DeleteBrokerDialParameters failed: %v", errors.Trace(err)) + // Continue without resetting replay. + } + } + + // Invoke resetBrokerClientOnRoundTripperFailed to signal the + // InproxyBrokerClientManager to create a new + // InproxyBrokerClientInstance, with new dial parameters and a new round + // tripper, after a failure. 
+ // + // This InproxyBrokerClientInstance doesn't change its dial parameters or + // round tripper to ensure that any concurrent usage retains affinity + // with the same parameters and broker. + // + // Limitation: a transport-level failure may unnecessarily reset the + // broker session state; see comment in NewInproxyBrokerClientInstance. + + err := b.brokerClientManager.resetBrokerClientOnRoundTripperFailed(b) + if err != nil { + NoticeWarning("reset broker client failed: %v", errors.Trace(err)) + // Continue with old broker client instance. + } +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) AnnounceRequestTimeout() time.Duration { + return b.announceRequestTimeout +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) SessionHandshakeRoundTripTimeout() time.Duration { + return b.sessionHandshakeTimeout +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) AnnounceDelay() time.Duration { + return b.announceDelay +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) AnnounceDelayJitter() float64 { + return b.announceDelayJitter +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) AnswerRequestTimeout() time.Duration { + return b.answerRequestTimeout +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) OfferRequestTimeout() time.Duration { + return b.offerRequestTimeout +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) OfferRetryDelay() time.Duration { + return b.offerRetryDelay +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) OfferRetryJitter() float64 { + return b.offerRetryJitter +} + +// Implements the inproxy.BrokerDialCoordinator interface. +func (b *InproxyBrokerClientInstance) RelayedPacketRequestTimeout() time.Duration { + return b.relayedPacketRequestTimeout +} + +// InproxyBrokerDialParameters represents a selected broker transport and dial +// paramaters. +// +// InproxyBrokerDialParameters is used to configure dialers; as a persistent +// record to store successful dial parameters for replay; and to report dial +// stats in notices and Psiphon API calls. +// +// InproxyBrokerDialParameters is similar to tunnel DialParameters, but is +// specific to the in-proxy broker dial phase. +type InproxyBrokerDialParameters struct { + brokerSpec *parameters.InproxyBrokerSpec `json:"-"` + isReplay bool `json:"-"` + + LastUsedTimestamp time.Time + LastUsedBrokerSpecHash []byte + + NetworkLatencyMultiplier float64 + + BrokerTransport string + + DialAddress string + + FrontingProviderID string + FrontingDialAddress string + SNIServerName string + TransformedHostName bool + VerifyServerName string + VerifyPins []string + HostHeader string + ResolvedIPAddress atomic.Value `json:"-"` + + TLSProfile string + TLSVersion string + RandomizedTLSProfileSeed *prng.Seed + NoDefaultTLSSessionID bool + TLSFragmentClientHello bool + + SelectedUserAgent bool + UserAgent string + + BPFProgramName string + BPFProgramInstructions []bpf.RawInstruction + + FragmentorSeed *prng.Seed + + ResolveParameters *resolver.ResolveParameters + + dialConfig *DialConfig `json:"-"` + meekConfig *MeekConfig `json:"-"` +} + +// MakeInproxyBrokerDialParameters creates a new InproxyBrokerDialParameters. 
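+// +// MakeInproxyBrokerDialParameters selects the fronting, TLS, User-Agent, +// BPF, fragmentor, and resolver dial components for broker round trips; +// unlike MakeDialParameters, the per-component Replay tactic flags are not +// consulted (see the note in the function body).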
+func MakeInproxyBrokerDialParameters(
+ config *Config,
+ p parameters.ParametersAccessor,
+ networkID string,
+ brokerSpec *parameters.InproxyBrokerSpec) (*InproxyBrokerDialParameters, error) {
+
+ // This function duplicates some code from MakeDialParameters and
+ // makeFrontedHTTPClient. To simplify the logic, the Replay
+ // tactic flags for individual dial components are ignored.
+ //
+ // TODO: merge common functionality?
+
+ if config.UseUpstreamProxy() {
+ return nil, errors.TraceNew("upstream proxy unsupported")
+ }
+
+ currentTimestamp := time.Now()
+
+ var brokerDialParams *InproxyBrokerDialParameters
+
+ // Select new broker dial parameters
+
+ brokerDialParams = &InproxyBrokerDialParameters{
+ brokerSpec: brokerSpec,
+ LastUsedTimestamp: currentTimestamp,
+ LastUsedBrokerSpecHash: hashBrokerSpec(brokerSpec),
+ }
+
+ // Network latency multiplier
+
+ brokerDialParams.NetworkLatencyMultiplier = prng.ExpFloat64Range(
+ p.Float(parameters.NetworkLatencyMultiplierMin),
+ p.Float(parameters.NetworkLatencyMultiplierMax),
+ p.Float(parameters.NetworkLatencyMultiplierLambda))
+
+ // Select fronting configuration
+
+ var err error
+
+ brokerDialParams.FrontingProviderID,
+ brokerDialParams.BrokerTransport,
+ brokerDialParams.FrontingDialAddress,
+ brokerDialParams.SNIServerName,
+ brokerDialParams.VerifyServerName,
+ brokerDialParams.VerifyPins,
+ brokerDialParams.HostHeader,
+ err = brokerDialParams.brokerSpec.BrokerFrontingSpecs.SelectParameters()
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ // At this time, for the broker client, the transport is limited to
+ // fronted HTTPS.
+ //
+ // As a future enhancement, allow HTTP for the in-proxy broker case: skip
+ // selecting TLS tactics and select HTTP tactics such as
+ // HTTPTransformerParameters.
+
+ if brokerDialParams.BrokerTransport == protocol.FRONTING_TRANSPORT_HTTP {
+ return nil, errors.TraceNew("unsupported fronting transport")
+ }
+
+ // Determine and use the equivalent tunnel protocol for tactics
+ // selections. For example, for the broker transport FRONTED-HTTPS, use
+ // the tactics for FRONTED-MEEK-OSSH.
+
+ equivilentTunnelProtocol, err := protocol.EquivilentTunnelProtocol(brokerDialParams.BrokerTransport)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ // FrontingSpec.Addresses may include a port; default to 443 if none.
+
+ if _, _, err := net.SplitHostPort(brokerDialParams.FrontingDialAddress); err == nil {
+ brokerDialParams.DialAddress = brokerDialParams.FrontingDialAddress
+ } else {
+ brokerDialParams.DialAddress = net.JoinHostPort(brokerDialParams.FrontingDialAddress, "443")
+ }
+
+ // SNI configuration
+ //
+ // For a FrontingSpec, an SNI value of "" indicates to disable/omit SNI, so
+ // never transform in that case.
+
+ if brokerDialParams.SNIServerName != "" {
+ if p.WeightedCoinFlip(parameters.TransformHostNameProbability) {
+ brokerDialParams.SNIServerName = selectHostName(equivilentTunnelProtocol, p)
+ brokerDialParams.TransformedHostName = true
+ }
+ }
+
+ // TLS configuration
+ //
+ // The requireTLS13 flag is set to true in order to use only modern TLS
+ // fingerprints which should support HTTP/2 in the ALPN.
+ //
+ // TODO: TLS padding, NoDefaultTLSSessionID
+
+ brokerDialParams.TLSProfile,
+ brokerDialParams.TLSVersion,
+ brokerDialParams.RandomizedTLSProfileSeed,
+ err = SelectTLSProfile(false, true, true, brokerDialParams.FrontingProviderID, p)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ brokerDialParams.NoDefaultTLSSessionID = p.WeightedCoinFlip(
+ parameters.NoDefaultTLSSessionIDProbability)
+
+ if brokerDialParams.SNIServerName != "" && net.ParseIP(brokerDialParams.SNIServerName) == nil {
+ tlsFragmentorLimitProtocols := p.TunnelProtocols(parameters.TLSFragmentClientHelloLimitProtocols)
+ if len(tlsFragmentorLimitProtocols) == 0 || common.Contains(tlsFragmentorLimitProtocols, equivilentTunnelProtocol) {
+ brokerDialParams.TLSFragmentClientHello = p.WeightedCoinFlip(parameters.TLSFragmentClientHelloProbability)
+ }
+ }
+
+ // User Agent configuration
+
+ dialCustomHeaders := makeDialCustomHeaders(config, p)
+ brokerDialParams.SelectedUserAgent, brokerDialParams.UserAgent = selectUserAgentIfUnset(p, dialCustomHeaders)
+
+ // BPF configuration
+
+ if ClientBPFEnabled() &&
+ protocol.TunnelProtocolMayUseClientBPF(equivilentTunnelProtocol) {
+
+ if p.WeightedCoinFlip(parameters.BPFClientTCPProbability) {
+ brokerDialParams.BPFProgramName = ""
+ brokerDialParams.BPFProgramInstructions = nil
+ ok, name, rawInstructions := p.BPFProgram(parameters.BPFClientTCPProgram)
+ if ok {
+ brokerDialParams.BPFProgramName = name
+ brokerDialParams.BPFProgramInstructions = rawInstructions
+ }
+ }
+ }
+
+ // Fragmentor configuration
+
+ brokerDialParams.FragmentorSeed, err = prng.NewSeed()
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ // Resolver configuration
+ //
+ // The custom resolver is wired up only when there is a domain to be
+ // resolved; GetMetrics will log resolver metrics when the resolver is set.
+
+ if net.ParseIP(brokerDialParams.FrontingDialAddress) == nil {
+
+ resolver := config.GetResolver()
+ if resolver == nil {
+ return nil, errors.TraceNew("missing resolver")
+ }
+
+ brokerDialParams.ResolveParameters, err = resolver.MakeResolveParameters(
+ p, brokerDialParams.FrontingProviderID, brokerDialParams.FrontingDialAddress)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ }
+
+ // Initialize Dial/MeekConfigs to be passed to the corresponding dialers.
+
+ err = brokerDialParams.prepareDialConfigs(config, p, networkID, false, dialCustomHeaders)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ return brokerDialParams, nil
+}
+
+// prepareDialConfigs is called for both new and replayed broker dial parameters.
+func (brokerDialParams *InproxyBrokerDialParameters) prepareDialConfigs(
+ config *Config,
+ p parameters.ParametersAccessor,
+ networkID string,
+ isReplay bool,
+ dialCustomHeaders http.Header) error {
+
+ brokerDialParams.isReplay = isReplay
+
+ equivilentTunnelProtocol, err := protocol.EquivilentTunnelProtocol(brokerDialParams.BrokerTransport)
+ if err != nil {
+ return errors.Trace(err)
+ }
+
+ // Custom headers and User Agent
+
+ if dialCustomHeaders == nil {
+ dialCustomHeaders = makeDialCustomHeaders(config, p)
+ }
+ if brokerDialParams.SelectedUserAgent {
+
+ // Limitation: if config.CustomHeaders adds a User-Agent between
+ // replays, it may be ignored due to replaying a selected User-Agent.
+ dialCustomHeaders.Set("User-Agent", brokerDialParams.UserAgent) + } + + // Fragmentor + + fragmentorConfig := fragmentor.NewUpstreamConfig( + p, equivilentTunnelProtocol, brokerDialParams.FragmentorSeed) + + // Resolver + // + // DialConfig.ResolveIP is required and called even when the destination + // is an IP address. + + resolver := config.GetResolver() + if resolver == nil { + return errors.TraceNew("missing resolver") + } + + resolveIP := func(ctx context.Context, hostname string) ([]net.IP, error) { + IPs, err := resolver.ResolveIP( + ctx, networkID, brokerDialParams.ResolveParameters, hostname) + return IPs, errors.Trace(err) + } + + // DialConfig + + brokerDialParams.ResolvedIPAddress.Store("") + + brokerDialParams.dialConfig = &DialConfig{ + DiagnosticID: brokerDialParams.brokerSpec.BrokerPublicKey, + CustomHeaders: dialCustomHeaders, + BPFProgramInstructions: brokerDialParams.BPFProgramInstructions, + DeviceBinder: config.deviceBinder, + IPv6Synthesizer: config.IPv6Synthesizer, + ResolveIP: resolveIP, + TrustedCACertificatesFilename: config.TrustedCACertificatesFilename, + FragmentorConfig: fragmentorConfig, + ResolvedIPCallback: func(IPAddress string) { + brokerDialParams.ResolvedIPAddress.Store(IPAddress) + }, + } + + // MeekDialConfig + // + // The broker round trips use MeekModeWrappedPlaintextRoundTrip without + // meek cookies, so meek obfuscation is not configured. The in-proxy + // broker session payloads have their own obfuscation layer. + + addPsiphonFrontingHeader := false + if brokerDialParams.FrontingProviderID != "" { + addPsiphonFrontingHeader = common.Contains( + p.LabeledTunnelProtocols( + parameters.AddFrontingProviderPsiphonFrontingHeader, + brokerDialParams.FrontingProviderID), + equivilentTunnelProtocol) + } + + brokerDialParams.meekConfig = &MeekConfig{ + Mode: MeekModeWrappedPlaintextRoundTrip, + DiagnosticID: brokerDialParams.FrontingProviderID, + Parameters: config.GetParameters(), + DialAddress: brokerDialParams.DialAddress, + TLSProfile: brokerDialParams.TLSProfile, + NoDefaultTLSSessionID: brokerDialParams.NoDefaultTLSSessionID, + RandomizedTLSProfileSeed: brokerDialParams.RandomizedTLSProfileSeed, + SNIServerName: brokerDialParams.SNIServerName, + AddPsiphonFrontingHeader: addPsiphonFrontingHeader, + VerifyServerName: brokerDialParams.VerifyServerName, + VerifyPins: brokerDialParams.VerifyPins, + HostHeader: brokerDialParams.HostHeader, + TransformedHostName: brokerDialParams.TransformedHostName, + NetworkLatencyMultiplier: brokerDialParams.NetworkLatencyMultiplier, + AdditionalHeaders: config.MeekAdditionalHeaders, + } + + switch brokerDialParams.BrokerTransport { + case protocol.FRONTING_TRANSPORT_HTTPS: + brokerDialParams.meekConfig.UseHTTPS = true + case protocol.FRONTING_TRANSPORT_QUIC: + brokerDialParams.meekConfig.UseQUIC = true + } + + return nil +} + +// GetMetrics implements the common.MetricsSource interface and returns log +// fields detailing the broker dial parameters. +func (brokerDialParams *InproxyBrokerDialParameters) GetMetrics() common.LogFields { + + logFields := make(common.LogFields) + + logFields["inproxy_broker_transport"] = brokerDialParams.BrokerTransport + + isReplay := "0" + if brokerDialParams.isReplay { + isReplay = "1" + } + logFields["inproxy_broker_is_replay"] = isReplay + + // Note: as At the broker client transport is currently limited to domain + // fronted HTTPS, the following related parameters are included + // unconditionally. 
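+ //
+ // The inproxy_broker_dial_address field records the original fronting
+ // dial address, which may be a domain; any resolved IP address is logged
+ // separately below.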
+ + logFields["inproxy_broker_fronting_provider_id"] = brokerDialParams.FrontingProviderID + + logFields["inproxy_broker_dial_address"] = brokerDialParams.FrontingDialAddress + + resolvedIPAddress := brokerDialParams.ResolvedIPAddress.Load().(string) + if resolvedIPAddress != "" { + logFields["inproxy_broker_resolved_ip_address"] = resolvedIPAddress + } + + if brokerDialParams.SNIServerName != "" { + logFields["inproxy_broker_sni_server_name"] = brokerDialParams.SNIServerName + } + + logFields["inproxy_broker_host_header"] = brokerDialParams.HostHeader + + transformedHostName := "0" + if brokerDialParams.TransformedHostName { + transformedHostName = "1" + } + logFields["inproxy_broker_transformed_host_name"] = transformedHostName + + if brokerDialParams.UserAgent != "" { + logFields["inproxy_broker_user_agent"] = brokerDialParams.UserAgent + } + + if brokerDialParams.BrokerTransport == protocol.FRONTING_TRANSPORT_HTTPS { + + if brokerDialParams.TLSProfile != "" { + logFields["inproxy_broker_tls_profile"] = brokerDialParams.TLSProfile + } + + logFields["inproxy_broker_tls_version"] = brokerDialParams.TLSVersion + + tlsFragmented := "0" + if brokerDialParams.TLSFragmentClientHello { + tlsFragmented = "1" + } + logFields["inproxy_broker_tls_fragmented"] = tlsFragmented + } + + if brokerDialParams.BPFProgramName != "" { + logFields["inproxy_broker_client_bpf"] = brokerDialParams.BPFProgramName + } + + if brokerDialParams.ResolveParameters != nil { + + // See comment for dialParams.ResolveParameters handling in + // getBaseAPIParameters. + + if brokerDialParams.ResolveParameters.PreresolvedIPAddress != "" { + dialDomain, _, _ := net.SplitHostPort(brokerDialParams.DialAddress) + if brokerDialParams.ResolveParameters.PreresolvedDomain == dialDomain { + logFields["inproxy_broker_dns_preresolved"] = brokerDialParams.ResolveParameters.PreresolvedIPAddress + } + } + + if brokerDialParams.ResolveParameters.PreferAlternateDNSServer { + logFields["inproxy_broker_dns_preferred"] = brokerDialParams.ResolveParameters.AlternateDNSServer + } + + if brokerDialParams.ResolveParameters.ProtocolTransformName != "" { + logFields["inproxy_broker_dns_transform"] = brokerDialParams.ResolveParameters.ProtocolTransformName + } + + logFields["inproxy_broker_dns_attempt"] = strconv.Itoa( + brokerDialParams.ResolveParameters.GetFirstAttemptWithAnswer()) + } + + // TODO: get fragmentor metrics, if any, from MeekConn. + + return logFields +} + +// hashBrokerSpec hashes the broker spec. The hash is used to detect when +// broker spec tactics have changed. +func hashBrokerSpec(spec *parameters.InproxyBrokerSpec) []byte { + var hash [8]byte + binary.BigEndian.PutUint64( + hash[:], + uint64(xxhash.Sum64String(fmt.Sprintf("%+v", spec)))) + return hash[:] +} + +// InproxyBrokerRoundTripper is a broker request round trip transport +// implemented using MeekConn in MeekModePlaintextRoundTrip mode, utilizing +// MeekConn's domain fronting capabilities and using persistent and +// multiplexed connections, via HTTP/2, to support multiple concurrent +// in-flight round trips. +// +// InproxyBrokerRoundTripper implements the inproxy.RoundTripper interface. +type InproxyBrokerRoundTripper struct { + brokerDialParams *InproxyBrokerDialParameters + runCtx context.Context + stopRunning context.CancelFunc + dial int32 + dialCompleted chan struct{} + dialErr error + conn *MeekConn + failureThreshold time.Duration +} + +// NewInproxyBrokerRoundTripper creates a new InproxyBrokerRoundTripper. 
The
+// initial DialMeek is deferred until the first call to RoundTrip, so
+// NewInproxyBrokerRoundTripper does not perform any network operations.
+//
+// The input brokerDialParams dial parameter and config fields must not be
+// modified after NewInproxyBrokerRoundTripper is called.
+func NewInproxyBrokerRoundTripper(
+ p parameters.ParametersAccessor,
+ brokerDialParams *InproxyBrokerDialParameters) *InproxyBrokerRoundTripper {
+
+ runCtx, stopRunning := context.WithCancel(context.Background())
+
+ return &InproxyBrokerRoundTripper{
+ brokerDialParams: brokerDialParams,
+ runCtx: runCtx,
+ stopRunning: stopRunning,
+ dialCompleted: make(chan struct{}),
+ failureThreshold: p.Duration(
+ parameters.InproxyBrokerRoundTripStatusCodeFailureThreshold),
+ }
+}
+
+// Close interrupts any in-flight request and closes the underlying
+// MeekConn.
+func (rt *InproxyBrokerRoundTripper) Close() error {
+
+ // Interrupt any DialMeek or RoundTrip.
+ rt.stopRunning()
+
+ if atomic.CompareAndSwapInt32(&rt.dial, 0, 1) {
+
+ // RoundTrip has not yet been called or has not yet kicked off
+ // DialMeek, so there is no MeekConn to close. Prevent any future
+ // DialMeek by signaling dialCompleted and fail any future round trip
+ // attempt by setting dialErr.
+
+ rt.dialErr = errors.TraceNew("closed")
+ close(rt.dialCompleted)
+
+ } else {
+
+ // Await any ongoing DialMeek or RoundTrip (stopRunning should
+ // interrupt either one quickly).
+
+ <-rt.dialCompleted
+ if rt.conn != nil {
+ _ = rt.conn.Close()
+ }
+ }
+
+ // As with MeekConn.Close, any Close errors from underlying conns are not
+ // propagated.
+ return nil
+}
+
+// RoundTrip transports a request to the broker endpoint and returns a
+// response.
+func (rt *InproxyBrokerRoundTripper) RoundTrip(
+ ctx context.Context,
+ roundTripDelay time.Duration,
+ roundTripTimeout time.Duration,
+ requestPayload []byte) (_ []byte, retErr error) {
+
+ defer func() {
+ // Log any error which results in invoking BrokerClientRoundTripperFailed.
+ var failedError *inproxy.RoundTripperFailedError
+ if std_errors.As(retErr, &failedError) {
+ NoticeWarning("RoundTripperFailedError: %v", retErr)
+ }
+ }()
+
+ // Cancel DialMeek or MeekConn.RoundTrip when:
+ // - Close is called
+ // - the input context is done
+ ctx, cancelFunc := common.MergeContextCancel(ctx, rt.runCtx)
+ defer cancelFunc()
+
+ // Apply any round trip delay. Currently, this is used to apply an
+ // announce request delay post-waitToShareSession, pre-network round
+ // trip, and cancelable by the above merged context.
+ if roundTripDelay > 0 {
+ common.SleepWithContext(ctx, roundTripDelay)
+ }
+
+ // Apply the round trip timeout after any delay is complete.
+ //
+ // This timeout includes any TLS handshake network round trips, as
+ // performed by the initial DialMeek and as may be performed subsequently
+ // by net/http via MeekConn.RoundTrip. These extra round trips should be
+ // accounted for in the difference between client-side request
+ // timeouts, such as InproxyProxyAnswerRequestTimeout, and broker-side
+ // handler timeouts, such as InproxyBrokerProxyAnnounceTimeout, with the
+ // former allowing more time for network round trips.
+
+ requestCtx := ctx
+ if roundTripTimeout > 0 {
+ var requestCancelFunc context.CancelFunc
+ requestCtx, requestCancelFunc = context.WithTimeout(ctx, roundTripTimeout)
+ defer requestCancelFunc()
+ }
+
+ // The first RoundTrip caller will perform the DialMeek step, which
+ // establishes the TLS transport connection to the fronted endpoint.
+ // Following callers will await that DialMeek or share an established
+ // connection.
+ //
+ // To accommodate using custom utls fingerprints, with varying ALPNs, with
+ // net/http, DialMeek completes a full TLS handshake before instantiating
+ // the appropriate http.Transport or http2.Transport. Until that first
+ // DialMeek completes, and unlike standard net/http round trips,
+ // InproxyBrokerRoundTripper won't spawn distinct TLS persistent
+ // connections for concurrent round trips. After DialMeek, concurrent
+ // round trips over HTTP/2 connections may simply share the one TLS
+ // connection, while concurrent round trips over HTTP connections may
+ // spawn additional TLS persistent connections.
+ //
+ // There is no retry here if DialMeek fails, as higher levels will invoke
+ // BrokerClientRoundTripperFailed on failure, clear any replay, select
+ // new dial parameters, and retry.
+
+ if atomic.CompareAndSwapInt32(&rt.dial, 0, 1) {
+
+ // DialMeek hasn't been called yet.
+
+ conn, err := DialMeek(
+ requestCtx,
+ rt.brokerDialParams.meekConfig,
+ rt.brokerDialParams.dialConfig)
+
+ if err != nil && ctx.Err() != context.Canceled {
+
+ // DialMeek performs an initial TLS handshake. DialMeek errors,
+ // excluding a cancelled context as happens on shutdown, are
+ // classified as RoundTripperFailedErrors, which will invoke
+ // BrokerClientRoundTripperFailed, resetting the round tripper
+ // and clearing replay parameters.
+
+ err = inproxy.NewRoundTripperFailedError(err)
+ }
+
+ rt.conn = conn
+ rt.dialErr = err
+ close(rt.dialCompleted)
+
+ if err != nil {
+ return nil, errors.Trace(rt.dialErr)
+ }
+
+ } else {
+
+ // Await any ongoing DialMeek run by a concurrent RoundTrip caller.
+
+ select {
+ case <-rt.dialCompleted:
+ case <-ctx.Done():
+ return nil, errors.Trace(ctx.Err())
+ }
+
+ if rt.dialErr != nil {
+
+ // There is no NewRoundTripperFailedError wrapping here, as the
+ // DialMeek caller will wrap its error and
+ // BrokerClientRoundTripperFailed will be invoked already.
+
+ return nil, errors.Trace(rt.dialErr)
+ }
+ }
+
+ // At this point, rt.conn is an established MeekConn.
+
+ // Note that the network address portion of the URL will be ignored by
+ // MeekConn in favor of the MeekDialConfig, while the path will be used.
+ url := fmt.Sprintf(
+ "https://%s/%s",
+ rt.brokerDialParams.DialAddress,
+ inproxy.BrokerEndPointName)
+
+ request, err := http.NewRequestWithContext(
+ requestCtx, "POST", url, bytes.NewBuffer(requestPayload))
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ startTime := time.Now()
+ response, err := rt.conn.RoundTrip(request)
+ roundTripDuration := time.Since(startTime)
+
+ if err == nil {
+ defer response.Body.Close()
+ if response.StatusCode != http.StatusOK {
+
+ err = fmt.Errorf(
+ "unexpected response status code %d after %v",
+ response.StatusCode,
+ roundTripDuration)
+
+ // Depending on the round trip duration, this case is treated as a
+ // temporary round tripper failure, since we received a response
+ // from the CDN, secured with TLS and VerifyPins, or from the
+ // broker itself. One common scenario is the CDN returning a
+ // temporary timeout error, as can happen when CDN timeouts and
+ // broker timeouts are misaligned, especially for long-polling
+ // requests.
+ //
+ // In this scenario, we can reuse the existing round tripper and
+ // it may be counterproductive to return a RoundTripperFailedError
+ // which will trigger a clearing of any broker dial replay
+ // parameters as well as resetting the round tripper.
+ // + // When the round trip duration is sufficiently short, much + // shorter than expected round trip timeouts, this is still + // classified as a RoundTripperFailedError error, as it is more + // likely due to a more serious issue between the CDN and broker. + + if rt.failureThreshold > 0 && + roundTripDuration <= rt.failureThreshold { + + err = inproxy.NewRoundTripperFailedError(err) + } + } + } else if ctx.Err() != context.Canceled { + + // Other round trip errors, including TLS failures and client-side + // timeouts, but excluding a cancelled context as happens on + // shutdown, are classified as RoundTripperFailedErrors. + + err = inproxy.NewRoundTripperFailedError(err) + } + if err != nil { + return nil, errors.Trace(err) + } + + responsePayload, err := io.ReadAll(response.Body) + if err != nil { + err = inproxy.NewRoundTripperFailedError(err) + return nil, errors.Trace(err) + } + + return responsePayload, nil +} + +// InproxyWebRTCDialInstance is the network state and dial parameters for a +// single WebRTC client or proxy connection. +// +// InproxyWebRTCDialInstance implements the inproxy.WebRTCDialCoordinator +// interface, which provides the WebRTC dial configuration and support to the +// in-proxy package. +type InproxyWebRTCDialInstance struct { + config *Config + networkID string + natStateManager *InproxyNATStateManager + + stunDialParameters *InproxySTUNDialParameters + webRTCDialParameters *InproxyWebRTCDialParameters + + discoverNAT bool + disableSTUN bool + disablePortMapping bool + disableInboundForMobileNetworks bool + disableIPv6ICECandidates bool + discoverNATTimeout time.Duration + webRTCAnswerTimeout time.Duration + awaitDataChannelTimeout time.Duration + proxyDestinationDialTimeout time.Duration +} + +// NewInproxyWebRTCDialInstance creates a new InproxyWebRTCDialInstance. +// +// The caller provides STUN and WebRTC dial parameters that are either newly +// generated or replayed. Proxies may optionally pass in nil for either +// stunDialParameters or webRTCDialParameters, and new parameters will be +// generated. +func NewInproxyWebRTCDialInstance( + config *Config, + networkID string, + isProxy bool, + natStateManager *InproxyNATStateManager, + stunDialParameters *InproxySTUNDialParameters, + webRTCDialParameters *InproxyWebRTCDialParameters) (*InproxyWebRTCDialInstance, error) { + + p := config.GetParameters().Get() + defer p.Close() + + if isProxy && stunDialParameters == nil { + // Auto-generate STUN dial parameters. There's no replay in this case. + var err error + stunDialParameters, err = MakeInproxySTUNDialParameters(config, p, isProxy) + if err != nil { + return nil, errors.Trace(err) + } + } + + if isProxy && webRTCDialParameters == nil { + // Auto-generate STUN dial parameters. There's no replay in this case. 
+ var err error + webRTCDialParameters, err = MakeInproxyWebRTCDialParameters(p) + if err != nil { + return nil, errors.Trace(err) + } + } + + disableSTUN := p.Bool(parameters.InproxyDisableSTUN) + disablePortMapping := p.Bool(parameters.InproxyDisablePortMapping) + disableInboundForMobileNetworks := p.Bool(parameters.InproxyDisableInboundForMobileNetworks) + disableIPv6ICECandidates := p.Bool(parameters.InproxyDisableIPv6ICECandidates) + + var discoverNATTimeout, awaitDataChannelTimeout time.Duration + + if isProxy { + + disableSTUN = disableSTUN || p.Bool(parameters.InproxyProxyDisableSTUN) + + disablePortMapping = disablePortMapping || p.Bool(parameters.InproxyProxyDisablePortMapping) + + disableInboundForMobileNetworks = disableInboundForMobileNetworks || + p.Bool(parameters.InproxyProxyDisableInboundForMobileNetworks) + + disableIPv6ICECandidates = disableIPv6ICECandidates || + p.Bool(parameters.InproxyProxyDisableIPv6ICECandidates) + + discoverNATTimeout = p.Duration(parameters.InproxyProxyDiscoverNATTimeout) + + awaitDataChannelTimeout = p.Duration(parameters.InproxyProxyWebRTCAwaitDataChannelTimeout) + + } else { + + disableSTUN = disableSTUN || p.Bool(parameters.InproxyClientDisableSTUN) + + disablePortMapping = disablePortMapping || p.Bool(parameters.InproxyClientDisablePortMapping) + + disableInboundForMobileNetworks = disableInboundForMobileNetworks || + p.Bool(parameters.InproxyClientDisableInboundForMobileNetworks) + + disableIPv6ICECandidates = disableIPv6ICECandidates || + p.Bool(parameters.InproxyClientDisableIPv6ICECandidates) + + discoverNATTimeout = p.Duration(parameters.InproxyClientDiscoverNATTimeout) + + awaitDataChannelTimeout = p.Duration(parameters.InproxyClientWebRTCAwaitDataChannelTimeout) + } + + // Parameters such as disabling certain operations and operation timeouts + // are not replayed, but snapshots are stored in the + // InproxyWebRTCDialInstance for efficient lookup. + + return &InproxyWebRTCDialInstance{ + config: config, + networkID: networkID, + natStateManager: natStateManager, + + stunDialParameters: stunDialParameters, + webRTCDialParameters: webRTCDialParameters, + + // discoverNAT is ignored by proxies, which always attempt discovery. + // webRTCAnswerTimeout and proxyDestinationDialTimeout are used only + // by proxies. + + discoverNAT: p.WeightedCoinFlip(parameters.InproxyClientDiscoverNATProbability), + disableSTUN: disableSTUN, + disablePortMapping: disablePortMapping, + disableInboundForMobileNetworks: disableInboundForMobileNetworks, + disableIPv6ICECandidates: disableIPv6ICECandidates, + discoverNATTimeout: discoverNATTimeout, + webRTCAnswerTimeout: p.Duration(parameters.InproxyWebRTCAnswerTimeout), + awaitDataChannelTimeout: awaitDataChannelTimeout, + proxyDestinationDialTimeout: p.Duration(parameters.InproxyProxyDestinationDialTimeout), + }, nil +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) NetworkID() string { + return w.networkID +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) NetworkType() inproxy.NetworkType { + return getInproxyNetworkType(GetNetworkType(w.networkID)) +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) ClientRootObfuscationSecret() inproxy.ObfuscationSecret { + return w.webRTCDialParameters.RootObfuscationSecret +} + +// Implements the inproxy.WebRTCDialCoordinator interface. 
+func (w *InproxyWebRTCDialInstance) DoDTLSRandomization() bool { + return w.webRTCDialParameters.DoDTLSRandomization +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) DataChannelTrafficShapingParameters() *inproxy.DataChannelTrafficShapingParameters { + return w.webRTCDialParameters.DataChannelTrafficShapingParameters +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) STUNServerAddress(RFC5780 bool) string { + if RFC5780 { + return w.stunDialParameters.STUNServerAddressRFC5780 + } else { + return w.stunDialParameters.STUNServerAddress + } +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) STUNServerAddressResolved(RFC5780 bool) string { + if RFC5780 { + return w.stunDialParameters.STUNServerAddressRFC5780 + } else { + return w.stunDialParameters.STUNServerAddress + } +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) STUNServerAddressSucceeded(RFC5780 bool, address string) { + + // Currently, for client tunnel dials, STUN dial parameter replay is + // managed by DialParameters and DialParameters.InproxySTUNDialParameters + // are replayed only when the entire dial succeeds. + // + // Note that, for a client tunnel dial, even if the STUN step fails and + // there are no STUN ICE candidates, the subsequent WebRTC connection may + // still proceed and be successful. In this case, the failed STUN dial + // parameters may be replayed. + // + // For proxies, there is no STUN dial parameter replay. + // + // As a future enhancement, consider independent and shared replay of + // working STUN servers, similar to how broker client dial parameters are + // replayed independent of overall dials and proxy relays, and shared + // between local client and proxy instances. + + // Verify/extend the resolver cache entry for any resolved domain after a + // success. + + resolver := w.config.GetResolver() + if resolver != nil { + resolver.VerifyCacheExtension(address) + } +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) STUNServerAddressFailed(RFC5780 bool, address string) { + // Currently there is no independent replay for STUN dial parameters. See + // comment in STUNServerAddressSucceeded. +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) DiscoverNAT() bool { + return w.discoverNAT +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) DisableSTUN() bool { + return w.disableSTUN +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) DisablePortMapping() bool { + return w.disablePortMapping +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) DisableInboundForMobileNetworks() bool { + return w.disableInboundForMobileNetworks +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) DisableIPv6ICECandidates() bool { + return w.disableIPv6ICECandidates +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) NATType() inproxy.NATType { + return w.natStateManager.getNATType(w.networkID) +} + +// Implements the inproxy.WebRTCDialCoordinator interface. 
+func (w *InproxyWebRTCDialInstance) SetNATType(natType inproxy.NATType) { + w.natStateManager.setNATType(w.networkID, natType) +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) PortMappingTypes() inproxy.PortMappingTypes { + return w.natStateManager.getPortMappingTypes(w.networkID) +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) SetPortMappingTypes(portMappingTypes inproxy.PortMappingTypes) { + w.natStateManager.setPortMappingTypes(w.networkID, portMappingTypes) +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) ResolveAddress(ctx context.Context, network, address string) (string, error) { + + // Use the Psiphon resolver to resolve addresses. + + r := w.config.GetResolver() + if r == nil { + return "", errors.TraceNew("missing resolver") + } + + // Identify when the address to be resolved is one of the configured STUN + // servers, and, in those cases, use/replay any STUN dial parameters + // ResolveParameters; and record the resolved IP address for metrics. + // + // In the in-proxy proxy case, ResolveAddress is invoked for the upstream, + // 2nd hop dial as well as for STUN server addresses. + // + // Limitation: there's no ResolveParameters, including no preresolved DNS + // tactics, for 2nd hop dials. + + isSTUNServerAddress := address == w.stunDialParameters.STUNServerAddress + isSTUNServerAddressRFC5780 := address == w.stunDialParameters.STUNServerAddressRFC5780 + var resolveParams *resolver.ResolveParameters + if isSTUNServerAddress || isSTUNServerAddressRFC5780 { + resolveParams = w.stunDialParameters.ResolveParameters + } + + resolved, err := r.ResolveAddress( + ctx, w.networkID, resolveParams, network, address) + if err != nil { + return "", errors.Trace(err) + } + + // Invoke the resolved IP callbacks only when the input is not the + // resolved IP address (this differs from the meek + // DialConfig.ResolvedIPCallback case). + + if resolved != address { + if isSTUNServerAddress { + w.stunDialParameters.STUNServerResolvedIPAddress.Store(resolved) + } else if isSTUNServerAddressRFC5780 { + w.stunDialParameters.STUNServerRFC5780ResolvedIPAddress.Store(resolved) + } + } + + return resolved, nil +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) UDPListen(ctx context.Context) (net.PacketConn, error) { + + // Create a new inproxyUDPConn for use as the in-proxy STUN and/ord WebRTC + // UDP socket. + + conn, err := newInproxyUDPConn(ctx, w.config) + if err != nil { + return nil, errors.Trace(err) + } + return conn, nil +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) UDPConn( + ctx context.Context, network, remoteAddress string) (net.PacketConn, error) { + + // Create a new UDPConn bound to the specified remote address. This UDP + // conn is used, by the inproxy package, to determine the local address + // of the active interface the OS will select for the specified remote + // destination. + // + // Only IP address destinations are supported. ResolveIP is wired up only + // because NewUDPConn requires a non-nil resolver. 
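+ //
+ // For illustration, the passthrough resolver below behaves roughly as
+ // follows (addresses are hypothetical):
+ //
+ //   ResolveIP(ctx, "192.0.2.1")   -> []net.IP{192.0.2.1}
+ //   ResolveIP(ctx, "example.org") -> error ("not supported")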
+ + dialConfig := &DialConfig{ + DeviceBinder: w.config.deviceBinder, + IPv6Synthesizer: w.config.IPv6Synthesizer, + ResolveIP: func(_ context.Context, hostname string) ([]net.IP, error) { + IP := net.ParseIP(hostname) + if IP == nil { + return nil, errors.TraceNew("not supported") + } + return []net.IP{IP}, nil + }, + } + + conn, _, err := NewUDPConn(ctx, network, true, "", remoteAddress, dialConfig) + if err != nil { + return nil, errors.Trace(err) + } + + return conn, nil +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) BindToDevice(fileDescriptor int) error { + + if w.config.deviceBinder == nil { + return nil + } + + // Use config.deviceBinder, with wired up logging, not + // config.DeviceBinder; other tunnel-core dials do this indirectly via + // psiphon.DialConfig. + + _, err := w.config.deviceBinder.BindToDevice(fileDescriptor) + return errors.Trace(err) +} + +func (w *InproxyWebRTCDialInstance) ProxyUpstreamDial( + ctx context.Context, network, address string) (net.Conn, error) { + + // This implementation of ProxyUpstreamDial applies additional socket + // options and BindToDevice as required, but is otherwise a stock dialer. + // + // TODO: Use custom UDP and TCP dialers, and wire up TCP/UDP-level + // tactics, including BPF and the custom resolver, which may be enabled + // for the proxy's ISP or geolocation. Orchestrating preresolved DNS + // requires additional information from either from the broker, the + // FrontingProviderID, to be applied to any + // DNSResolverPreresolvedIPAddressCIDRs proxy tactics. In addition, + // replay the selected upstream dial tactics parameters. + + dialer := net.Dialer{ + Control: func(_, _ string, c syscall.RawConn) error { + var controlErr error + err := c.Control(func(fd uintptr) { + + socketFD := int(fd) + + setAdditionalSocketOptions(socketFD) + + if w.config.deviceBinder != nil { + _, err := w.config.deviceBinder.BindToDevice(socketFD) + if err != nil { + controlErr = errors.Tracef("BindToDevice failed: %s", err) + return + } + } + }) + if controlErr != nil { + return errors.Trace(controlErr) + } + return errors.Trace(err) + }, + } + + conn, err := dialer.DialContext(ctx, network, address) + if err != nil { + return nil, errors.Trace(err) + } + return conn, nil +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) DiscoverNATTimeout() time.Duration { + return w.discoverNATTimeout +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) WebRTCAnswerTimeout() time.Duration { + return w.webRTCAnswerTimeout +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) WebRTCAwaitDataChannelTimeout() time.Duration { + return w.awaitDataChannelTimeout +} + +// Implements the inproxy.WebRTCDialCoordinator interface. +func (w *InproxyWebRTCDialInstance) ProxyDestinationDialTimeout() time.Duration { + return w.proxyDestinationDialTimeout +} + +// InproxySTUNDialParameters is a set of STUN dial parameters. +// InproxySTUNDialParameters is compatible with DialParameters JSON +// marshaling. For client in-proxy tunnel dials, DialParameters will manage +// STUN dial parameter selection and replay. +// +// When an instance of InproxySTUNDialParameters is unmarshaled from JSON, +// Prepare must be called to initialize the instance for use. 
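+//
+// For example (illustrative only; persistedJSON is a previously stored
+// record):
+//
+//   var replayed InproxySTUNDialParameters
+//   _ = json.Unmarshal(persistedJSON, &replayed)
+//   replayed.Prepare()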
+type InproxySTUNDialParameters struct { + ResolveParameters *resolver.ResolveParameters + STUNServerAddress string + STUNServerAddressRFC5780 string + + STUNServerResolvedIPAddress atomic.Value `json:"-"` + STUNServerRFC5780ResolvedIPAddress atomic.Value `json:"-"` +} + +// MakeInproxySTUNDialParameters generates new STUN dial parameters from the +// given tactics parameters. +func MakeInproxySTUNDialParameters( + config *Config, + p parameters.ParametersAccessor, + isProxy bool) (*InproxySTUNDialParameters, error) { + + var stunServerAddresses, stunServerAddressesRFC5780 []string + if isProxy { + stunServerAddresses = p.Strings( + parameters.InproxyProxySTUNServerAddresses, parameters.InproxySTUNServerAddresses) + stunServerAddressesRFC5780 = p.Strings( + parameters.InproxyProxySTUNServerAddressesRFC5780, parameters.InproxySTUNServerAddressesRFC5780) + } else { + stunServerAddresses = p.Strings( + parameters.InproxyClientSTUNServerAddresses, parameters.InproxySTUNServerAddresses) + stunServerAddressesRFC5780 = p.Strings( + parameters.InproxyClientSTUNServerAddressesRFC5780, parameters.InproxySTUNServerAddressesRFC5780) + } + + // Empty STUN server address lists are not an error condition. When used + // for WebRTC, the STUN ICE candidate gathering will be skipped but the + // WebRTC connection may still be established via other candidate types. + + var stunServerAddress, stunServerAddressRFC5780 string + + if len(stunServerAddresses) > 0 { + stunServerAddress = stunServerAddresses[prng.Range(0, len(stunServerAddresses)-1)] + } + + if len(stunServerAddressesRFC5780) > 0 { + stunServerAddressRFC5780 = + stunServerAddressesRFC5780[prng.Range(0, len(stunServerAddressesRFC5780)-1)] + } + + // Create DNS resolver dial parameters to use when resolving STUN server + // domain addresses. Instantiate only when there is a domain to be + // resolved; when recording DNS fields, GetMetrics will assume that a nil + // InproxySTUNDialParameters.ResolveParameters implies no resolve was + // attempted. + + var resolveParameters *resolver.ResolveParameters + + if (stunServerAddress != "" && net.ParseIP(stunServerAddress) == nil) || + (stunServerAddressRFC5780 != "" && net.ParseIP(stunServerAddressRFC5780) == nil) { + + // No DNSResolverPreresolvedIPAddressCIDRs will be selected since no + // fronting provider ID is specified. + // + // It would be possible to overload the meaning of the fronting + // provider ID field by using a string derived from STUN server + // address as the key. + // + // However, preresolved STUN configuration can already be achieved + // with IP addresses in the STUNServerAddresses tactics parameters. + // This approach results in slightly different metrics log fields vs. + // preresolved. + + var err error + resolveParameters, err = config.GetResolver().MakeResolveParameters(p, "", "") + if err != nil { + return nil, errors.Trace(err) + } + } + + dialParams := &InproxySTUNDialParameters{ + ResolveParameters: resolveParameters, + STUNServerAddress: stunServerAddress, + STUNServerAddressRFC5780: stunServerAddressRFC5780, + } + + dialParams.Prepare() + + return dialParams, nil +} + +// Prepare initializes an InproxySTUNDialParameters for use. Prepare should be +// called for any InproxySTUNDialParameters instance unmarshaled from JSON. 
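+//
+// Prepare stores empty strings in the atomic resolved-address fields, which
+// are excluded from JSON marshaling, so that GetMetrics can safely Load them.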
+func (dialParams *InproxySTUNDialParameters) Prepare() { + dialParams.STUNServerResolvedIPAddress.Store("") + dialParams.STUNServerRFC5780ResolvedIPAddress.Store("") +} + +// IsValidClientReplay checks that the selected STUN servers remain configured +// STUN server candidates for in-proxy clients. +func (dialParams *InproxySTUNDialParameters) IsValidClientReplay( + p parameters.ParametersAccessor) bool { + + return (dialParams.STUNServerAddress == "" || + common.Contains( + p.Strings(parameters.InproxyClientSTUNServerAddresses), + dialParams.STUNServerAddress)) && + + (dialParams.STUNServerAddressRFC5780 == "" || + common.Contains( + p.Strings(parameters.InproxyClientSTUNServerAddressesRFC5780), + dialParams.STUNServerAddressRFC5780)) +} + +// GetMetrics implements the common.MetricsSource interface and returns log +// fields detailing the STUN dial parameters. +func (dialParams *InproxySTUNDialParameters) GetMetrics() common.LogFields { + + // There is no is_replay-type field added here; replay is handled at a + // higher level, and, for client in-proxy tunnel dials, is part of the + // main tunnel dial parameters. + + logFields := make(common.LogFields) + + logFields["inproxy_webrtc_stun_server"] = dialParams.STUNServerAddress + + resolvedIPAddress := dialParams.STUNServerResolvedIPAddress.Load().(string) + if resolvedIPAddress != "" { + logFields["inproxy_webrtc_stun_server_resolved_ip_address"] = resolvedIPAddress + } + + // TODO: log RFC5780 selection only if used? + logFields["inproxy_webrtc_stun_server_RFC5780"] = dialParams.STUNServerAddressRFC5780 + + resolvedIPAddress = dialParams.STUNServerRFC5780ResolvedIPAddress.Load().(string) + if resolvedIPAddress != "" { + logFields["inproxy_webrtc_stun_server_RFC5780_resolved_ip_address"] = resolvedIPAddress + } + + if dialParams.ResolveParameters != nil { + + // See comment in getBaseAPIParameters regarding + // dialParams.ResolveParameters handling. As noted in + // MakeInproxySTUNDialParameters, no preresolved parameters are set, + // so none are checked for logging. + // + // Limitation: the potential use of single ResolveParameters to + // resolve multiple, different STUN server domains can skew the + // meaning of GetFirstAttemptWithAnswer. + + if dialParams.ResolveParameters.PreferAlternateDNSServer { + logFields["inproxy_webrtc_dns_preferred"] = dialParams.ResolveParameters.AlternateDNSServer + } + + if dialParams.ResolveParameters.ProtocolTransformName != "" { + logFields["inproxy_webrtc_dns_transform"] = dialParams.ResolveParameters.ProtocolTransformName + } + + logFields["inproxy_webrtc_dns_attempt"] = strconv.Itoa( + dialParams.ResolveParameters.GetFirstAttemptWithAnswer()) + } + return logFields +} + +// InproxyWebRTCDialParameters is a set of WebRTC obfuscation dial parameters. +// InproxyWebRTCDialParameters is compatible with DialParameters JSON +// marshaling. For client in-proxy tunnel dials, DialParameters will manage +// WebRTC dial parameter selection and replay. +type InproxyWebRTCDialParameters struct { + RootObfuscationSecret inproxy.ObfuscationSecret + DataChannelTrafficShapingParameters *inproxy.DataChannelTrafficShapingParameters + DoDTLSRandomization bool +} + +// MakeInproxyWebRTCDialParameters generates new InproxyWebRTCDialParameters. 
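+//
+// The root obfuscation secret is always newly generated; data channel traffic
+// shaping and DTLS randomization are each enabled probabilistically, based on
+// the corresponding tactics parameters.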
+func MakeInproxyWebRTCDialParameters( + p parameters.ParametersAccessor) (*InproxyWebRTCDialParameters, error) { + + rootObfuscationSecret, err := inproxy.GenerateRootObfuscationSecret() + if err != nil { + return nil, errors.Trace(err) + } + + var trafficSharingParams inproxy.DataChannelTrafficShapingParameters + if p.WeightedCoinFlip(parameters.InproxyDataChannelTrafficShapingProbability) { + trafficSharingParams = inproxy.DataChannelTrafficShapingParameters( + p.InproxyDataChannelTrafficShapingParameters( + parameters.InproxyDataChannelTrafficShapingParameters)) + } + + doDTLSRandomization := p.WeightedCoinFlip(parameters.InproxyDTLSRandomizationProbability) + + return &InproxyWebRTCDialParameters{ + RootObfuscationSecret: rootObfuscationSecret, + DataChannelTrafficShapingParameters: &trafficSharingParams, + DoDTLSRandomization: doDTLSRandomization, + }, nil +} + +// GetMetrics implements the common.MetricsSource interface. +func (dialParams *InproxyWebRTCDialParameters) GetMetrics() common.LogFields { + + // There is no is_replay-type field added here; replay is handled at a + // higher level, and, for client in-proxy tunnel dials, is part of the + // main tunnel dial parameters. + + // Currently, all WebRTC metrics are delivered via + // inproxy.ClientConn/WebRTCConn GetMetrics. + return common.LogFields{} +} + +// InproxyNATStateManager manages the NAT-related network topology state for +// the current network, caching the discovered network NAT type and supported +// port mapping types, if any. +type InproxyNATStateManager struct { + config *Config + + mutex sync.Mutex + networkID string + natType inproxy.NATType + portMappingTypes inproxy.PortMappingTypes +} + +// NewInproxyNATStateManager creates a new InproxyNATStateManager. +func NewInproxyNATStateManager(config *Config) *InproxyNATStateManager { + + s := &InproxyNATStateManager{ + config: config, + natType: inproxy.NATTypeUnknown, + portMappingTypes: inproxy.PortMappingTypes{}, + } + + s.reset() + + return s +} + +// TacticsApplied implements the TacticsAppliedReceiver interface, and is +// called when tactics have changed, which triggers a cached NAT state reset +// in order to apply potentially changed parameters. +func (s *InproxyNATStateManager) TacticsApplied() error { + s.reset() + return nil +} + +func (s *InproxyNATStateManager) reset() { + + // Assumes s.mutex lock is held. 
+ + networkID := s.config.GetNetworkID() + + s.networkID = networkID + s.natType = inproxy.NATTypeUnknown + s.portMappingTypes = inproxy.PortMappingTypes{} +} + +func (s *InproxyNATStateManager) getNATType( + networkID string) inproxy.NATType { + + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.networkID != networkID { + return inproxy.NATTypeUnknown + } + + return s.natType +} + +func (s *InproxyNATStateManager) setNATType( + networkID string, natType inproxy.NATType) { + + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.networkID != networkID { + return + } + + s.natType = natType +} + +func (s *InproxyNATStateManager) getPortMappingTypes( + networkID string) inproxy.PortMappingTypes { + + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.networkID != networkID { + return inproxy.PortMappingTypes{} + } + + return s.portMappingTypes +} + +func (s *InproxyNATStateManager) setPortMappingTypes( + networkID string, portMappingTypes inproxy.PortMappingTypes) { + + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.networkID != networkID { + return + } + + s.portMappingTypes = portMappingTypes +} + +// inproxyUDPConn is based on NewUDPConn and includes the write timeout +// workaround from common.WriteTimeoutUDPConn. +// +// inproxyUDPConn expands the NewUDPConn IPv6Synthesizer to support many +// destination addresses, as the inproxyUDPConn will be used to send/receive +// packets between many remote destination addresses. +// +// inproxyUDPConn implements the net.PacketConn interface. +type inproxyUDPConn struct { + udpConn *net.UDPConn + + ipv6Synthesizer IPv6Synthesizer + + synthesizerMutex sync.Mutex + ipv4ToIPv6 map[netip.Addr]net.IP + ipv6ToIPv4 map[netip.Addr]net.IP +} + +func newInproxyUDPConn(ctx context.Context, config *Config) (net.PacketConn, error) { + + listen := &net.ListenConfig{ + Control: func(_, _ string, c syscall.RawConn) error { + var controlErr error + err := c.Control(func(fd uintptr) { + + socketFD := int(fd) + + setAdditionalSocketOptions(socketFD) + + // Use config.deviceBinder, with wired up logging, not + // config.DeviceBinder; other tunnel-core dials do this + // indirectly via psiphon.DialConfig. + + if config.deviceBinder != nil { + _, err := config.deviceBinder.BindToDevice(socketFD) + if err != nil { + controlErr = errors.Tracef("BindToDevice failed: %s", err) + return + } + } + }) + if controlErr != nil { + return errors.Trace(controlErr) + } + return errors.Trace(err) + }, + } + + // Create an "unconnected" UDP socket for use with WriteTo and listening + // on all interfaces. See the limitation comment in NewUDPConn regarding + // its equivilent mode. 
+ + packetConn, err := listen.ListenPacket(ctx, "udp", "") + if err != nil { + return nil, errors.Trace(err) + } + + var ok bool + udpConn, ok := packetConn.(*net.UDPConn) + if !ok { + return nil, errors.Tracef("unexpected conn type: %T", packetConn) + } + + conn := &inproxyUDPConn{ + udpConn: udpConn, + ipv6Synthesizer: config.IPv6Synthesizer, + } + if conn.ipv6Synthesizer != nil { + conn.ipv4ToIPv6 = make(map[netip.Addr]net.IP) + conn.ipv6ToIPv4 = make(map[netip.Addr]net.IP) + } + + return conn, nil +} + +func inproxyUDPAddrFromAddrPort(addrPort netip.AddrPort) *net.UDPAddr { + return &net.UDPAddr{ + IP: addrPort.Addr().AsSlice(), + Port: int(addrPort.Port()), + } +} + +func (conn *inproxyUDPConn) ReadFrom(p []byte) (int, net.Addr, error) { + + // net.UDPConn.ReadFrom currently allocates a &UDPAddr{} per call, and so + // the &net.UDPAddr{} allocations done in the following synthesizer code + // path are no more than the standard code path. + // + // TODO: avoid all address allocations in both ReadFrom and WriteTo by: + // + // - changing ipvXToIPvY to map[netip.AddrPort]*net.UDPAddr + // - using a similar lookup for the non-synthesizer code path + // + // Such a scheme would work only if the caller is guaranteed to not mutate + // the returned net.Addr. + + if conn.ipv6Synthesizer == nil { + // Do not wrap any I/O err returned by UDPConn + return conn.udpConn.ReadFrom(p) + } + + n, addrPort, err := conn.udpConn.ReadFromUDPAddrPort(p) + // Reverse any synthesized address before returning err. + + // Reverse the IPv6 synthesizer, returning the original IPv4 address + // as expected by the caller, including pion/webrtc. This logic + // assumes that no synthesized IPv6 address will conflict with any + // real IPv6 address. + + var IP net.IP + ipAddr := addrPort.Addr() + if ipAddr.Is6() { + conn.synthesizerMutex.Lock() + IP, _ = conn.ipv6ToIPv4[ipAddr] + conn.synthesizerMutex.Unlock() + } + if IP == nil { + IP = ipAddr.AsSlice() + } + + // Do not wrap any I/O err returned by UDPConn + return n, &net.UDPAddr{IP: IP, Port: int(addrPort.Port())}, err +} + +func (conn *inproxyUDPConn) WriteTo(b []byte, addr net.Addr) (int, error) { + + // See common.WriteTimeoutUDPConn. + err := conn.udpConn.SetWriteDeadline( + time.Now().Add(common.UDP_PACKET_WRITE_TIMEOUT)) + if err != nil { + return 0, errors.Trace(err) + } + + if conn.ipv6Synthesizer == nil { + // Do not wrap any I/O err returned by UDPConn + return conn.udpConn.WriteTo(b, addr) + } + + // When configured, attempt to synthesize IPv6 addresses from an IPv4 + // addresses for compatibility on DNS64/NAT64 networks. + // + // Store any synthesized addresses in a lookup table and reuse for + // subsequent writes to the same destination as well as reversing the + // conversion on reads. + // + // If synthesize fails, fall back to trying the original address. + + // The netip.Addr type is used as the map key and the input address is + // assumed to be of the type *net.UDPAddr. This allows for more efficient + // lookup operations vs. a string key and parsing the input address via + // addr.String()/net.SplitHostPort(). + + udpAddr, ok := addr.(*net.UDPAddr) + if !ok { + return 0, errors.Tracef("unexpected addr type: %T", addr) + } + + // Stack allocate to avoid an extra heap allocation per write. 
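+ // synthesizedAddr.IP remains nil when no IPv6 synthesis applies; in that
+ // case the original addr is used in the WriteTo call below.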
+ var synthesizedAddr net.UDPAddr + + if udpAddr.IP.To4() != nil { + + ip4Addr, ok := netip.AddrFromSlice(udpAddr.IP) + if !ok { + return 0, errors.Tracef("invalid addr") + } + conn.synthesizerMutex.Lock() + synthesizedIP, ok := conn.ipv4ToIPv6[ip4Addr] + conn.synthesizerMutex.Unlock() + if ok { + synthesizedAddr = net.UDPAddr{IP: synthesizedIP, Port: udpAddr.Port} + } else { + synthesized := conn.ipv6Synthesizer.IPv6Synthesize(udpAddr.IP.String()) + if synthesized != "" { + synthesizedIP := net.ParseIP(synthesized) + if synthesizedIP != nil { + conn.synthesizerMutex.Lock() + conn.ipv4ToIPv6[ip4Addr] = synthesizedIP + ipv6Addr, _ := netip.AddrFromSlice(synthesizedIP) + conn.ipv6ToIPv4[ipv6Addr] = udpAddr.IP + conn.synthesizerMutex.Unlock() + synthesizedAddr = net.UDPAddr{IP: synthesizedIP, Port: udpAddr.Port} + } + } + } + } + + if synthesizedAddr.IP == nil { + // Do not wrap any I/O err returned by UDPConn + return conn.udpConn.WriteTo(b, addr) + } + + return conn.udpConn.WriteTo(b, &synthesizedAddr) +} + +func (conn *inproxyUDPConn) Close() error { + // Do not wrap any I/O err returned by UDPConn + return conn.udpConn.Close() +} + +func (conn *inproxyUDPConn) LocalAddr() net.Addr { + // Do not wrap any I/O err returned by UDPConn + return conn.udpConn.LocalAddr() +} + +func (conn *inproxyUDPConn) SetDeadline(t time.Time) error { + // Do not wrap any I/O err returned by UDPConn + return conn.udpConn.SetDeadline(t) +} + +func (conn *inproxyUDPConn) SetReadDeadline(t time.Time) error { + // Do not wrap any I/O err returned by UDPConn + return conn.udpConn.SetReadDeadline(t) +} + +func (conn *inproxyUDPConn) SetWriteDeadline(t time.Time) error { + // Do not wrap any I/O err returned by UDPConn + return conn.udpConn.SetWriteDeadline(t) +} + +// getInproxyNetworkType converts a legacy string network type to an inproxy +// package type. +func getInproxyNetworkType(networkType string) inproxy.NetworkType { + + // There is no VPN type conversion; clients and proxies will skip/fail + // in-proxy operations on non-Psiphon VPN networks. + + switch networkType { + case "WIFI": + return inproxy.NetworkTypeWiFi + case "MOBILE": + return inproxy.NetworkTypeMobile + } + + return inproxy.NetworkTypeUnknown +} diff --git a/psiphon/inproxy_test.go b/psiphon/inproxy_test.go new file mode 100644 index 000000000..e77500f3c --- /dev/null +++ b/psiphon/inproxy_test.go @@ -0,0 +1,445 @@ +/* + * Copyright (c) 2023, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package psiphon + +import ( + "encoding/json" + "io/ioutil" + "os" + "regexp" + "testing" + + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/resolver" +) + +func TestInproxyComponents(t *testing.T) { + + // This is a unit test of the in-proxy components internals, such as + // replay; actual in-proxy broker round trips are exercised in the + // psiphon/server end-to-end tests. + + err := runInproxyBrokerDialParametersTest() + if err != nil { + t.Fatalf(errors.Trace(err).Error()) + } + + err = runInproxySTUNDialParametersTest() + if err != nil { + t.Fatalf(errors.Trace(err).Error()) + } + + err = runInproxyNATStateTest() + if err != nil { + t.Fatalf(errors.Trace(err).Error()) + } + + // TODO: test inproxyUDPConn multiplexed IPv6Synthesizer +} + +func runInproxyBrokerDialParametersTest() error { + + testDataDirName, err := ioutil.TempDir("", "psiphon-inproxy-broker-test") + if err != nil { + return errors.Trace(err) + } + defer os.RemoveAll(testDataDirName) + + isProxy := false + propagationChannelID := prng.HexString(8) + sponsorID := prng.HexString(8) + networkID := "NETWORK1" + addressRegex := `[a-z0-9]{5,10}\.example\.org` + commonCompartmentID, _ := inproxy.MakeID() + personalCompartmentID, _ := inproxy.MakeID() + commonCompartmentIDs := []string{commonCompartmentID.String()} + personalCompartmentIDs := []string{personalCompartmentID.String()} + privateKey, _ := inproxy.GenerateSessionPrivateKey() + publicKey, _ := privateKey.GetPublicKey() + obfuscationSecret, _ := inproxy.GenerateRootObfuscationSecret() + brokerSpecs := []*parameters.InproxyBrokerSpec{ + ¶meters.InproxyBrokerSpec{ + BrokerPublicKey: publicKey.String(), + BrokerRootObfuscationSecret: obfuscationSecret.String(), + BrokerFrontingSpecs: []*parameters.FrontingSpec{ + ¶meters.FrontingSpec{ + FrontingProviderID: prng.HexString(8), + Addresses: []string{addressRegex}, + VerifyServerName: "example.org", + Host: "example.org", + }, + }, + }, + } + retainFailed := float64(0.0) + + config := &Config{ + DataRootDirectory: testDataDirName, + PropagationChannelId: propagationChannelID, + SponsorId: sponsorID, + NetworkID: networkID, + } + err = config.Commit(false) + if err != nil { + return errors.Trace(err) + } + + err = OpenDataStore(config) + if err != nil { + return errors.Trace(err) + } + defer CloseDataStore() + + manager := NewInproxyBrokerClientManager(config, isProxy) + + // Test: no broker specs + + _, _, err = manager.GetBrokerClient(networkID) + if err == nil { + return errors.TraceNew("unexpected success") + } + + // Test: select broker and common compartment IDs + + config = &Config{ + DataRootDirectory: testDataDirName, + PropagationChannelId: propagationChannelID, + SponsorId: sponsorID, + NetworkID: networkID, + InproxyBrokerSpecs: brokerSpecs, + InproxyCommonCompartmentIDs: commonCompartmentIDs, + InproxyReplayBrokerRetainFailedProbability: &retainFailed, + } + err = config.Commit(false) + if err != nil { + return errors.Trace(err) + } + config.SetResolver(resolver.NewResolver(&resolver.NetworkConfig{}, networkID)) + + manager = NewInproxyBrokerClientManager(config, isProxy) + + brokerClient, brokerDialParams, err := manager.GetBrokerClient(networkID) + if err != nil { + return errors.Trace(err) + 
} + + if !regexp.MustCompile(addressRegex).Copy().Match( + []byte(brokerDialParams.FrontingDialAddress)) { + return errors.TraceNew("unexpected FrontingDialAddress") + } + + if len(brokerClient.GetBrokerDialCoordinator().CommonCompartmentIDs()) != 1 || + brokerClient.GetBrokerDialCoordinator().CommonCompartmentIDs()[0].String() != + commonCompartmentID.String() { + return errors.TraceNew("unexpected compartment IDs") + } + + _ = brokerDialParams.GetMetrics() + + // Test: replay on success + + previousFrontingDialAddress := brokerDialParams.FrontingDialAddress + previousTLSProfile := brokerDialParams.TLSProfile + + roundTripper, err := brokerClient.GetBrokerDialCoordinator().BrokerClientRoundTripper() + if err != nil { + return errors.Trace(err) + } + + brokerClient.GetBrokerDialCoordinator().BrokerClientRoundTripperSucceeded(roundTripper) + + manager = NewInproxyBrokerClientManager(config, isProxy) + + brokerClient, brokerDialParams, err = manager.GetBrokerClient(networkID) + if err != nil { + return errors.Trace(err) + } + + if !brokerDialParams.isReplay { + return errors.TraceNew("unexpected non-replay") + } + + if brokerDialParams.FrontingDialAddress != previousFrontingDialAddress { + return errors.TraceNew("unexpected replayed FrontingDialAddress") + } + + if brokerDialParams.TLSProfile != previousTLSProfile { + return errors.TraceNew("unexpected replayed TLSProfile") + } + + _ = brokerDialParams.GetMetrics() + + // Test: manager's broker client and dial parameters reinitialized after + // network ID change + + previousBrokerClient := brokerClient + previousNetworkID := networkID + networkID = "NETWORK2" + config.networkIDGetter = newStaticNetworkGetter(networkID) + config.SetResolver(resolver.NewResolver(&resolver.NetworkConfig{}, networkID)) + + brokerClient, brokerDialParams, err = manager.GetBrokerClient(networkID) + if err != nil { + return errors.Trace(err) + } + + if brokerClient == previousBrokerClient { + return errors.TraceNew("unexpected brokerClient") + } + + if brokerDialParams.isReplay { + return errors.TraceNew("unexpected replay") + } + + if brokerDialParams.FrontingDialAddress == previousFrontingDialAddress { + return errors.TraceNew("unexpected non-replayed FrontingDialAddress") + } + + _ = brokerDialParams.GetMetrics() + + // Test: another replay after switch back to previous network ID + + networkID = previousNetworkID + config.networkIDGetter = newStaticNetworkGetter(networkID) + + brokerClient, brokerDialParams, err = manager.GetBrokerClient(networkID) + if err != nil { + return errors.Trace(err) + } + + if !brokerDialParams.isReplay { + return errors.TraceNew("unexpected non-replay") + } + + if brokerDialParams.FrontingDialAddress != previousFrontingDialAddress { + return errors.TraceNew("unexpected replayed FrontingDialAddress") + } + + if brokerDialParams.TLSProfile != previousTLSProfile { + return errors.TraceNew("unexpected replayed TLSProfile") + } + + _ = brokerDialParams.GetMetrics() + + // Test: clear replay + + roundTripper, err = brokerClient.GetBrokerDialCoordinator().BrokerClientRoundTripper() + if err != nil { + return errors.Trace(err) + } + + brokerClient.GetBrokerDialCoordinator().BrokerClientRoundTripperFailed(roundTripper) + + manager = NewInproxyBrokerClientManager(config, isProxy) + + brokerClient, brokerDialParams, err = manager.GetBrokerClient(networkID) + if err != nil { + return errors.Trace(err) + } + + if brokerDialParams.isReplay { + return errors.TraceNew("unexpected replay") + } + + if brokerDialParams.FrontingDialAddress == 
previousFrontingDialAddress { + return errors.TraceNew("unexpected non-replayed FrontingDialAddress") + } + + _ = brokerDialParams.GetMetrics() + + // Test: no common compartment IDs sent when personal ID is set + + config.InproxyClientPersonalCompartmentIDs = personalCompartmentIDs + config.InproxyProxyPersonalCompartmentIDs = personalCompartmentIDs + + manager = NewInproxyBrokerClientManager(config, isProxy) + + brokerClient, brokerDialParams, err = manager.GetBrokerClient(networkID) + if err != nil { + return errors.Trace(err) + } + + if len(brokerClient.GetBrokerDialCoordinator().CommonCompartmentIDs()) != 0 || + len(brokerClient.GetBrokerDialCoordinator().PersonalCompartmentIDs()) != 1 || + brokerClient.GetBrokerDialCoordinator().PersonalCompartmentIDs()[0].String() != + personalCompartmentID.String() { + return errors.TraceNew("unexpected compartment IDs") + } + + // Test: use persisted common compartment IDs + + config = &Config{ + PropagationChannelId: propagationChannelID, + SponsorId: sponsorID, + NetworkID: networkID, + } + config.InproxyBrokerSpecs = brokerSpecs + config.InproxyCommonCompartmentIDs = nil + err = config.Commit(false) + if err != nil { + return errors.Trace(err) + } + config.SetResolver(resolver.NewResolver(&resolver.NetworkConfig{}, networkID)) + + manager = NewInproxyBrokerClientManager(config, isProxy) + + brokerClient, brokerDialParams, err = manager.GetBrokerClient(networkID) + if err != nil { + return errors.Trace(err) + } + + if len(brokerClient.GetBrokerDialCoordinator().CommonCompartmentIDs()) != 1 || + brokerClient.GetBrokerDialCoordinator().CommonCompartmentIDs()[0].String() != + commonCompartmentID.String() { + return errors.TraceNew("unexpected compartment IDs") + } + + _ = brokerDialParams.GetMetrics() + + return nil +} + +func runInproxySTUNDialParametersTest() error { + + testDataDirName, err := ioutil.TempDir("", "psiphon-inproxy-stun-test") + if err != nil { + return errors.Trace(err) + } + defer os.RemoveAll(testDataDirName) + + propagationChannelID := prng.HexString(8) + sponsorID := prng.HexString(8) + networkID := "NETWORK1" + stunServerAddresses := []string{"example.org"} + + config := &Config{ + DataRootDirectory: testDataDirName, + PropagationChannelId: propagationChannelID, + SponsorId: sponsorID, + NetworkID: networkID, + InproxySTUNServerAddresses: stunServerAddresses, + InproxySTUNServerAddressesRFC5780: stunServerAddresses, + } + err = config.Commit(false) + if err != nil { + return errors.Trace(err) + } + config.SetResolver(resolver.NewResolver(&resolver.NetworkConfig{}, networkID)) + + p := config.GetParameters().Get() + defer p.Close() + + dialParams, err := MakeInproxySTUNDialParameters(config, p, false) + if err != nil { + return errors.Trace(err) + } + + _ = dialParams.GetMetrics() + + dialParamsJSON, err := json.Marshal(dialParams) + if err != nil { + return errors.Trace(err) + } + + var replayDialParams *InproxySTUNDialParameters + err = json.Unmarshal(dialParamsJSON, &replayDialParams) + if err != nil { + return errors.Trace(err) + } + + replayDialParams.Prepare() + + _ = replayDialParams.GetMetrics() + + return nil +} + +func runInproxyNATStateTest() error { + + propagationChannelID := prng.HexString(8) + sponsorID := prng.HexString(8) + networkID := "NETWORK1" + + config := &Config{ + PropagationChannelId: propagationChannelID, + SponsorId: sponsorID, + NetworkID: networkID, + } + err := config.Commit(false) + if err != nil { + return errors.Trace(err) + } + + manager := NewInproxyNATStateManager(config) + + // Test: set 
values stored and cached + + manager.setNATType(networkID, inproxy.NATTypeSymmetric) + manager.setPortMappingTypes(networkID, inproxy.PortMappingTypes{inproxy.PortMappingTypeUPnP}) + + if manager.getNATType(networkID) != inproxy.NATTypeSymmetric { + return errors.TraceNew("unexpected NAT type") + } + + portMappingTypes := manager.getPortMappingTypes(networkID) + if len(portMappingTypes) != 1 || portMappingTypes[0] != inproxy.PortMappingTypeUPnP { + return errors.TraceNew("unexpected port mapping types") + } + + // Test: set values ignored when network ID is changing + + otherNetworkID := "NETWORK2" + + manager.setNATType(otherNetworkID, inproxy.NATTypePortRestrictedCone) + manager.setPortMappingTypes(otherNetworkID, inproxy.PortMappingTypes{inproxy.PortMappingTypePMP}) + + if manager.getNATType(networkID) != inproxy.NATTypeSymmetric { + return errors.TraceNew("unexpected NAT type") + } + + portMappingTypes = manager.getPortMappingTypes(networkID) + if len(portMappingTypes) != 1 || portMappingTypes[0] != inproxy.PortMappingTypeUPnP { + return errors.TraceNew("unexpected port mapping types") + } + + // Test: reset + + networkID = "NETWORK2" + config.networkIDGetter = newStaticNetworkGetter(networkID) + + manager.reset() + + if manager.networkID != networkID { + return errors.TraceNew("unexpected network ID") + } + + if manager.getNATType(networkID) != inproxy.NATTypeUnknown { + return errors.TraceNew("unexpected NAT type") + } + + if len(manager.getPortMappingTypes(networkID)) != 0 { + return errors.TraceNew("unexpected port mapping types") + } + + return nil +} diff --git a/psiphon/limitProtocols_test.go b/psiphon/limitProtocols_test.go index 609304ab3..3b5ebcafd 100644 --- a/psiphon/limitProtocols_test.go +++ b/psiphon/limitProtocols_test.go @@ -98,8 +98,8 @@ func TestLimitTunnelProtocols(t *testing.T) { { "ClientPlatform" : "Windows", "ClientVersion" : "0", - "SponsorId" : "0", - "PropagationChannelId" : "0", + "SponsorId" : "0000000000000000", + "PropagationChannelId" : "0000000000000000", "DisableRemoteServerListFetcher" : true }` clientConfig, err := LoadConfig([]byte(clientConfigJSON)) @@ -145,10 +145,8 @@ func TestLimitTunnelProtocols(t *testing.T) { _, _, _, _, encodedServerEntry, err := server.GenerateConfig( &server.GenerateConfigParams{ - ServerIPAddress: fmt.Sprintf("0.1.0.0"), - EnableSSHAPIRequests: true, - WebServerPort: 8000, - TunnelProtocolPorts: map[string]int{tunnelProtocol: 4000}, + ServerIPAddress: fmt.Sprintf("0.1.0.0"), + TunnelProtocolPorts: map[string]int{tunnelProtocol: 4000}, }) if err != nil { t.Fatalf("error generating server config: %s", err) diff --git a/psiphon/meekConn.go b/psiphon/meekConn.go index f6249d32d..d9572e4c9 100644 --- a/psiphon/meekConn.go +++ b/psiphon/meekConn.go @@ -34,7 +34,6 @@ import ( "net/url" "strings" "sync" - "sync/atomic" "time" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" @@ -67,6 +66,7 @@ const ( MeekModeRelay = iota MeekModeObfuscatedRoundTrip MeekModePlaintextRoundTrip + MeekModeWrappedPlaintextRoundTrip ) // MeekConfig specifies the behavior of a MeekConn. @@ -100,9 +100,16 @@ type MeekConfig struct { // HTTP level; TLS and server certificate verification is required; the // origin server may be any HTTP(S) server. // - // As with the other modes, MeekModePlaintextRoundTrip supports HTTP/2 with - // utls, and integration with DialParameters for replay -- which are not - // otherwise implemented if using just CustomTLSDialer and net.http. 
+ // MeekModeWrappedPlaintextRoundTrip: is equivalent to + // MeekModePlaintextRoundTrip, except skipping of server certificate + // verification is permitted. In this mode, the caller is asserting that + // the HTTP plaintext payload is wrapped in its own transport security + // layer. + // + // As with the other modes, MeekMode[Wrapped]PlaintextRoundTrip supports + // HTTP/2 with utls, and integration with DialParameters for replay -- + // which are not otherwise implemented if using just CustomTLSDialer and + // net.http. Mode MeekMode // DialAddress is the actual network address to dial to establish a @@ -149,7 +156,8 @@ type MeekConfig struct { RandomizedTLSProfileSeed *prng.Seed // UseObfuscatedSessionTickets indicates whether to use obfuscated session - // tickets. Assumes UseHTTPS is true. Ignored for MeekModePlaintextRoundTrip. + // tickets. Assumes UseHTTPS is true. + // Ignored for MeekMode[Wrapped]PlaintextRoundTrip. UseObfuscatedSessionTickets bool // SNIServerName is the value to place in the TLS/QUIC SNI server_name field @@ -192,8 +200,8 @@ type MeekConfig struct { // ClientTunnelProtocol is the protocol the client is using. It's included in // the meek cookie for optional use by the server, in cases where the server // cannot unambiguously determine the tunnel protocol. ClientTunnelProtocol - // is used when selecting tactics targeted at specific protocols. Ignored for - // MeekModePlaintextRoundTrip. + // is used when selecting tactics targeted at specific protocols. + // Ignored for MeekMode[Wrapped]PlaintextRoundTrip. ClientTunnelProtocol string // NetworkLatencyMultiplier specifies a custom network latency multiplier to @@ -201,7 +209,7 @@ type MeekConfig struct { NetworkLatencyMultiplier float64 // The following values are used to create the obfuscated meek cookie. - // Ignored for MeekModePlaintextRoundTrip. + // Ignored for MeekMode[Wrapped]PlaintextRoundTrip. 
MeekCookieEncryptionPublicKey string MeekObfuscatedKey string @@ -244,15 +252,14 @@ type MeekConn struct { tlsPadding int limitRequestPayloadLength int redialTLSProbability float64 - underlyingDialer common.Dialer - cachedTLSDialer *cachedTLSDialer transport transporter - mutex sync.Mutex - isClosed bool - runCtx context.Context - stopRunning context.CancelFunc - relayWaitGroup *sync.WaitGroup - firstUnderlyingConn net.Conn + connManager *meekUnderlyingConnManager + + mutex sync.Mutex + isClosed bool + runCtx context.Context + stopRunning context.CancelFunc + relayWaitGroup *sync.WaitGroup // For MeekModeObfuscatedRoundTrip meekCookieEncryptionPublicKey string @@ -324,20 +331,6 @@ func DialMeek( runCtx, stopRunning := context.WithCancel(context.Background()) - cleanupStopRunning := true - cleanupCachedTLSDialer := true - var cachedTLSDialer *cachedTLSDialer - - // Cleanup in error cases - defer func() { - if cleanupStopRunning { - stopRunning() - } - if cleanupCachedTLSDialer && cachedTLSDialer != nil { - cachedTLSDialer.close() - } - }() - meek := &MeekConn{ params: meekConfig.Parameters, mode: meekConfig.Mode, @@ -348,6 +341,19 @@ func DialMeek( relayWaitGroup: new(sync.WaitGroup), } + cleanupStopRunning := true + cleanupConns := true + + // Cleanup in error cases + defer func() { + if cleanupStopRunning { + meek.stopRunning() + } + if cleanupConns && meek.connManager != nil { + meek.connManager.closeAll() + } + }() + if meek.mode == MeekModeRelay { var err error meek.cookie, @@ -396,13 +402,15 @@ func DialMeek( return packetConn, remoteAddr, nil } + meek.connManager = newMeekUnderlyingConnManager(nil, nil, udpDialer) + var err error transport, err = quic.NewQUICTransporter( ctx, func(message string) { NoticeInfo(message) }, - udpDialer, + meek.connManager.dialPacketConn, meekConfig.SNIServerName, meekConfig.QUICVersion, meekConfig.QUICClientHelloSeed, @@ -448,12 +456,10 @@ func DialMeek( scheme = "https" - meek.initUnderlyingDialer(dialConfig) - tlsConfig := &CustomTLSConfig{ Parameters: meekConfig.Parameters, DialAddr: meekConfig.DialAddress, - Dial: meek.underlyingDial, + Dial: NewTCPDialer(dialConfig), SNIServerName: meekConfig.SNIServerName, SkipVerify: skipVerify, VerifyServerName: meekConfig.VerifyServerName, @@ -473,6 +479,7 @@ func DialMeek( } if meekConfig.Mode != MeekModePlaintextRoundTrip && + meekConfig.Mode != MeekModeWrappedPlaintextRoundTrip && meekConfig.MeekObfuscatedKey != "" { // As the passthrough message is unique and indistinguishable from a normal @@ -520,14 +527,10 @@ func DialMeek( // session to preserve, and establishment will simply try another server. // Note that the underlying TCPDial may still try multiple IP addreses when // the destination is a domain and it resolves to multiple IP adresses. - + // // The pre-dial is made within the parent dial context, so that DialMeek // may be interrupted. Subsequent dials are made within the meek round trip - // request context. Since http.DialTLS doesn't take a context argument - // (yet; as of Go 1.9 this issue is still open: https://github.com/golang/go/issues/21526), - // cachedTLSDialer is used as a conduit to send the request context. - // meekConn.relayRoundTrip sets its request context into cachedTLSDialer, - // and cachedTLSDialer.dial uses that context. + // request context. // As DialAddr is set in the CustomTLSConfig, no address is required here. 
preConn, err := tlsDialer(ctx, "tcp", "") @@ -535,20 +538,19 @@ func DialMeek( return nil, errors.Trace(err) } - cachedTLSDialer = newCachedTLSDialer(preConn, tlsDialer) + meek.connManager = newMeekUnderlyingConnManager(preConn, tlsDialer, nil) if IsTLSConnUsingHTTP2(preConn) { NoticeInfo("negotiated HTTP/2 for %s", meekConfig.DiagnosticID) transport = &http2.Transport{ - DialTLS: func(network, addr string, _ *tls.Config) (net.Conn, error) { - return cachedTLSDialer.dial(network, addr) + DialTLSContext: func( + ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) { + return meek.connManager.dial(ctx, network, addr) }, } } else { transport = &http.Transport{ - DialTLS: func(network, addr string) (net.Conn, error) { - return cachedTLSDialer.dial(network, addr) - }, + DialTLSContext: meek.connManager.dial, } } @@ -579,8 +581,7 @@ func DialMeek( *copyDialConfig = *dialConfig copyDialConfig.UpstreamProxyURL = "" - meek.initUnderlyingDialer(copyDialConfig) - dialer = meek.underlyingDial + dialer = NewTCPDialer(copyDialConfig) // In this proxy case, the destination server address is in the // request line URL. net/http will render the request line using @@ -604,8 +605,7 @@ func DialMeek( // If dialConfig.UpstreamProxyURL is set, HTTP proxying via // CONNECT will be used by the dialer. - meek.initUnderlyingDialer(dialConfig) - baseDialer := meek.underlyingDial + baseDialer := NewTCPDialer(dialConfig) // The dialer ignores any address that http.Transport will pass in // (derived from the HTTP request URL) and always dials @@ -619,14 +619,19 @@ func DialMeek( // Only apply transformer if it will perform a transform; otherwise // applying a no-op transform will incur an unnecessary performance // cost. - if meekConfig.HTTPTransformerParameters != nil && meekConfig.HTTPTransformerParameters.ProtocolTransformSpec != nil { - dialer = transforms.WrapDialerWithHTTPTransformer(dialer, meekConfig.HTTPTransformerParameters) + if meekConfig.HTTPTransformerParameters != nil && + meekConfig.HTTPTransformerParameters.ProtocolTransformSpec != nil { + + dialer = transforms.WrapDialerWithHTTPTransformer( + dialer, meekConfig.HTTPTransformerParameters) } } + meek.connManager = newMeekUnderlyingConnManager(nil, dialer, nil) + httpTransport := &http.Transport{ Proxy: proxyUrl, - DialContext: dialer, + DialContext: meek.connManager.dial, } if proxyUrl != nil { @@ -696,12 +701,11 @@ func DialMeek( meek.url = url meek.additionalHeaders = additionalHeaders - meek.cachedTLSDialer = cachedTLSDialer meek.transport = transport // stopRunning and cachedTLSDialer will now be closed in meek.Close() cleanupStopRunning = false - cleanupCachedTLSDialer = false + cleanupConns = false // Allocate relay resources, including buffers and running the relay // go routine, only when running in relay mode. @@ -753,85 +757,188 @@ func DialMeek( meek.meekObfuscatorPaddingSeed = meekConfig.MeekObfuscatorPaddingSeed meek.clientTunnelProtocol = meekConfig.ClientTunnelProtocol - } else if meek.mode == MeekModePlaintextRoundTrip { + } else if meek.mode == MeekModePlaintextRoundTrip || + meek.mode == MeekModeWrappedPlaintextRoundTrip { // MeekModeRelay and MeekModeObfuscatedRoundTrip set the Host header - // implicitly via meek.url; MeekModePlaintextRoundTrip does not use - // meek.url; it uses the RoundTrip input request.URL instead. So the - // Host header is set to meekConfig.HostHeader explicitly here. 
+ // implicitly via meek.url; MeekMode[Wrapped]PlaintextRoundTrip does + // not use meek.url; it uses the RoundTrip input request.URL instead. + // So the Host header is set to meekConfig.HostHeader explicitly here. meek.additionalHeaders.Add("Host", meekConfig.HostHeader) } return meek, nil } -func (meek *MeekConn) initUnderlyingDialer(dialConfig *DialConfig) { +type meekPacketConnDialer func(ctx context.Context) (net.PacketConn, *net.UDPAddr, error) - // Not safe for concurrent calls; should be called only from DialMeek. - meek.underlyingDialer = NewTCPDialer(dialConfig) +// meekUnderlyingConnManager tracks the TCP/TLS and UDP connections underlying +// the meek HTTP/HTTPS/QUIC transports. This tracking is used to: +// +// - Use the cached predial TLS conn created in DialMeek. +// - Gather metrics from mechanisms enabled in the underlying conns, such as +// the fragmentor, or inproxy. +// - Fully close all underlying connections with the MeekConn is closed. +type meekUnderlyingConnManager struct { + mutex sync.Mutex + cachedConn net.Conn + firstConn net.Conn + firstPacketConn net.PacketConn + + dialer common.Dialer + managedConns *common.Conns[net.Conn] + + packetConnDialer meekPacketConnDialer + managedPacketConns *common.Conns[net.PacketConn] } -func (meek *MeekConn) underlyingDial(ctx context.Context, network, addr string) (net.Conn, error) { - conn, err := meek.underlyingDialer(ctx, network, addr) - if err == nil { - meek.mutex.Lock() - if meek.firstUnderlyingConn == nil { - // Keep a reference to the first underlying conn to be used as a - // common.MetricsSource in GetMetrics. This enables capturing - // metrics such as fragmentor configuration. - meek.firstUnderlyingConn = conn - } - meek.mutex.Unlock() - } +type meekUnderlyingConn struct { + net.Conn + connManager *meekUnderlyingConnManager +} + +func (conn *meekUnderlyingConn) Close() error { + conn.connManager.managedConns.Remove(conn) + // Note: no trace error to preserve error type - return conn, err + return conn.Conn.Close() } -type cachedTLSDialer struct { - usedCachedConn int32 - cachedConn net.Conn - dialer common.Dialer +type meekUnderlyingPacketConn struct { + net.PacketConn + connManager *meekUnderlyingConnManager +} - mutex sync.Mutex - requestCtx context.Context +func (packetConn *meekUnderlyingPacketConn) Close() error { + packetConn.connManager.managedPacketConns.Remove(packetConn) + return packetConn.PacketConn.Close() } -func newCachedTLSDialer(cachedConn net.Conn, dialer common.Dialer) *cachedTLSDialer { - return &cachedTLSDialer{ - cachedConn: cachedConn, - dialer: dialer, +func newMeekUnderlyingConnManager( + cachedConn net.Conn, + dialer common.Dialer, + packetConnDialer meekPacketConnDialer) *meekUnderlyingConnManager { + + m := &meekUnderlyingConnManager{ + dialer: dialer, + managedConns: common.NewConns[net.Conn](), + + packetConnDialer: packetConnDialer, + managedPacketConns: common.NewConns[net.PacketConn](), + } + + if cachedConn != nil { + m.cachedConn = &meekUnderlyingConn{Conn: cachedConn, connManager: m} + m.firstConn = cachedConn } + + return m } -func (c *cachedTLSDialer) setRequestContext(requestCtx context.Context) { - // Note: not using sync.Value since underlying type of requestCtx may change. 
- c.mutex.Lock() - defer c.mutex.Unlock() - c.requestCtx = requestCtx +func (m *meekUnderlyingConnManager) GetMetrics() common.LogFields { + + logFields := common.LogFields{} + + m.mutex.Lock() + underlyingMetrics, ok := m.firstConn.(common.MetricsSource) + if ok { + logFields.Add(underlyingMetrics.GetMetrics()) + } + + underlyingMetrics, ok = m.firstPacketConn.(common.MetricsSource) + if ok { + logFields.Add(underlyingMetrics.GetMetrics()) + } + m.mutex.Unlock() + + return logFields } -func (c *cachedTLSDialer) dial(network, addr string) (net.Conn, error) { - if atomic.CompareAndSwapInt32(&c.usedCachedConn, 0, 1) { - conn := c.cachedConn - c.cachedConn = nil +func (m *meekUnderlyingConnManager) dial( + ctx context.Context, network, addr string) (net.Conn, error) { + + if m.managedConns.IsClosed() { + return nil, errors.TraceNew("closed") + } + + // Consume the cached conn when present. + + m.mutex.Lock() + var conn net.Conn + if m.cachedConn != nil { + conn = m.cachedConn + m.cachedConn = nil + } + m.mutex.Unlock() + + if conn != nil { return conn, nil } - c.mutex.Lock() - ctx := c.requestCtx - c.mutex.Unlock() - if ctx == nil { - ctx = context.Background() + // The mutex lock is not held for the duration of dial, allowing for + // concurrent dials. + + conn, err := m.dialer(ctx, network, addr) + if err != nil { + // Note: no trace error to preserve error type + return nil, err + } + + // Keep a reference to the first underlying conn to be used as a + // common.MetricsSource in GetMetrics. This enables capturing metrics + // such as fragmentor configuration. + + m.mutex.Lock() + if m.firstConn == nil { + m.firstConn = conn } + m.mutex.Unlock() + + // Wrap the dialed conn with meekUnderlyingConn, which will remove the + // conn from the set of tracked conns when the conn is closed. + + conn = &meekUnderlyingConn{Conn: conn, connManager: m} - return c.dialer(ctx, network, addr) + if !m.managedConns.Add(conn) { + _ = conn.Close() + return nil, errors.TraceNew("closed") + } + + return conn, nil } -func (c *cachedTLSDialer) close() { - if atomic.CompareAndSwapInt32(&c.usedCachedConn, 0, 1) { - c.cachedConn.Close() - c.cachedConn = nil +func (m *meekUnderlyingConnManager) dialPacketConn( + ctx context.Context) (net.PacketConn, *net.UDPAddr, error) { + + if m.managedPacketConns.IsClosed() { + return nil, nil, errors.TraceNew("closed") + } + + packetConn, addr, err := m.packetConnDialer(ctx) + if err != nil { + // Note: no trace error to preserve error type + return nil, nil, err + } + + m.mutex.Lock() + if m.firstPacketConn != nil { + m.firstPacketConn = packetConn + } + m.mutex.Unlock() + + packetConn = &meekUnderlyingPacketConn{PacketConn: packetConn, connManager: m} + + if !m.managedPacketConns.Add(packetConn) { + _ = packetConn.Close() + return nil, nil, errors.TraceNew("closed") } + + return packetConn, addr, nil +} + +func (m *meekUnderlyingConnManager) closeAll() { + m.managedConns.CloseAll() + m.managedPacketConns.CloseAll() } // Close terminates the meek connection and releases its resources. In in @@ -847,30 +954,12 @@ func (meek *MeekConn) Close() (err error) { if !isClosed { meek.stopRunning() - if meek.cachedTLSDialer != nil { - meek.cachedTLSDialer.close() - } - - // stopRunning interrupts HTTP requests in progress by closing the context - // associated with the request. 
In the case of h2quic.RoundTripper, testing - // indicates that quic-go.receiveStream.readImpl in _not_ interrupted in - // this case, and so an in-flight FRONTED-MEEK-QUIC round trip may hang shutdown - // in relayRoundTrip->readPayload->...->quic-go.receiveStream.readImpl. - // - // To workaround this, we call CloseIdleConnections _before_ Wait, as, in - // the case of QUICTransporter, this closes the underlying UDP sockets which - // interrupts any blocking I/O calls. - // - // The standard CloseIdleConnections call _after_ wait is for the net/http - // case: it only closes idle connections, so the call should be after wait. - // This call is intended to clean up all network resources deterministically - // before Close returns. - if meek.isQUIC { - meek.transport.CloseIdleConnections() - } - + meek.connManager.closeAll() meek.relayWaitGroup.Wait() - meek.transport.CloseIdleConnections() + + // meek.transport.CloseIdleConnections is no longed called here since + // meekUnderlyingConnManager.closeAll will terminate all underlying + // connections and prevent opening any new connections. } return nil } @@ -895,16 +984,12 @@ func (meek *MeekConn) GetMetrics() common.LogFields { logFields["meek_limit_request"] = meek.limitRequestPayloadLength logFields["meek_redial_probability"] = meek.redialTLSProbability } + // Include metrics, such as fragmentor metrics, from the _first_ underlying // dial conn. Properties of subsequent underlying dial conns are not reflected // in these metrics; we assume that the first dial conn, which most likely // transits the various protocol handshakes, is most significant. - meek.mutex.Lock() - underlyingMetrics, ok := meek.firstUnderlyingConn.(common.MetricsSource) - if ok { - logFields.Add(underlyingMetrics.GetMetrics()) - } - meek.mutex.Unlock() + logFields.Add(meek.connManager.GetMetrics()) return logFields } @@ -927,10 +1012,8 @@ func (meek *MeekConn) GetNoticeMetrics() common.LogFields { // plaintext in the meek traffic. The caller is responsible for securing and // obfuscating the request body. // -// ObfuscatedRoundTrip is not safe for concurrent use. The caller must ensure -// only one ObfuscatedRoundTrip call is active at once. If Close is called -// before or concurrent with ObfuscatedRoundTrip, or before the response body -// is read, idle connections may be left open. +// If Close is called before or concurrent with ObfuscatedRoundTrip, or before +// the response body is read, idle connections may be left open. func (meek *MeekConn) ObfuscatedRoundTrip( requestCtx context.Context, endPoint string, requestBody []byte) ([]byte, error) { @@ -949,14 +1032,6 @@ func (meek *MeekConn) ObfuscatedRoundTrip( return nil, errors.Trace(err) } - // Note: - // - // - multiple, concurrent ObfuscatedRoundTrip calls are unsafe due to the - // setDialerRequestContext calls in newRequest. - // - // At this time, ObfuscatedRoundTrip is used for tactics in Controller and - // the concurrency constraints are satisfied. - request, err := meek.newRequest( requestCtx, cookie, contentType, bytes.NewReader(requestBody), 0) if err != nil { @@ -988,13 +1063,12 @@ func (meek *MeekConn) ObfuscatedRoundTrip( // used when TLS and server certificate verification are configured. RoundTrip // does not implement any security or obfuscation at the HTTP layer. // -// RoundTrip is not safe for concurrent use. The caller must ensure only one -// RoundTrip call is active at once. 
If Close is called before or concurrent -// with RoundTrip, or before the response body is read, idle connections may -// be left open. +// If Close is called before or concurrent with RoundTrip, or before the +// response body is read, idle connections may be left open. func (meek *MeekConn) RoundTrip(request *http.Request) (*http.Response, error) { - if meek.mode != MeekModePlaintextRoundTrip { + if meek.mode != MeekModePlaintextRoundTrip && + meek.mode != MeekModeWrappedPlaintextRoundTrip { return nil, errors.TraceNew("operation unsupported") } @@ -1004,12 +1078,6 @@ func (meek *MeekConn) RoundTrip(request *http.Request) (*http.Response, error) { request = request.Clone(requestCtx) meek.addAdditionalHeaders(request) - // The setDialerRequestContext/CloseIdleConnections concurrency note in - // ObfuscatedRoundTrip applies to RoundTrip as well. - - // Ensure dials are made within the request context. - meek.setDialerRequestContext(requestCtx) - meek.scheduleQUICCloseIdle(request) response, err := meek.transport.RoundTrip(request) @@ -1298,9 +1366,6 @@ func (meek *MeekConn) newRequest( body io.Reader, contentLength int) (*http.Request, error) { - // Ensure dials are made within the request context. - meek.setDialerRequestContext(requestCtx) - request, err := http.NewRequest("POST", meek.url.String(), body) if err != nil { return nil, errors.Trace(err) @@ -1326,17 +1391,6 @@ func (meek *MeekConn) newRequest( return request, nil } -// setDialerRequestContext ensures that underlying TLS/QUIC dials operate -// within the context of the request context. setDialerRequestContext must not -// be called while another request is already in flight. -func (meek *MeekConn) setDialerRequestContext(requestCtx context.Context) { - if meek.isQUIC { - meek.transport.(*quic.QUICTransporter).SetRequestContext(requestCtx) - } else if meek.cachedTLSDialer != nil { - meek.cachedTLSDialer.setRequestContext(requestCtx) - } -} - // Workaround for h2quic.RoundTripper context issue. See comment in // MeekConn.Close. 
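The connection tracking above boils down to a small pattern: wrap every dialed net.Conn so that closing it removes it from a shared set, and let a single CloseAll call terminate whatever is still open. A minimal stdlib-only sketch, using a plain map and hypothetical names in place of common.Conns and the meek dialers:

package sketch

import (
	"context"
	"errors"
	"net"
	"sync"
)

// trackedConnSet is a simplified stand-in for the managed conn set: it
// records open conns, lets a wrapped Close remove them, and closes whatever
// remains when CloseAll is called.
type trackedConnSet struct {
	mutex  sync.Mutex
	closed bool
	conns  map[net.Conn]bool
}

func newTrackedConnSet() *trackedConnSet {
	return &trackedConnSet{conns: make(map[net.Conn]bool)}
}

func (s *trackedConnSet) add(conn net.Conn) bool {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if s.closed {
		return false
	}
	s.conns[conn] = true
	return true
}

func (s *trackedConnSet) remove(conn net.Conn) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	delete(s.conns, conn)
}

// CloseAll closes every conn still tracked and rejects later adds, so a dial
// racing with CloseAll either fails or is closed immediately.
func (s *trackedConnSet) CloseAll() {
	s.mutex.Lock()
	conns := s.conns
	s.conns = make(map[net.Conn]bool)
	s.closed = true
	s.mutex.Unlock()
	// Close outside the lock; Close may block.
	for conn := range conns {
		conn.Close()
	}
}

// trackedConn removes itself from the set when closed, so CloseAll only
// touches conns that are actually still open.
type trackedConn struct {
	net.Conn
	set *trackedConnSet
}

func (c *trackedConn) Close() error {
	c.set.remove(c)
	return c.Conn.Close()
}

// dialTracked dials with the supplied dialer and registers the result.
func dialTracked(
	ctx context.Context,
	set *trackedConnSet,
	dialer func(ctx context.Context, network, addr string) (net.Conn, error),
	network, addr string) (net.Conn, error) {

	conn, err := dialer(ctx, network, addr)
	if err != nil {
		return nil, err
	}
	tracked := &trackedConn{Conn: conn, set: set}
	if !set.add(tracked) {
		tracked.Close()
		return nil, errors.New("closed")
	}
	return tracked, nil
}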
func (meek *MeekConn) scheduleQUICCloseIdle(request *http.Request) { diff --git a/psiphon/meekConn_test.go b/psiphon/meekConn_test.go index cebbc86cd..12a954d57 100644 --- a/psiphon/meekConn_test.go +++ b/psiphon/meekConn_test.go @@ -57,50 +57,72 @@ func TestMeekModePlaintextRoundTrip(t *testing.T) { t.Fatalf("parameters.NewParameters failed: %v", err) } - meekConfig := &MeekConfig{ - Parameters: params, - Mode: MeekModePlaintextRoundTrip, - DialAddress: serverAddr, - UseHTTPS: true, - SNIServerName: "not-" + serverName, - VerifyServerName: serverName, - VerifyPins: []string{rootCACertificatePin, serverCertificatePin}, + testCases := []struct { + description string + meekMode MeekMode + verifyServerName string + verifyPins []string + }{ + { + meekMode: MeekModePlaintextRoundTrip, + verifyServerName: serverName, + verifyPins: []string{rootCACertificatePin, serverCertificatePin}, + }, + { + meekMode: MeekModeWrappedPlaintextRoundTrip, + verifyServerName: "", + verifyPins: nil, + }, } - dialConfig := &DialConfig{ - TrustedCACertificatesFilename: rootCAsFileName, - CustomDialer: dialer, - } - - for _, tlsFragmentClientHello := range []bool{false, true} { - - ctx, cancelFunc := context.WithTimeout(context.Background(), 1*time.Second) - defer cancelFunc() - - meekConfig.TLSFragmentClientHello = tlsFragmentClientHello - - meekConn, err := DialMeek(ctx, meekConfig, dialConfig) - if err != nil { - t.Fatalf("DialMeek failed: %v", err) - } - - client := &http.Client{ - Transport: meekConn, - } - - response, err := client.Get("https://" + serverAddr + "/") - if err != nil { - t.Fatalf("http.Client.Get failed: %v", err) - } - response.Body.Close() - - if response.StatusCode != http.StatusOK { - t.Fatalf("unexpected response code: %v", response.StatusCode) - } - - err = meekConn.Close() - if err != nil { - t.Fatalf("MeekConn.Close failed: %v", err) - } + for _, testCase := range testCases { + t.Run(testCase.description, func(t *testing.T) { + meekConfig := &MeekConfig{ + Parameters: params, + Mode: testCase.meekMode, + DialAddress: serverAddr, + UseHTTPS: true, + SNIServerName: "not-" + serverName, + VerifyServerName: testCase.verifyServerName, + VerifyPins: testCase.verifyPins, + } + + dialConfig := &DialConfig{ + TrustedCACertificatesFilename: rootCAsFileName, + CustomDialer: dialer, + } + + for _, tlsFragmentClientHello := range []bool{false, true} { + + ctx, cancelFunc := context.WithTimeout(context.Background(), 1*time.Second) + defer cancelFunc() + + meekConfig.TLSFragmentClientHello = tlsFragmentClientHello + + meekConn, err := DialMeek(ctx, meekConfig, dialConfig) + if err != nil { + t.Fatalf("DialMeek failed: %v", err) + } + + client := &http.Client{ + Transport: meekConn, + } + + response, err := client.Get("https://" + serverAddr + "/") + if err != nil { + t.Fatalf("http.Client.Get failed: %v", err) + } + response.Body.Close() + + if response.StatusCode != http.StatusOK { + t.Fatalf("unexpected response code: %v", response.StatusCode) + } + + err = meekConn.Close() + if err != nil { + t.Fatalf("MeekConn.Close failed: %v", err) + } + } + }) } } diff --git a/psiphon/memory_test/memory_test.go b/psiphon/memory_test/memory_test.go index ab97fbf91..b4cfade2c 100644 --- a/psiphon/memory_test/memory_test.go +++ b/psiphon/memory_test/memory_test.go @@ -95,7 +95,11 @@ func runMemoryTest(t *testing.T, testMode int) { // Most of these fields _must_ be filled in before calling LoadConfig, // so that they are correctly set into client parameters. 
var modifyConfig map[string]interface{} - json.Unmarshal(configJSON, &modifyConfig) + err = json.Unmarshal(configJSON, &modifyConfig) + if err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + modifyConfig["ClientVersion"] = "999999999" modifyConfig["TunnelPoolSize"] = 1 modifyConfig["DataRootDirectory"] = testDataDirName diff --git a/psiphon/net.go b/psiphon/net.go index c65f01166..dd8872932 100644 --- a/psiphon/net.go +++ b/psiphon/net.go @@ -114,19 +114,6 @@ type DialConfig struct { CustomDialer common.Dialer } -// WithoutFragmentor returns a copy of the DialConfig with any fragmentor -// configuration disabled. The return value is not a deep copy and may be the -// input DialConfig; it should not be modified. -func (config *DialConfig) WithoutFragmentor() *DialConfig { - if config.FragmentorConfig == nil { - return config - } - newConfig := new(DialConfig) - *newConfig = *config - newConfig.FragmentorConfig = nil - return newConfig -} - // NetworkConnectivityChecker defines the interface to the external // HasNetworkConnectivity provider, which call into the host application to // check for network connectivity. @@ -189,7 +176,6 @@ type NetworkIDGetter interface { } // RefractionNetworkingDialer implements psiphon/common/refraction.Dialer. - type RefractionNetworkingDialer struct { config *DialConfig } @@ -385,7 +371,7 @@ func UntunneledResolveIP( frontingProviderID string) ([]net.IP, error) { // Limitations: for untunneled resolves, there is currently no resolve - // parameter replay, and no support for pre-resolved IPs. + // parameter replay. params, err := resolver.MakeResolveParameters( config.GetParameters().Get(), frontingProviderID, hostname) @@ -415,6 +401,10 @@ func UntunneledResolveIP( // The context is applied to underlying TCP dials. The caller is responsible // for applying the context to requests made with the returned http.Client. // +// payloadSecure must only be set if all HTTP plaintext payloads sent through +// the returned net/http.Client will be wrapped in their own transport security +// layer, which permits skipping of server certificate verification. 
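The payloadSecure rule above reduces to a single check: certificate verification may only be relaxed when every payload carries its own transport security. A minimal sketch of that decision, using hypothetical mode names rather than the actual MeekMode constants:

package sketch

import "errors"

// frontedClientMode captures the choice between a fronted HTTP client that
// requires full certificate verification (plain payloads) and one that may
// skip it (payloads wrapped in their own transport security). The names are
// illustrative, not the actual MeekMode constants.
type frontedClientMode int

const (
	modePlainPayload frontedClientMode = iota
	modeWrappedPayload
)

// selectFrontedClientMode applies the payloadSecure rule: skipping server
// certificate verification, or ignoring system root CAs, is only permitted
// when the payload is independently secured.
func selectFrontedClientMode(
	skipVerify, disableSystemRootCAs, payloadSecure bool) (frontedClientMode, error) {

	if !payloadSecure && (skipVerify || disableSystemRootCAs) {
		return modePlainPayload, errors.New(
			"cannot skip certificate verification if payload insecure")
	}
	if payloadSecure {
		return modeWrappedPayload, nil
	}
	return modePlainPayload, nil
}

downloadRemoteServerListFile, for example, sets payloadSecure to true, presumably because server list payloads are independently authenticated, while callers fetching plain content must leave it unset.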
+// // Warning: it is not safe to call makeFrontedHTTPClient concurrently with the // same dialConfig when tunneled is true because dialConfig will be used // directly, instead of copied, which can lead to a crash when fields not safe @@ -426,10 +416,16 @@ func makeFrontedHTTPClient( dialConfig *DialConfig, frontingSpecs parameters.FrontingSpecs, selectedFrontingProviderID func(string), - skipVerify bool, - disableSystemRootCAs bool) (*http.Client, func() common.APIParameters, error) { + skipVerify, + disableSystemRootCAs, + payloadSecure bool) (*http.Client, func() common.APIParameters, error) { + + if !payloadSecure && (skipVerify || disableSystemRootCAs) { + return nil, nil, errors.TraceNew("cannot skip certificate verification if payload insecure") + } frontingProviderID, + frontingTransport, meekFrontingDialAddress, meekSNIServerName, meekVerifyServerName, @@ -439,6 +435,10 @@ func makeFrontedHTTPClient( return nil, nil, errors.Trace(err) } + if frontingTransport != protocol.FRONTING_TRANSPORT_HTTPS { + return nil, nil, errors.TraceNew("unsupported fronting transport") + } + if selectedFrontingProviderID != nil { selectedFrontingProviderID(frontingProviderID) } @@ -503,10 +503,15 @@ func makeFrontedHTTPClient( } } + var meekMode MeekMode = MeekModePlaintextRoundTrip + if payloadSecure { + meekMode = MeekModeWrappedPlaintextRoundTrip + } + meekConfig := &MeekConfig{ DiagnosticID: frontingProviderID, Parameters: config.GetParameters(), - Mode: MeekModePlaintextRoundTrip, + Mode: meekMode, DialAddress: meekDialAddress, UseHTTPS: true, TLSProfile: tlsProfile, @@ -689,6 +694,7 @@ func MakeUntunneledHTTPClient( untunneledDialConfig *DialConfig, skipVerify bool, disableSystemRootCAs bool, + payloadSecure bool, frontingSpecs parameters.FrontingSpecs, selectedFrontingProviderID func(string)) (*http.Client, func() common.APIParameters, error) { @@ -704,7 +710,8 @@ func MakeUntunneledHTTPClient( frontingSpecs, selectedFrontingProviderID, false, - disableSystemRootCAs) + disableSystemRootCAs, + payloadSecure) if err != nil { return nil, nil, errors.Trace(err) } @@ -750,8 +757,9 @@ func MakeTunneledHTTPClient( ctx context.Context, config *Config, tunnel *Tunnel, - skipVerify bool, - disableSystemRootCAs bool, + skipVerify, + disableSystemRootCAs, + payloadSecure bool, frontingSpecs parameters.FrontingSpecs, selectedFrontingProviderID func(string)) (*http.Client, func() common.APIParameters, error) { @@ -784,7 +792,8 @@ func MakeTunneledHTTPClient( frontingSpecs, selectedFrontingProviderID, false, - disableSystemRootCAs) + disableSystemRootCAs, + payloadSecure) if err != nil { return nil, nil, errors.Trace(err) } @@ -827,7 +836,8 @@ func MakeDownloadHTTPClient( tunnel *Tunnel, untunneledDialConfig *DialConfig, skipVerify, - disableSystemRootCAs bool, + disableSystemRootCAs, + payloadSecure bool, frontingSpecs parameters.FrontingSpecs, selectedFrontingProviderID func(string)) (*http.Client, bool, func() common.APIParameters, error) { @@ -845,6 +855,7 @@ func MakeDownloadHTTPClient( tunnel, skipVerify || disableSystemRootCAs, disableSystemRootCAs, + payloadSecure, frontingSpecs, selectedFrontingProviderID) if err != nil { @@ -858,6 +869,7 @@ func MakeDownloadHTTPClient( untunneledDialConfig, skipVerify, disableSystemRootCAs, + payloadSecure, frontingSpecs, selectedFrontingProviderID) if err != nil { diff --git a/psiphon/notice.go b/psiphon/notice.go index bcf734c41..4ed7d664a 100644 --- a/psiphon/notice.go +++ b/psiphon/notice.go @@ -220,8 +220,16 @@ func (nl *noticeLogger) outputNotice(noticeType 
string, noticeFlags uint32, args obj["timestamp"] = time.Now().UTC().Format(common.RFC3339Milli) for i := 0; i < len(args)-1; i += 2 { name, ok := args[i].(string) - value := args[i+1] if ok { + + value := args[i+1] + + // encoding/json marshals error types as "{}", so convert to error + // message string. + if err, isError := value.(error); isError { + value = err.Error() + } + noticeData[name] = value } } @@ -656,6 +664,13 @@ func noticeWithDialParameters(noticeType string, dialParams *DialParameters, pos args = append(args, name, value) } } + + if protocol.TunnelProtocolUsesInproxy(dialParams.TunnelProtocol) { + metrics := dialParams.GetInproxyMetrics() + for name, value := range metrics { + args = append(args, name, value) + } + } } singletonNoticeLogger.outputNotice( @@ -1082,6 +1097,51 @@ func NoticeSkipServerEntry(format string, args ...interface{}) { "SkipServerEntry", 0, "reason", reason) } +// NoticeInproxyOperatorMessage emits a message to be displayed to the proxy +// operator. +func NoticeInproxyOperatorMessage(messageJSON string) { + singletonNoticeLogger.outputNotice( + "InproxyOperatorMessage", 0, + "message", messageJSON) +} + +// NoticeInproxyProxyActivity reports proxy usage statistics. The stats are +// for activity since the last NoticeInproxyProxyActivity report. +// +// This is not a diagnostic notice: the user app has requested this notice +// with EmitproxyActivity for functionality such as traffic display; and this +// frequent notice is not intended to be included with feedback. +func NoticeInproxyProxyActivity( + connectingClients int32, + connectedClients int32, + bytesUp int64, + bytesDown int64) { + + singletonNoticeLogger.outputNotice( + "InproxyProxyActivity", noticeIsNotDiagnostic, + "connectingClients", connectingClients, + "connectedClients", connectedClients, + "bytesUp", bytesUp, + "bytesDown", bytesDown) +} + +// NoticeInproxyProxyTotalActivity reports how many proxied bytes have been +// transferred in total up to this point; in addition to current connection +// status. This is a diagnostic notice. +func NoticeInproxyProxyTotalActivity( + connectingClients int32, + connectedClients int32, + totalBytesUp int64, + totalBytesDown int64) { + + singletonNoticeLogger.outputNotice( + "InproxyProxyTotalActivity", noticeIsDiagnostic, + "connectingClients", connectingClients, + "connectedClients", connectedClients, + "totalBytesUp", totalBytesUp, + "totalBytesDown", totalBytesDown) +} + type repetitiveNoticeState struct { message string repeats int @@ -1247,23 +1307,28 @@ func (writer *NoticeWriter) Write(p []byte) (n int, err error) { // NoticeCommonLogger maps the common.Logger interface to the notice facility. // This is used to make the notice facility available to other packages that // don't import the "psiphon" package. 
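The error-to-string conversion above is needed because encoding/json renders typical error values as an empty object: the concrete types behind errors.New and fmt.Errorf expose no exported fields. A short demonstration of the problem and the fix:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

func main() {

	notice := map[string]interface{}{"error": errors.New("dial failed")}

	// Without conversion the error marshals as "{}" and the message is lost.
	b, _ := json.Marshal(notice)
	fmt.Println(string(b)) // {"error":{}}

	// Converting to the message string preserves the diagnostic value.
	if err, isError := notice["error"].(error); isError {
		notice["error"] = err.Error()
	}
	b, _ = json.Marshal(notice)
	fmt.Println(string(b)) // {"error":"dial failed"}
}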
-func NoticeCommonLogger() common.Logger { - return &commonLogger{} +func NoticeCommonLogger(debugLogging bool) common.Logger { + return &commonLogger{ + debugLogging: debugLogging, + } } type commonLogger struct { + debugLogging bool } func (logger *commonLogger) WithTrace() common.LogTrace { return &commonLogTrace{ - trace: stacktrace.GetParentFunctionName(), + trace: stacktrace.GetParentFunctionName(), + debugLogging: logger.debugLogging, } } func (logger *commonLogger) WithTraceFields(fields common.LogFields) common.LogTrace { return &commonLogTrace{ - trace: stacktrace.GetParentFunctionName(), - fields: fields, + trace: stacktrace.GetParentFunctionName(), + fields: fields, + debugLogging: logger.debugLogging, } } @@ -1273,6 +1338,10 @@ func (logger *commonLogger) LogMetric(metric string, fields common.LogFields) { listCommonFields(fields)...) } +func (log *commonLogger) IsLogLevelDebug() bool { + return log.debugLogging +} + func listCommonFields(fields common.LogFields) []interface{} { fieldList := make([]interface{}, 0) for name, value := range fields { @@ -1282,8 +1351,9 @@ func listCommonFields(fields common.LogFields) []interface{} { } type commonLogTrace struct { - trace string - fields common.LogFields + trace string + fields common.LogFields + debugLogging bool } func (log *commonLogTrace) outputNotice( @@ -1299,7 +1369,10 @@ func (log *commonLogTrace) outputNotice( } func (log *commonLogTrace) Debug(args ...interface{}) { - // Ignored. + if !log.debugLogging { + return + } + log.outputNotice("Debug", args...) } func (log *commonLogTrace) Info(args ...interface{}) { diff --git a/psiphon/remoteServerList.go b/psiphon/remoteServerList.go index 4dad468d6..80a1cfed6 100644 --- a/psiphon/remoteServerList.go +++ b/psiphon/remoteServerList.go @@ -461,6 +461,7 @@ func downloadRemoteServerListFile( // MakeDownloadHttpClient will select either a tunneled // or untunneled configuration. 
+ payloadSecure := true httpClient, tunneled, getParams, err := MakeDownloadHTTPClient( ctx, config, @@ -468,6 +469,7 @@ func downloadRemoteServerListFile( untunneledDialConfig, skipVerify, disableSystemRootCAs, + payloadSecure, frontingSpecs, func(frontingProviderID string) { NoticeInfo( diff --git a/psiphon/remoteServerList_test.go b/psiphon/remoteServerList_test.go index 8f3a1b7cf..96d1d2653 100644 --- a/psiphon/remoteServerList_test.go +++ b/psiphon/remoteServerList_test.go @@ -89,12 +89,10 @@ func testObfuscatedRemoteServerLists(t *testing.T, omitMD5Sums bool) { serverConfigJSON, _, _, _, encodedServerEntry, err := server.GenerateConfig( &server.GenerateConfigParams{ - ServerIPAddress: serverIPAddress, - EnableSSHAPIRequests: true, - WebServerPort: int(atomic.AddInt32(&nextServerPort, 1)), - TunnelProtocolPorts: map[string]int{"OSSH": int(atomic.AddInt32(&nextServerPort, 1))}, - LogFilename: filepath.Join(testDataDirName, "psiphond.log"), - LogLevel: "debug", + ServerIPAddress: serverIPAddress, + TunnelProtocolPorts: map[string]int{"OSSH": int(atomic.AddInt32(&nextServerPort, 1))}, + LogFilename: filepath.Join(testDataDirName, "psiphond.log"), + LogLevel: "debug", // "defer os.RemoveAll" will cause a log write error SkipPanickingLogWriter: true, @@ -389,8 +387,8 @@ func testObfuscatedRemoteServerLists(t *testing.T, omitMD5Sums bool) { { "ClientPlatform" : "", "ClientVersion" : "0", - "SponsorId" : "0", - "PropagationChannelId" : "0", + "SponsorId" : "0000000000000000", + "PropagationChannelId" : "0000000000000000", "ConnectionWorkerPoolSize" : 1, "EstablishTunnelPausePeriodSeconds" : 1, "FetchRemoteServerListRetryPeriodMilliseconds" : 250, diff --git a/psiphon/server/api.go b/psiphon/server/api.go index a34a887fc..acf70d3c2 100644 --- a/psiphon/server/api.go +++ b/psiphon/server/api.go @@ -20,10 +20,8 @@ package server import ( - "crypto/subtle" "encoding/base64" "encoding/json" - std_errors "errors" "net" "regexp" "strconv" @@ -34,8 +32,10 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/fragmentor" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tactics" + "github.com/fxamacker/cbor/v2" ) const ( @@ -50,18 +50,13 @@ const ( // sshAPIRequestHandler routes Psiphon API requests transported as // JSON objects via the SSH request mechanism. // -// The API request handlers, handshakeAPIRequestHandler, etc., are -// reused by webServer which offers the Psiphon API via web transport. -// // The API request parameters and event log values follow the legacy // psi_web protocol and naming conventions. The API is compatible with // all tunnel-core clients but are not backwards compatible with all // legacy clients. func sshAPIRequestHandler( support *SupportServices, - clientAddr string, - geoIPData GeoIPData, - authorizedAccessTypes []string, + sshClient *sshClient, name string, requestPayload []byte) ([]byte, error) { @@ -78,32 +73,22 @@ func sshAPIRequestHandler( // to a string, not a decoded []byte, as required. var params common.APIParameters - err := json.Unmarshal(requestPayload, ¶ms) + + // The request payload is either packed CBOR or legacy JSON. 
+ + params, isPacked, err := protocol.GetPackedAPIParametersRequestPayload(requestPayload) if err != nil { return nil, errors.Tracef( - "invalid payload for request name: %s: %s", name, err) + "invalid packed payload for request name: %s: %s", name, err) } - return dispatchAPIRequestHandler( - support, - protocol.PSIPHON_SSH_API_PROTOCOL, - clientAddr, - geoIPData, - authorizedAccessTypes, - name, - params) -} - -// dispatchAPIRequestHandler is the common dispatch point for both -// web and SSH API requests. -func dispatchAPIRequestHandler( - support *SupportServices, - apiProtocol string, - clientAddr string, - geoIPData GeoIPData, - authorizedAccessTypes []string, - name string, - params common.APIParameters) (response []byte, reterr error) { + if !isPacked { + err := json.Unmarshal(requestPayload, ¶ms) + if err != nil { + return nil, errors.Tracef( + "invalid payload for request name: %s: %s", name, err) + } + } // Before invoking the handlers, enforce some preconditions: // @@ -122,23 +107,7 @@ func dispatchAPIRequestHandler( if name != protocol.PSIPHON_API_HANDSHAKE_REQUEST_NAME { - // TODO: same session-ID-lookup TODO in handshakeAPIRequestHandler - // applies here. - sessionID, err := getStringRequestParam(params, "client_session_id") - if err == nil { - // Note: follows/duplicates baseParams validation - if !isHexDigits(support.Config, sessionID) { - err = std_errors.New("invalid param: client_session_id") - } - } - if err != nil { - return nil, errors.Trace(err) - } - - completed, exhausted, err := support.TunnelServer.GetClientHandshaked(sessionID) - if err != nil { - return nil, errors.Trace(err) - } + completed, exhausted := sshClient.getHandshaked() if !completed { return nil, errors.TraceNew("handshake not completed") } @@ -151,19 +120,19 @@ func dispatchAPIRequestHandler( case protocol.PSIPHON_API_HANDSHAKE_REQUEST_NAME: return handshakeAPIRequestHandler( - support, apiProtocol, clientAddr, geoIPData, params) + support, protocol.PSIPHON_API_PROTOCOL_SSH, sshClient, params) case protocol.PSIPHON_API_CONNECTED_REQUEST_NAME: return connectedAPIRequestHandler( - support, clientAddr, geoIPData, authorizedAccessTypes, params) + support, sshClient, params) case protocol.PSIPHON_API_STATUS_REQUEST_NAME: return statusAPIRequestHandler( - support, clientAddr, geoIPData, authorizedAccessTypes, params) + support, sshClient, params) case protocol.PSIPHON_API_CLIENT_VERIFICATION_REQUEST_NAME: return clientVerificationAPIRequestHandler( - support, clientAddr, geoIPData, authorizedAccessTypes, params) + support, sshClient, params) } return nil, errors.Tracef("invalid request name: %s", name) @@ -171,14 +140,11 @@ func dispatchAPIRequestHandler( var handshakeRequestParams = append( append( - append( - []requestParamSpec{ - // Legacy clients may not send "session_id" in handshake - {"session_id", isHexDigits, requestParamOptional}, - {"missing_server_entry_signature", isBase64String, requestParamOptional}, - {"missing_server_entry_provider_id", isBase64String, requestParamOptional}}, - baseParams...), - baseDialParams...), + []requestParamSpec{ + {"missing_server_entry_signature", isBase64String, requestParamOptional}, + {"missing_server_entry_provider_id", isBase64String, requestParamOptional}, + }, + baseAndDialParams...), tacticsParams...) // handshakeAPIRequestHandler implements the "handshake" API request. 
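The packed-or-JSON dispatch above can be illustrated with a decode-and-fall-back helper. This is a simplification: it decodes plain CBOR maps with github.com/fxamacker/cbor/v2, whereas protocol.GetPackedAPIParametersRequestPayload handles the actual packed parameter encoding; decodeAPIParams is an illustrative name, not part of the psiphon packages.

package sketch

import (
	"encoding/json"

	"github.com/fxamacker/cbor/v2"
)

// decodeAPIParams accepts either a CBOR-encoded or a legacy JSON-encoded
// parameter map, preferring CBOR, and reports which encoding was used.
func decodeAPIParams(payload []byte) (map[string]interface{}, bool, error) {

	var params map[string]interface{}

	// Try CBOR first; a JSON payload will not decode as a CBOR map and
	// falls through to the legacy path.
	if err := cbor.Unmarshal(payload, &params); err == nil {
		return params, true, nil
	}

	params = nil
	if err := json.Unmarshal(payload, &params); err != nil {
		return nil, false, err
	}
	return params, false, nil
}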
@@ -188,10 +154,68 @@ var handshakeRequestParams = append( func handshakeAPIRequestHandler( support *SupportServices, apiProtocol string, - clientAddr string, - geoIPData GeoIPData, + sshClient *sshClient, params common.APIParameters) ([]byte, error) { + var clientGeoIPData GeoIPData + + var inproxyClientIP string + var inproxyClientGeoIPData GeoIPData + var inproxyRelayLogFields common.LogFields + + if sshClient.isInproxyTunnelProtocol { + + inproxyConnectionID, err := getStringRequestParam(params, "inproxy_connection_id") + + if err != nil { + return nil, errors.Trace(err) + } + + // Complete the in-proxy broker/server relay before the rest of + // handshake in order to obtain the original client IP and other + // inputs sent from the broker. + // + // In the best and typical case, the broker has already established a + // secure session with this server and the inproxy_relay_packet is + // the broker report application-level payload. Otherwise, if there + // is no session or the session has expired, session handshake + // messages will be relayed to the broker via the client, using SSH + // requests to the client. These requests/responses happen while the + // handshake response remains outstanding, as this handler needs the + // original client IP and its geolocation data in order to determine + // the correct landing pages, traffic rules, tactics, etc. + // + // The client should extends its handshake timeout to accommodate + // potential relay round trips. + + inproxyRelayPacketStr, err := getStringRequestParam(params, "inproxy_relay_packet") + if err != nil { + return nil, errors.Trace(err) + } + + inproxyRelayPacket, err := base64.RawStdEncoding.DecodeString(inproxyRelayPacketStr) + if err != nil { + return nil, errors.Trace(err) + } + + inproxyClientIP, inproxyRelayLogFields, err = doHandshakeInproxyBrokerRelay( + sshClient, + inproxyConnectionID, + inproxyRelayPacket) + if err != nil { + return nil, errors.Trace(err) + } + + inproxyClientGeoIPData = support.GeoIPService.Lookup(inproxyClientIP) + clientGeoIPData = inproxyClientGeoIPData + + } else { + + clientGeoIPData = sshClient.getClientGeoIPData() + } + + // Check input parameters + // Note: ignoring legacy "known_servers" params err := validateRequestParams(support.Config, params, handshakeRequestParams) @@ -199,7 +223,6 @@ func handshakeAPIRequestHandler( return nil, errors.Trace(err) } - sessionID, _ := getStringRequestParam(params, "client_session_id") sponsorID, _ := getStringRequestParam(params, "sponsor_id") clientVersion, _ := getStringRequestParam(params, "client_version") clientPlatform, _ := getStringRequestParam(params, "client_platform") @@ -210,6 +233,19 @@ func handshakeAPIRequestHandler( // the client, a value of 0 will be used. establishedTunnelsCount, _ := getIntStringRequestParam(params, "established_tunnels_count") + var authorizations []string + if params[protocol.PSIPHON_API_HANDSHAKE_AUTHORIZATIONS] != nil { + authorizations, err = getStringArrayRequestParam(params, protocol.PSIPHON_API_HANDSHAKE_AUTHORIZATIONS) + if err != nil { + return nil, errors.Trace(err) + } + } + + deviceRegion, ok := getOptionalStringRequestParam(params, "device_region") + if !ok { + deviceRegion = GEOIP_UNKNOWN_VALUE + } + // splitTunnelOwnRegion indicates if the client is requesting split tunnel // mode to be applied to the client's own country. When omitted by the // client, the value will be false. 
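On the client side of the relay described above, the client only shuttles opaque packets: each SSH request from the server carries a packet bound for the broker, and the broker's reply goes back in the SSH response. A rough sketch of that handler, with hypothetical interfaces standing in for the real broker client and SSH plumbing:

package sketch

import (
	"context"
	"errors"
)

// brokerRoundTripper sends one relay packet to the broker and returns the
// broker's next packet. This is a hypothetical interface for illustration.
type brokerRoundTripper interface {
	RelayPacket(ctx context.Context, packet []byte) ([]byte, error)
}

// handleServerRelayRequest sketches the client side of the relay: it is
// invoked for each SSH request the server sends while its handshake response
// is outstanding, forwards the opaque packet to the broker, and returns the
// broker's response as the SSH reply payload.
func handleServerRelayRequest(
	ctx context.Context,
	broker brokerRoundTripper,
	serverPacket []byte) ([]byte, error) {

	if len(serverPacket) == 0 {
		return nil, errors.New("missing relay packet")
	}

	// The packet contents belong to the broker/server session and are opaque
	// to the client; the client only moves bytes in each direction.
	brokerPacket, err := broker.RelayPacket(ctx, serverPacket)
	if err != nil {
		return nil, err
	}

	return brokerPacket, nil
}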
@@ -222,7 +258,7 @@ func handshakeAPIRequestHandler( ownRegion := "" if splitTunnelOwnRegion { - ownRegion = geoIPData.Country + ownRegion = clientGeoIPData.Country } var splitTunnelLookup *splitTunnelLookup if ownRegion != "" || len(splitTunnelOtherRegions) > 0 { @@ -232,81 +268,46 @@ func handshakeAPIRequestHandler( } } - var authorizations []string - if params[protocol.PSIPHON_API_HANDSHAKE_AUTHORIZATIONS] != nil { - authorizations, err = getStringArrayRequestParam(params, protocol.PSIPHON_API_HANDSHAKE_AUTHORIZATIONS) - if err != nil { - return nil, errors.Trace(err) - } - } - - deviceRegion, ok := getOptionalStringRequestParam(params, "device_region") - if !ok { - deviceRegion = GEOIP_UNKNOWN_VALUE - } - // Note: no guarantee that PsinetDatabase won't reload between database calls db := support.PsinetDatabase httpsRequestRegexes, domainBytesChecksum := db.GetHttpsRequestRegexes(sponsorID) + tacticsPayload, err := support.TacticsServer.GetTacticsPayload( + common.GeoIPData(clientGeoIPData), params) + if err != nil { + return nil, errors.Trace(err) + } + + var newTacticsTag string + if tacticsPayload != nil && len(tacticsPayload.Tactics) > 0 { + newTacticsTag = tacticsPayload.Tag + } + // Flag the SSH client as having completed its handshake. This // may reselect traffic rules and starts allowing port forwards. - // TODO: in the case of SSH API requests, the actual sshClient could - // be passed in and used here. The session ID lookup is only strictly - // necessary to support web API requests. - handshakeStateInfo, err := support.TunnelServer.SetClientHandshakeState( - sessionID, + apiParams := copyBaseAndDialParams(params) + + handshakeStateInfo, err := sshClient.setHandshakeState( handshakeState{ completed: true, apiProtocol: apiProtocol, - apiParams: copyBaseSessionAndDialParams(params), + apiParams: apiParams, domainBytesChecksum: domainBytesChecksum, establishedTunnelsCount: establishedTunnelsCount, splitTunnelLookup: splitTunnelLookup, deviceRegion: deviceRegion, + newTacticsTag: newTacticsTag, + inproxyClientIP: inproxyClientIP, + inproxyClientGeoIPData: inproxyClientGeoIPData, + inproxyRelayLogFields: inproxyRelayLogFields, }, authorizations) if err != nil { return nil, errors.Trace(err) } - tacticsPayload, err := support.TacticsServer.GetTacticsPayload( - common.GeoIPData(geoIPData), params) - if err != nil { - return nil, errors.Trace(err) - } - - var marshaledTacticsPayload []byte - - if tacticsPayload != nil { - - marshaledTacticsPayload, err = json.Marshal(tacticsPayload) - if err != nil { - return nil, errors.Trace(err) - } - - // Log a metric when new tactics are issued. Logging here indicates that - // the handshake tactics mechanism is active; but logging for every - // handshake creates unneccesary log data. - - if len(tacticsPayload.Tactics) > 0 { - - logFields := getRequestLogFields( - tactics.TACTICS_METRIC_EVENT_NAME, - geoIPData, - handshakeStateInfo.authorizedAccessTypes, - params, - handshakeRequestParams) - - logFields[tactics.NEW_TACTICS_TAG_LOG_FIELD_NAME] = tacticsPayload.Tag - logFields[tactics.IS_TACTICS_REQUEST_LOG_FIELD_NAME] = false - - log.LogRawFieldsWithTimestamp(logFields) - } - } - // The log comes _after_ SetClientHandshakeState, in case that call rejects // the state change (for example, if a second handshake is performed) // @@ -315,38 +316,38 @@ func handshakeAPIRequestHandler( // common API parameters and "handshake_completed" flag, this handshake // log is mostly redundant and set to debug level. 
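Gating the handshake log behind IsLogLevelDebug means the request log fields are only assembled when they will actually be emitted. The same guard pattern in a stripped-down form, with stand-in names:

package sketch

import "log"

// debugEnabled stands in for the logger's debug level configuration.
var debugEnabled = false

// expensiveFields represents work, such as getRequestLogFields, that is only
// worth doing when the result will actually be emitted.
func expensiveFields() map[string]interface{} {
	return map[string]interface{}{"param_count": 42}
}

// logHandshakeDebug assembles and emits the fields only at debug level.
func logHandshakeDebug() {
	if !debugEnabled {
		return
	}
	log.Printf("handshake: %v", expensiveFields())
}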
- log.WithTraceFields( - getRequestLogFields( + if IsLogLevelDebug() { + logFields := getRequestLogFields( "", - geoIPData, + sshClient.sessionID, + clientGeoIPData, handshakeStateInfo.authorizedAccessTypes, params, - handshakeRequestParams)).Debug("handshake") + handshakeRequestParams) + log.WithTraceFields(logFields).Debug("handshake") + } pad_response, _ := getPaddingSizeRequestParam(params, "pad_response") // Discover new servers - disableDiscovery, err := support.TunnelServer.GetClientDisableDiscovery(sessionID) - if err != nil { - return nil, errors.Trace(err) - } - var encodedServerList []string + if !sshClient.getDisableDiscovery() { - if !disableDiscovery { + clientIP := "" + if sshClient.isInproxyTunnelProtocol { + clientIP = inproxyClientIP + } else if sshClient.peerAddr != nil { + clientIP, _, _ = net.SplitHostPort(sshClient.peerAddr.String()) - host, _, err := net.SplitHostPort(clientAddr) - if err != nil { - return nil, errors.Trace(err) } - clientIP := net.ParseIP(host) - if clientIP == nil { - return nil, errors.TraceNew("missing client IP") + IP := net.ParseIP(clientIP) + if IP == nil { + return nil, errors.TraceNew("invalid client IP") } - encodedServerList = support.discovery.DiscoverServers(clientIP) + encodedServerList = support.discovery.DiscoverServers(IP) } // When the client indicates that it used an out-of-date server entry for @@ -382,17 +383,47 @@ func handshakeAPIRequestHandler( // clients. homepages := db.GetRandomizedHomepages( - sponsorID, geoIPData.Country, geoIPData.ASN, deviceRegion, isMobile) + sponsorID, + clientGeoIPData.Country, + clientGeoIPData.ASN, + deviceRegion, + isMobile) + + clientAddress := "" + if sshClient.isInproxyTunnelProtocol { + + // ClientAddress not supported for in-proxy tunnel protocols: + // + // - We don't want to return the address of the direct peer, the + // in-proxy proxy; + // - The known port number will correspond to the in-proxy proxy + // source address, not the client; + // - While we assume that the the original client IP from the broker + // is representative for geolocation, an actual direct connection + // to the Psiphon server from the client may route differently and + // use a different IP address. 
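A minimal sketch of the discovery IP selection above: for in-proxy tunnel protocols the broker-verified original client IP is used, otherwise the direct peer address is split and parsed, and a nil net.ParseIP result is treated as an error. Inputs and the helper name are illustrative.

// Sketch only: choosing and validating the IP used for server discovery.
package main

import (
    "fmt"
    "net"
)

func discoveryIP(isInproxy bool, inproxyClientIP string, peerAddr net.Addr) (net.IP, error) {
    clientIP := ""
    if isInproxy {
        clientIP = inproxyClientIP
    } else if peerAddr != nil {
        clientIP, _, _ = net.SplitHostPort(peerAddr.String())
    }
    IP := net.ParseIP(clientIP)
    if IP == nil {
        return nil, fmt.Errorf("invalid client IP: %q", clientIP)
    }
    return IP, nil
}

func main() {
    peer := &net.TCPAddr{IP: net.ParseIP("203.0.113.7"), Port: 443}
    IP, err := discoveryIP(false, "", peer)
    fmt.Println(IP, err)
}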
+ + clientAddress = "" + } else if sshClient.peerAddr != nil { + clientAddress = sshClient.peerAddr.String() + } + + var marshaledTacticsPayload []byte + if tacticsPayload != nil { + marshaledTacticsPayload, err = json.Marshal(tacticsPayload) + if err != nil { + return nil, errors.Trace(err) + } + } handshakeResponse := protocol.HandshakeResponse{ - SSHSessionID: sessionID, Homepages: homepages, UpgradeClientVersion: db.GetUpgradeClientVersion(clientVersion, normalizedPlatform), PageViewRegexes: make([]map[string]string, 0), HttpsRequestRegexes: httpsRequestRegexes, EncodedServerList: encodedServerList, - ClientRegion: geoIPData.Country, - ClientAddress: clientAddr, + ClientRegion: clientGeoIPData.Country, + ClientAddress: clientAddress, ServerTimestamp: common.GetCurrentTimestamp(), ActiveAuthorizationIDs: handshakeStateInfo.activeAuthorizationIDs, TacticsPayload: marshaledTacticsPayload, @@ -402,6 +433,9 @@ func handshakeAPIRequestHandler( Padding: strings.Repeat(" ", pad_response), } + // TODO: as a future enhancement, pack and CBOR encode this and other API + // responses + responsePayload, err := json.Marshal(handshakeResponse) if err != nil { return nil, errors.Trace(err) @@ -410,12 +444,110 @@ func handshakeAPIRequestHandler( return responsePayload, nil } +func doHandshakeInproxyBrokerRelay( + sshClient *sshClient, + clientConnectionID string, + initialRelayPacket []byte) (string, common.LogFields, error) { + + connectionID, err := inproxy.IDFromString(clientConnectionID) + if err != nil { + return "", nil, errors.Trace(err) + } + + clientIP := "" + var logFields common.LogFields + + // This first packet from broker arrives via the client handshake. If + // there is an established, non-expired session, this packet will contain + // the application-level broker report and the relay will complete + // immediately. + + relayPacket := initialRelayPacket + + for i := 0; i < inproxy.MaxRelayRoundTrips; i++ { + + // broker -> server + + relayPacket, err = sshClient.sshServer.inproxyBrokerSessions.HandlePacket( + CommonLogger(log), + relayPacket, + connectionID, + func( + brokerVerifiedOriginalClientIP string, + fields common.LogFields) { + + // Once the broker report is received, this callback is invoked. + clientIP = brokerVerifiedOriginalClientIP + logFields = fields + }) + if err != nil { + if relayPacket == nil { + + // If there is an error and no relay packet, the packet is + // invalid. Drop the packet and return an error. Do _not_ + // reset the session, otherwise a malicious client could + // interrupt a valid broker/server session with a malformed packet. + return "", nil, errors.Trace(err) + } + + // In the case of expired sessions, a reset session token is sent + // to the broker, so this is not a failure condition; the error + // is for logging only. Continue to ship relayPacket. + + log.WithTraceFields(LogFields{"error": err}).Warning( + "HandlePacket returned packet and error") + } + + if relayPacket == nil { + + // The relay is complete; the handler recording the clientIP and + // logFields was invoked. + return clientIP, logFields, nil + } + + // server -> broker + + // Send an SSH request back to client with next packet for broker; + // then the client relays that to the broker and returns the broker's + // next response in the SSH response. 
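A minimal sketch of the bounded relay loop shape used by doHandshakeInproxyBrokerRelay: a packet handler either returns the next packet to forward through the client or returns nil to signal that the relay is complete, and the loop aborts after a fixed number of round trips. The function types and constant below are illustrative, not the inproxy package's API.

// Sketch only: the broker/server relay loop reduced to its control flow.
package main

import (
    "errors"
    "fmt"
)

const maxRoundTrips = 5

func relay(
    initialPacket []byte,
    handlePacket func(packet []byte) (nextPacket []byte, err error),
    exchangeWithClient func(packet []byte) ([]byte, error)) error {

    packet := initialPacket
    for i := 0; i < maxRoundTrips; i++ {
        next, err := handlePacket(packet)
        if err != nil {
            return err
        }
        if next == nil {
            // Relay complete: the handler has received the broker report.
            return nil
        }
        packet, err = exchangeWithClient(next)
        if err != nil {
            return err
        }
    }
    return errors.New("exceeded relay round trips")
}

func main() {
    step := 0
    err := relay(
        []byte("initial packet from broker via client"),
        func(p []byte) ([]byte, error) {
            step++
            if step > 2 {
                return nil, nil // done
            }
            return []byte("server->broker"), nil
        },
        func(p []byte) ([]byte, error) { return []byte("broker->server"), nil })
    fmt.Println(err)
}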
+ + request := protocol.InproxyRelayRequest{ + Packet: relayPacket, + } + requestPayload, err := protocol.CBOREncoding.Marshal(request) + if err != nil { + return "", nil, errors.Trace(err) + } + + ok, responsePayload, err := sshClient.sshConn.SendRequest( + protocol.PSIPHON_API_INPROXY_RELAY_REQUEST_NAME, + true, + requestPayload) + if err != nil { + return "", nil, errors.Trace(err) + } + if !ok { + return "", nil, errors.TraceNew("client rejected request") + } + + var response protocol.InproxyRelayResponse + err = cbor.Unmarshal(responsePayload, &response) + if err != nil { + return "", nil, errors.Trace(err) + } + + relayPacket = response.Packet + } + + return "", nil, errors.Tracef("exceeded %d relay round trips", inproxy.MaxRelayRoundTrips) +} + // uniqueUserParams are the connected request parameters which are logged for // unique_user events. var uniqueUserParams = append( []requestParamSpec{ {"last_connected", isLastConnected, 0}}, - baseSessionParams...) + baseParams...) var connectedRequestParams = append( []requestParamSpec{ @@ -440,9 +572,7 @@ var updateOnConnectedParamNames = append( // connected_timestamp is truncated as a privacy measure. func connectedAPIRequestHandler( support *SupportServices, - clientAddr string, - geoIPData GeoIPData, - authorizedAccessTypes []string, + sshClient *sshClient, params common.APIParameters) ([]byte, error) { err := validateRequestParams(support.Config, params, connectedRequestParams) @@ -450,7 +580,13 @@ func connectedAPIRequestHandler( return nil, errors.Trace(err) } - sessionID, _ := getStringRequestParam(params, "client_session_id") + // Note: unlock before use is only safe as long as referenced sshClient data, + // such as slices in handshakeState, is read-only after initially set. + + sshClient.Lock() + authorizedAccessTypes := sshClient.handshakeState.authorizedAccessTypes + sshClient.Unlock() + lastConnected, _ := getStringRequestParam(params, "last_connected") // Update, for server_tunnel logging, upstream fragmentor metrics, as the @@ -459,13 +595,7 @@ func connectedAPIRequestHandler( // are reported only in the connected request are added to server_tunnel // here. - // TODO: same session-ID-lookup TODO in handshakeAPIRequestHandler - // applies here. - err = support.TunnelServer.UpdateClientAPIParameters( - sessionID, copyUpdateOnConnectedParams(params)) - if err != nil { - return nil, errors.Trace(err) - } + sshClient.updateAPIParameters(copyUpdateOnConnectedParams(params)) connectedTimestamp := common.TruncateTimestampToHour(common.GetCurrentTimestamp()) @@ -495,7 +625,8 @@ func connectedAPIRequestHandler( log.LogRawFieldsWithTimestamp( getRequestLogFields( "unique_user", - geoIPData, + sshClient.sessionID, + sshClient.getClientGeoIPData(), authorizedAccessTypes, params, uniqueUserParams)) @@ -516,10 +647,12 @@ func connectedAPIRequestHandler( return responsePayload, nil } -var statusRequestParams = baseSessionParams +var statusRequestParams = baseParams var remoteServerListStatParams = append( []requestParamSpec{ + // Legacy clients don't record the session_id with remote_server_list_stats entries. + {"session_id", isHexDigits, requestParamOptional}, {"client_download_timestamp", isISO8601Date, 0}, {"tunneled", isBooleanFlag, requestParamOptional | requestParamLogFlagAsBool}, {"url", isAnyString, 0}, @@ -539,7 +672,7 @@ var remoteServerListStatParams = append( {"tls_fragmented", isBooleanFlag, requestParamOptional | requestParamLogFlagAsBool}, }, - baseSessionParams...) + baseParams...) 
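A minimal sketch of the CBOR round trip used above for the relay request/response payloads carried over the client SSH request. It uses github.com/fxamacker/cbor/v2 directly, with simple stand-in types; the server code goes through protocol.CBOREncoding and protocol.InproxyRelayRequest/InproxyRelayResponse.

// Sketch only: CBOR encode a relay request, then decode a relay response.
package main

import (
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

type relayRequest struct {
    Packet []byte
}

type relayResponse struct {
    Packet []byte
}

func main() {
    requestPayload, err := cbor.Marshal(relayRequest{Packet: []byte("next broker packet")})
    if err != nil {
        panic(err)
    }

    // The client forwards requestPayload to the broker and returns the
    // broker's next packet in the SSH response; here we just decode one.
    responsePayload, _ := cbor.Marshal(relayResponse{Packet: []byte("broker reply")})

    var response relayResponse
    if err := cbor.Unmarshal(responsePayload, &response); err != nil {
        panic(err)
    }
    fmt.Printf("request %d bytes, response packet %q\n", len(requestPayload), response.Packet)
}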
// Backwards compatibility case: legacy clients do not include these fields in // the remote_server_list_stats entries. Use the values from the outer status @@ -548,7 +681,6 @@ var remoteServerListStatParams = append( // recording time). Note that all but client_build_rev, device_region, and // device_location are required fields. var remoteServerListStatBackwardsCompatibilityParamNames = []string{ - "session_id", "propagation_channel_id", "sponsor_id", "client_version", @@ -572,7 +704,7 @@ var failedTunnelStatParams = append( {"bytes_up", isIntString, requestParamOptional | requestParamLogStringAsInt}, {"bytes_down", isIntString, requestParamOptional | requestParamLogStringAsInt}, {"tunnel_error", isAnyString, 0}}, - baseSessionAndDialParams...) + baseAndDialParams...) // statusAPIRequestHandler implements the "status" API request. // Clients make periodic status requests which deliver client-side @@ -582,9 +714,7 @@ var failedTunnelStatParams = append( // string). Stats processor must handle this input with care. func statusAPIRequestHandler( support *SupportServices, - clientAddr string, - geoIPData GeoIPData, - authorizedAccessTypes []string, + sshClient *sshClient, params common.APIParameters) ([]byte, error) { err := validateRequestParams(support.Config, params, statusRequestParams) @@ -592,7 +722,9 @@ func statusAPIRequestHandler( return nil, errors.Trace(err) } - sessionID, _ := getStringRequestParam(params, "client_session_id") + sshClient.Lock() + authorizedAccessTypes := sshClient.handshakeState.authorizedAccessTypes + sshClient.Unlock() statusData, err := getJSONObjectRequestParam(params, "statusData") if err != nil { @@ -613,12 +745,7 @@ func statusAPIRequestHandler( // configured to do so in the handshake reponse. Legacy clients may still // report "(OTHER)" host_bytes when no regexes are set. Drop those stats. - acceptDomainBytes, err := support.TunnelServer.AcceptClientDomainBytes(sessionID) - if err != nil { - return nil, errors.Trace(err) - } - - if acceptDomainBytes && statusData["host_bytes"] != nil { + if sshClient.acceptDomainBytes() && statusData["host_bytes"] != nil { hostBytes, err := getMapStringInt64RequestParam(statusData, "host_bytes") if err != nil { @@ -628,7 +755,8 @@ func statusAPIRequestHandler( domainBytesFields := getRequestLogFields( "domain_bytes", - geoIPData, + sshClient.sessionID, + sshClient.getClientGeoIPData(), authorizedAccessTypes, params, statusRequestParams) @@ -662,11 +790,6 @@ func statusAPIRequestHandler( } } - // For validation, copy expected fields from the outer - // statusRequestParams. - remoteServerListStat["server_secret"] = params["server_secret"] - remoteServerListStat["client_session_id"] = params["client_session_id"] - err := validateRequestParams(support.Config, remoteServerListStat, remoteServerListStatParams) if err != nil { // Occasionally, clients may send corrupt persistent stat data. Do not @@ -677,7 +800,8 @@ func statusAPIRequestHandler( remoteServerListFields := getRequestLogFields( "remote_server_list", - geoIPData, + "", // Use the session_id the client recorded with the event + sshClient.getClientGeoIPData(), authorizedAccessTypes, remoteServerListStat, remoteServerListStatParams) @@ -704,10 +828,6 @@ func statusAPIRequestHandler( } for _, failedTunnelStat := range failedTunnelStats { - // failed_tunnel supplies a full set of base params, but the server secret - // must use the correct value from the outer statusRequestParams. 
- failedTunnelStat["server_secret"] = params["server_secret"] - err := validateRequestParams(support.Config, failedTunnelStat, failedTunnelStatParams) if err != nil { // Occasionally, clients may send corrupt persistent stat data. Do not @@ -721,7 +841,8 @@ func statusAPIRequestHandler( failedTunnelFields := getRequestLogFields( "failed_tunnel", - geoIPData, + "", // Use the session_id the client recorded with the event + sshClient.getClientGeoIPData(), authorizedAccessTypes, failedTunnelStat, failedTunnelStatParams) @@ -806,11 +927,9 @@ func statusAPIRequestHandler( // clientVerificationAPIRequestHandler is just a compliance stub // for older Android clients that still send verification requests func clientVerificationAPIRequestHandler( - support *SupportServices, - clientAddr string, - geoIPData GeoIPData, - authorizedAccessTypes []string, - params common.APIParameters) ([]byte, error) { + _ *SupportServices, + _ *sshClient, + _ common.APIParameters) ([]byte, error) { return make([]byte, 0), nil } @@ -820,8 +939,11 @@ var tacticsParams = []requestParamSpec{ } var tacticsRequestParams = append( - append([]requestParamSpec(nil), tacticsParams...), - baseSessionAndDialParams...) + append( + []requestParamSpec{ + {"session_id", isHexDigits, 0}}, + tacticsParams...), + baseAndDialParams...) func getTacticsAPIParameterValidator(config *Config) common.APIParameterValidator { return func(params common.APIParameters) error { @@ -835,6 +957,7 @@ func getTacticsAPIParameterLogFieldFormatter() common.APIParameterLogFieldFormat logFields := getRequestLogFields( tactics.TACTICS_METRIC_EVENT_NAME, + "", // Use the session_id the client reported GeoIPData(geoIPData), nil, // authorizedAccessTypes are not known yet params, @@ -844,6 +967,35 @@ func getTacticsAPIParameterLogFieldFormatter() common.APIParameterLogFieldFormat } } +var inproxyBrokerRequestParams = append( + append( + []requestParamSpec{ + {"session_id", isHexDigits, 0}}, + tacticsParams...), + baseParams...) + +func getInproxyBrokerAPIParameterValidator(config *Config) common.APIParameterValidator { + return func(params common.APIParameters) error { + return validateRequestParams(config, params, inproxyBrokerRequestParams) + } +} + +func getInproxyBrokerAPIParameterLogFieldFormatter() common.APIParameterLogFieldFormatter { + + return func(geoIPData common.GeoIPData, params common.APIParameters) common.LogFields { + + logFields := getRequestLogFields( + "inproxy_broker", + "", // Use the session_id the client reported + GeoIPData(geoIPData), + nil, + params, + inproxyBrokerRequestParams) + + return common.LogFields(logFields) + } +} + // requestParamSpec defines a request parameter. Each param is expected to be // a string, unless requestParamArray is specified, in which case an array of // strings is expected. @@ -869,8 +1021,6 @@ const ( // baseParams are the basic request parameters that are expected for all API // requests and log events. 
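A minimal sketch of the table-driven validation pattern behind requestParamSpec and validateRequestParams, reduced to a name, a validator function, and an optional flag. The specs and validator below are illustrative stand-ins for the real tables such as inproxyBrokerRequestParams and baseParams.

// Sketch only: validating request params against a spec table.
package main

import (
    "fmt"
    "strings"
)

type paramSpec struct {
    name     string
    validate func(string) bool
    optional bool
}

func isHexDigits(value string) bool {
    return -1 == strings.IndexFunc(value, func(c rune) bool {
        return !(c >= '0' && c <= '9' || c >= 'a' && c <= 'f' || c >= 'A' && c <= 'F')
    })
}

func validateParams(params map[string]interface{}, specs []paramSpec) error {
    for _, spec := range specs {
        value, ok := params[spec.name].(string)
        if !ok {
            if spec.optional {
                continue
            }
            return fmt.Errorf("missing param: %s", spec.name)
        }
        if !spec.validate(value) {
            return fmt.Errorf("invalid param: %s", spec.name)
        }
    }
    return nil
}

func main() {
    specs := []paramSpec{
        {"session_id", isHexDigits, false},
        {"client_build_rev", isHexDigits, true},
    }
    err := validateParams(map[string]interface{}{"session_id": "abcdef0123"}, specs)
    fmt.Println(err) // <nil>
}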
var baseParams = []requestParamSpec{ - {"server_secret", isServerSecret, requestParamNotLogged}, - {"client_session_id", isHexDigits, requestParamNotLogged}, {"propagation_channel_id", isHexDigits, 0}, {"sponsor_id", isHexDigits, 0}, {"client_version", isIntString, requestParamLogStringAsInt}, @@ -879,16 +1029,10 @@ var baseParams = []requestParamSpec{ {"client_build_rev", isHexDigits, requestParamOptional}, {"device_region", isAnyString, requestParamOptional}, {"device_location", isGeoHashString, requestParamOptional}, + {"network_type", isAnyString, requestParamOptional}, + {tactics.APPLIED_TACTICS_TAG_PARAMETER_NAME, isAnyString, requestParamOptional}, } -// baseSessionParams adds to baseParams the required session_id parameter. For -// all requests except handshake, all existing clients are expected to send -// session_id. Legacy clients may not send "session_id" in handshake. -var baseSessionParams = append( - []requestParamSpec{ - {"session_id", isHexDigits, 0}}, - baseParams...) - // baseDialParams are the dial parameters, per-tunnel network protocol and // obfuscation metrics which are logged with server_tunnel, failed_tunnel, and // tactics. @@ -909,7 +1053,6 @@ var baseDialParams = []requestParamSpec{ {"server_entry_region", isRegionCode, requestParamOptional}, {"server_entry_source", isServerEntrySource, requestParamOptional}, {"server_entry_timestamp", isISO8601Date, requestParamOptional}, - {tactics.APPLIED_TACTICS_TAG_PARAMETER_NAME, isAnyString, requestParamOptional}, {"dial_port_number", isIntString, requestParamOptional | requestParamLogStringAsInt}, {"quic_version", isAnyString, requestParamOptional}, {"quic_dial_sni_address", isAnyString, requestParamOptional}, @@ -933,7 +1076,6 @@ var baseDialParams = []requestParamSpec{ {"meek_tls_padding", isIntString, requestParamOptional | requestParamLogStringAsInt}, {"network_latency_multiplier", isFloatString, requestParamOptional | requestParamLogStringAsFloat}, {"client_bpf", isAnyString, requestParamOptional}, - {"network_type", isAnyString, requestParamOptional}, {"conjure_cached", isBooleanFlag, requestParamOptional | requestParamLogFlagAsBool}, {"conjure_delay", isIntString, requestParamOptional | requestParamLogStringAsInt}, {"conjure_transport", isAnyString, requestParamOptional}, @@ -958,12 +1100,62 @@ var baseDialParams = []requestParamSpec{ {"steering_ip", isIPAddress, requestParamOptional | requestParamLogOnlyForFrontedMeekOrConjure}, } -// baseSessionAndDialParams adds baseDialParams to baseSessionParams. -var baseSessionAndDialParams = append( +var inproxyDialParams = []requestParamSpec{ + + // Both the client and broker send inproxy_connection_id, and the values + // must be the same. The broker's value is logged, so the client's value + // is configured here as requestParamNotLogged. 
+ {"inproxy_connection_id", isUnpaddedBase64String, requestParamOptional | requestParamNotLogged}, + {"inproxy_relay_packet", isUnpaddedBase64String, requestParamOptional | requestParamNotLogged}, + {"inproxy_broker_is_replay", isBooleanFlag, requestParamOptional | requestParamLogFlagAsBool}, + {"inproxy_broker_transport", isAnyString, requestParamOptional}, + {"inproxy_broker_fronting_provider_id", isAnyString, requestParamOptional}, + {"inproxy_broker_dial_address", isAnyString, requestParamOptional}, + {"inproxy_broker_resolved_ip_address", isAnyString, requestParamOptional}, + {"inproxy_broker_sni_server_name", isAnyString, requestParamOptional}, + {"inproxy_broker_host_header", isAnyString, requestParamOptional}, + {"inproxy_broker_transformed_host_name", isBooleanFlag, requestParamOptional | requestParamLogFlagAsBool}, + {"inproxy_broker_user_agent", isAnyString, requestParamOptional}, + {"inproxy_broker_tls_profile", isAnyString, requestParamOptional}, + {"inproxy_broker_tls_version", isAnyString, requestParamOptional}, + {"inproxy_broker_tls_fragmented", isBooleanFlag, requestParamOptional | requestParamLogFlagAsBool}, + {"inproxy_broker_client_bpf", isAnyString, requestParamOptional}, + {"inproxy_broker_upstream_bytes_fragmented", isIntString, requestParamOptional | requestParamLogStringAsInt}, + {"inproxy_broker_http_transform", isAnyString, requestParamOptional}, + {"inproxy_broker_dns_preresolved", isAnyString, requestParamOptional}, + {"inproxy_broker_dns_preferred", isAnyString, requestParamOptional}, + {"inproxy_broker_dns_transform", isAnyString, requestParamOptional}, + {"inproxy_broker_dns_attempt", isIntString, requestParamOptional | requestParamLogStringAsInt}, + {"inproxy_webrtc_dns_preresolved", isAnyString, requestParamOptional}, + {"inproxy_webrtc_dns_preferred", isAnyString, requestParamOptional}, + {"inproxy_webrtc_dns_transform", isAnyString, requestParamOptional}, + {"inproxy_webrtc_dns_attempt", isIntString, requestParamOptional | requestParamLogStringAsInt}, + {"inproxy_webrtc_stun_server", isAnyString, requestParamOptional}, + {"inproxy_webrtc_stun_server_resolved_ip_address", isAnyString, requestParamOptional}, + {"inproxy_webrtc_stun_server_RFC5780", isAnyString, requestParamOptional}, + {"inproxy_webrtc_stun_server_RFC5780_resolved_ip_address", isAnyString, requestParamOptional}, + {"inproxy_webrtc_randomize_dtls", isBooleanFlag, requestParamOptional | requestParamLogFlagAsBool}, + {"inproxy_webrtc_padded_messages_sent", isIntString, requestParamOptional | requestParamLogStringAsInt}, + {"inproxy_webrtc_padded_messages_received", isIntString, requestParamOptional | requestParamLogStringAsInt}, + {"inproxy_webrtc_decoy_messages_sent", isIntString, requestParamOptional | requestParamLogStringAsInt}, + {"inproxy_webrtc_decoy_messages_received", isIntString, requestParamOptional | requestParamLogStringAsInt}, + {"inproxy_webrtc_local_ice_candidate_type", isAnyString, requestParamOptional}, + {"inproxy_webrtc_local_ice_candidate_is_initiator", isBooleanFlag, requestParamOptional | requestParamLogFlagAsBool}, + {"inproxy_webrtc_local_ice_candidate_is_IPv6", isBooleanFlag, requestParamOptional | requestParamLogFlagAsBool}, + {"inproxy_webrtc_local_ice_candidate_port", isIntString, requestParamOptional | requestParamLogStringAsInt}, + {"inproxy_webrtc_remote_ice_candidate_type", isAnyString, requestParamOptional}, + {"inproxy_webrtc_remote_ice_candidate_is_IPv6", isBooleanFlag, requestParamOptional | requestParamLogFlagAsBool}, + 
{"inproxy_webrtc_remote_ice_candidate_port", isIntString, requestParamOptional | requestParamLogStringAsInt}, +} + +// baseAndDialParams adds baseDialParams and inproxyDialParams to baseParams. +var baseAndDialParams = append( append( - []requestParamSpec{}, - baseSessionParams...), - baseDialParams...) + append( + []requestParamSpec{}, + baseParams...), + baseDialParams...), + inproxyDialParams...) func validateRequestParams( config *Config, @@ -1002,14 +1194,14 @@ func validateRequestParams( return nil } -// copyBaseSessionAndDialParams makes a copy of the params which includes only -// the baseSessionAndDialParams. -func copyBaseSessionAndDialParams(params common.APIParameters) common.APIParameters { +// copyBaseAndDialParams makes a copy of the params which includes only +// the baseAndDialParams. +func copyBaseAndDialParams(params common.APIParameters) common.APIParameters { // Note: not a deep copy; assumes baseSessionAndDialParams values are all // scalar types (int, string, etc.) paramsCopy := make(common.APIParameters) - for _, baseParam := range baseSessionAndDialParams { + for _, baseParam := range baseAndDialParams { value := params[baseParam.name] if value == nil { continue @@ -1070,6 +1262,7 @@ func validateStringArrayRequestParam( // the legacy psi_web and current ELK naming conventions. func getRequestLogFields( eventName string, + sessionID string, geoIPData GeoIPData, authorizedAccessTypes []string, params common.APIParameters, @@ -1077,11 +1270,23 @@ func getRequestLogFields( logFields := make(LogFields) + // A sessionID is specified for SSH API requests, where the Psiphon server + // has already received a session ID in the SSH auth payload. In this + // case, use that session ID. + // + // sessionID is "" for other, non-SSH server cases including tactics, + // in-proxy broker, and client-side store and forward events including + // remote server list and failed tunnel. + + if sessionID != "" { + logFields["session_id"] = sessionID + } + if eventName != "" { logFields["event_name"] = eventName } - geoIPData.SetLogFields(logFields) + geoIPData.SetClientLogFields(logFields) if len(authorizedAccessTypes) > 0 { logFields["authorized_access_types"] = authorizedAccessTypes @@ -1383,21 +1588,24 @@ func getStringArrayRequestParam(params common.APIParameters, name string) ([]str if params[name] == nil { return nil, errors.Tracef("missing param: %s", name) } - value, ok := params[name].([]interface{}) - if !ok { - return nil, errors.Tracef("invalid param: %s", name) - } - result := make([]string, len(value)) - for i, v := range value { - strValue, ok := v.(string) - if !ok { - return nil, errors.Tracef("invalid param: %s", name) + switch value := params[name].(type) { + case []string: + return value, nil + case []interface{}: + // JSON unmarshaling may decode the parameter as []interface{}. + result := make([]string, len(value)) + for i, v := range value { + strValue, ok := v.(string) + if !ok { + return nil, errors.Tracef("invalid param: %s", name) + } + result[i] = strValue } - result[i] = strValue + return result, nil + default: + return nil, errors.Tracef("invalid param: %s", name) } - - return result, nil } // Normalize reported client platform. Android clients, for example, report @@ -1426,12 +1634,6 @@ func isMobileClientPlatform(clientPlatform string) bool { // Input validators follow the legacy validations rules in psi_web. 
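A minimal sketch of the whitelist copy performed by copyBaseAndDialParams: retain only parameters named in the spec table, as a shallow copy of scalar values. The parameter names below are illustrative.

// Sketch only: shallow-copy only known, whitelisted params.
package main

import "fmt"

func copyKnownParams(params map[string]interface{}, knownNames []string) map[string]interface{} {
    paramsCopy := make(map[string]interface{})
    for _, name := range knownNames {
        value, ok := params[name]
        if !ok {
            continue
        }
        paramsCopy[name] = value
    }
    return paramsCopy
}

func main() {
    params := map[string]interface{}{
        "sponsor_id":     "0123456789ABCDEF",
        "client_version": "180",
        "unexpected":     "dropped",
    }
    fmt.Println(copyKnownParams(params, []string{"sponsor_id", "client_version"}))
}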
-func isServerSecret(config *Config, value string) bool { - return subtle.ConstantTimeCompare( - []byte(value), - []byte(config.WebServerSecret)) == 1 -} - func isHexDigits(_ *Config, value string) bool { // Allows both uppercase in addition to lowercase, for legacy support. return -1 == strings.IndexFunc(value, func(c rune) bool { @@ -1444,6 +1646,11 @@ func isBase64String(_ *Config, value string) bool { return err == nil } +func isUnpaddedBase64String(_ *Config, value string) bool { + _, err := base64.RawStdEncoding.DecodeString(value) + return err == nil +} + func isDigits(_ *Config, value string) bool { return -1 == strings.IndexFunc(value, func(c rune) bool { return c < '0' || c > '9' diff --git a/psiphon/server/config.go b/psiphon/server/config.go index 5de0deb39..af482da91 100644 --- a/psiphon/server/config.go +++ b/psiphon/server/config.go @@ -27,7 +27,6 @@ import ( "encoding/hex" "encoding/json" "encoding/pem" - "fmt" "net" "os" "strconv" @@ -39,6 +38,7 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/accesscontrol" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/crypto/ssh" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/osl" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" @@ -124,36 +124,6 @@ type Config struct { // ServerIPAddress is the public IP address of the server. ServerIPAddress string - // WebServerPort is the listening port of the web server. - // When <= 0, no web server component is run. - WebServerPort int - - // WebServerSecret is the unique secret value that the client - // must supply to make requests to the web server. - WebServerSecret string - - // WebServerCertificate is the certificate the client uses to - // authenticate the web server. - WebServerCertificate string - - // WebServerPrivateKey is the private key the web server uses to - // authenticate itself to clients. - WebServerPrivateKey string - - // WebServerPortForwardAddress specifies the expected network - // address (":") specified in a client's port forward - // HostToConnect and PortToConnect when the client is making a - // tunneled connection to the web server. This address is always - // exempted from validation against SSH_DISALLOWED_PORT_FORWARD_HOSTS - // and AllowTCPPorts. - WebServerPortForwardAddress string - - // WebServerPortForwardRedirectAddress specifies an alternate - // destination address to be substituted and dialed instead of - // the original destination when the port forward destination is - // WebServerPortForwardAddress. - WebServerPortForwardRedirectAddress string - // TunnelProtocolPorts specifies which tunnel protocols to run // and which ports to listen on for each protocol. Valid tunnel // protocols include: @@ -243,6 +213,17 @@ type Config struct { // appear in requests. This is used to defend against abuse. MeekRequiredHeaders map[string]string + // MeekServerCertificate specifies an optional certificate to use for meek + // servers, in place of the default, randomly generate certificate. When + // specified, the corresponding private key must be supplied in + // MeekServerPrivateKey. Any specified certificate is used for all meek + // listeners. 
+ MeekServerCertificate string + + // MeekServerPrivateKey is the private key corresponding to the optional + // MeekServerCertificate parameter. + MeekServerPrivateKey string + // MeekProxyForwardedForHeaders is a list of HTTP headers which // may be added by downstream HTTP proxies or CDNs in front // of clients. These headers supply the original client IP @@ -316,6 +297,11 @@ type Config struct { // is 0. MeekCachedResponsePoolBufferCount int + // MeekCachedResponsePoolBufferClientLimit is the maximum number of of + // shared buffers a single client may consume at once. A default of 32 is + // used when MeekCachedResponsePoolBufferClientLimit is 0. + MeekCachedResponsePoolBufferClientLimit int + // UDPInterceptUdpgwServerAddress specifies the network address of // a udpgw server which clients may be port forwarding to. When // specified, these TCP port forwards are intercepted and handled @@ -459,6 +445,52 @@ type Config struct { // entries are stored on a Psiphon server. OwnEncodedServerEntries map[string]string + // MeekServerRunInproxyBroker indicates whether to run an in-proxy broker + // endpoint and service under the meek server. + MeekServerRunInproxyBroker bool + + // MeekServerInproxyBrokerOnly indicates whether to run only an in-proxy + // broker under the meek server, and not run any meek tunnel protocol. To + // run the meek listener, a meek server protocol and port must still be + // specified in TunnelProtocolPorts, but no other tunnel protocol + // parameters are required. + MeekServerInproxyBrokerOnly bool + + // InproxyBrokerSessionPrivateKey specifies the broker's in-proxy session + // private key and derived public key used by in-proxy clients and + // proxies. This value is required when running an in-proxy broker. + InproxyBrokerSessionPrivateKey string + + // InproxyBrokerObfuscationRootSecret specifies the broker's in-proxy + // session root obfuscation secret used by in-proxy clients and proxies. + // This value is required when running an in-proxy broker. + InproxyBrokerObfuscationRootSecret string + + // InproxyBrokerServerEntrySignaturePublicKey specifies the public key + // used to verify Psiphon server entry signature. This value is required + // when running an in-proxy broker. + InproxyBrokerServerEntrySignaturePublicKey string + + // InproxyBrokerAllowCommonASNMatching overrides the default broker + // matching behavior which doesn't match non-personal in-proxy clients + // and proxies from the same ASN. This parameter is for testing only. + InproxyBrokerAllowCommonASNMatching bool + + // InproxyBrokerAllowBogonWebRTCConnections overrides the default broker + // SDP validation behavior, which doesn't allow private network WebRTC + // candidates. This parameter is for testing only. + InproxyBrokerAllowBogonWebRTCConnections bool + + // InproxyServerSessionPrivateKey specifies the server's in-proxy session + // private key and derived public key used by brokers. This value is + // required when running in-proxy tunnel protocols. + InproxyServerSessionPrivateKey string + + // InproxyServerObfuscationRootSecret specifies the server's in-proxy + // session root obfuscation secret used by brokers. This value is + // required when running in-proxy tunnel protocols. 
+ InproxyServerObfuscationRootSecret string + sshBeginHandshakeTimeout time.Duration sshHandshakeTimeout time.Duration peakUpstreamFailureRateMinimumSampleSize int @@ -499,11 +531,6 @@ func (config *Config) GetLogFileReopenConfig() (int, bool, os.FileMode) { return retries, create, mode } -// RunWebServer indicates whether to run a web server component. -func (config *Config) RunWebServer() bool { - return config.WebServerPort > 0 -} - // RunLoadMonitor indicates whether to monitor and log server load. func (config *Config) RunLoadMonitor() bool { return config.LoadMonitorPeriodSeconds > 0 @@ -570,28 +597,33 @@ func LoadConfig(configJSON []byte) (*Config, error) { return nil, errors.TraceNew("ServerIPAddress is required") } - if config.WebServerPort > 0 && (config.WebServerSecret == "" || config.WebServerCertificate == "" || - config.WebServerPrivateKey == "") { - - return nil, errors.TraceNew( - "Web server requires WebServerSecret, WebServerCertificate, WebServerPrivateKey") - } - - if config.WebServerPortForwardAddress != "" { - if err := validateNetworkAddress(config.WebServerPortForwardAddress, false); err != nil { - return nil, errors.TraceNew("WebServerPortForwardAddress is invalid") + if config.MeekServerRunInproxyBroker { + if config.InproxyBrokerSessionPrivateKey == "" { + return nil, errors.TraceNew("Inproxy Broker requires InproxyBrokerSessionPrivateKey") + } + if config.InproxyBrokerObfuscationRootSecret == "" { + return nil, errors.TraceNew("Inproxy Broker requires InproxyBrokerObfuscationRootSecret") } - } - - if config.WebServerPortForwardRedirectAddress != "" { - if config.WebServerPortForwardAddress == "" { - return nil, errors.TraceNew( - "WebServerPortForwardRedirectAddress requires WebServerPortForwardAddress") + // There must be at least one meek tunnel protocol configured for + // MeekServer to run and host an in-proxy broker. Since each + // MeekServer instance runs its own in-proxy Broker instance, allow + // at most one meek tunnel protocol to be configured so all + // connections to the broker use the same, unambiguous instance. + meekServerCount := 0 + for tunnelProtocol, _ := range config.TunnelProtocolPorts { + if protocol.TunnelProtocolUsesMeek(tunnelProtocol) { + meekServerCount += 1 + } + } + if meekServerCount != 1 { + return nil, errors.TraceNew("Inproxy Broker requires one MeekServer instance") } + } - if err := validateNetworkAddress(config.WebServerPortForwardRedirectAddress, false); err != nil { - return nil, errors.TraceNew("WebServerPortForwardRedirectAddress is invalid") + if config.MeekServerInproxyBrokerOnly { + if !config.MeekServerRunInproxyBroker { + return nil, errors.TraceNew("Inproxy Broker-only mode requires MeekServerRunInproxyBroker") } } @@ -599,6 +631,21 @@ func LoadConfig(configJSON []byte) (*Config, error) { if !common.Contains(protocol.SupportedTunnelProtocols, tunnelProtocol) { return nil, errors.Tracef("Unsupported tunnel protocol: %s", tunnelProtocol) } + + if config.MeekServerInproxyBrokerOnly && protocol.TunnelProtocolUsesMeek(tunnelProtocol) { + // In in-proxy broker-only mode, the TunnelProtocolPorts must be + // specified in order to run the MeekServer, but none of the + // following meek tunnel parameters are required. + continue + } + + if protocol.TunnelProtocolUsesInproxy(tunnelProtocol) && !inproxy.Enabled() { + // Note that, technically, it may be possible to allow this case, + // since PSIPHON_ENABLE_INPROXY is currently required only for + // client/proxy-side WebRTC functionality, although that could change. 
+ return nil, errors.TraceNew("inproxy implementation is not enabled") + } + if protocol.TunnelProtocolUsesSSH(tunnelProtocol) || protocol.TunnelProtocolUsesObfuscatedSSH(tunnelProtocol) { if config.SSHPrivateKey == "" || config.SSHServerVersion == "" || @@ -781,9 +828,9 @@ type GenerateConfigParams struct { LogFilename string SkipPanickingLogWriter bool LogLevel string + ServerEntrySignaturePublicKey string + ServerEntrySignaturePrivateKey string ServerIPAddress string - WebServerPort int - EnableSSHAPIRequests bool TunnelProtocolPorts map[string]int TunnelProtocolPassthroughAddresses map[string]string TrafficRulesConfigFilename string @@ -822,12 +869,9 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, []byt } usedPort := make(map[int]bool) - if params.WebServerPort != 0 { - usedPort[params.WebServerPort] = true - } - usingMeek := false usingTLSOSSH := false + usingInproxy := false for tunnelProtocol, port := range params.TunnelProtocolPorts { @@ -848,6 +892,10 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, []byt protocol.TunnelProtocolUsesMeekHTTPS(tunnelProtocol) { usingMeek = true } + + if protocol.TunnelProtocolUsesInproxy(tunnelProtocol) { + usingInproxy = true + } } // One test mode populates the tactics config file; this will generate @@ -858,27 +906,6 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, []byt return nil, nil, nil, nil, nil, errors.TraceNew("invalid tactics parameters") } - // Web server config - - var webServerSecret, webServerCertificate, - webServerPrivateKey, webServerPortForwardAddress string - - if params.WebServerPort != 0 { - webServerSecretBytes, err := common.MakeSecureRandomBytes(WEB_SERVER_SECRET_BYTE_LENGTH) - if err != nil { - return nil, nil, nil, nil, nil, errors.Trace(err) - } - webServerSecret = hex.EncodeToString(webServerSecretBytes) - - webServerCertificate, webServerPrivateKey, err = common.GenerateWebServerCertificate("") - if err != nil { - return nil, nil, nil, nil, nil, errors.Trace(err) - } - - webServerPortForwardAddress = net.JoinHostPort( - params.ServerIPAddress, strconv.Itoa(params.WebServerPort)) - } - // SSH config rsaKey, err := rsa.GenerateKey(rand.Reader, SSH_RSA_HOST_KEY_BITS) @@ -947,6 +974,30 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, []byt meekObfuscatedKey = hex.EncodeToString(meekObfuscatedKeyBytes) } + // Inproxy config + + var inproxyServerSessionPublicKey, + inproxyServerSessionPrivateKey, + inproxyServerObfuscationRootSecret string + + if usingInproxy { + privateKey, err := inproxy.GenerateSessionPrivateKey() + if err != nil { + return nil, nil, nil, nil, nil, errors.Trace(err) + } + inproxyServerSessionPrivateKey = privateKey.String() + publicKey, err := privateKey.GetPublicKey() + if err != nil { + return nil, nil, nil, nil, nil, errors.Trace(err) + } + inproxyServerSessionPublicKey = publicKey.String() + obfuscationRootSecret, err := inproxy.GenerateRootObfuscationSecret() + if err != nil { + return nil, nil, nil, nil, nil, errors.Trace(err) + } + inproxyServerObfuscationRootSecret = obfuscationRootSecret.String() + } + // Other config discoveryValueHMACKeyBytes, err := common.MakeSecureRandomBytes(DISCOVERY_VALUE_KEY_BYTE_LENGTH) @@ -955,6 +1006,14 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, []byt } discoveryValueHMACKey := base64.StdEncoding.EncodeToString(discoveryValueHMACKeyBytes) + // Generate a legacy web server secret, to accomodate test cases, such as 
deriving + // a server entry tag when no tag is present. + webServerSecretBytes, err := common.MakeSecureRandomBytes(WEB_SERVER_SECRET_BYTE_LENGTH) + if err != nil { + return nil, nil, nil, nil, nil, errors.Trace(err) + } + webServerSecret := hex.EncodeToString(webServerSecretBytes) + // Assemble configs and server entry // Note: this config is intended for either testing or as an illustrative @@ -978,11 +1037,6 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, []byt HostID: "example-host-id", ServerIPAddress: params.ServerIPAddress, DiscoveryValueHMACKey: discoveryValueHMACKey, - WebServerPort: params.WebServerPort, - WebServerSecret: webServerSecret, - WebServerCertificate: webServerCertificate, - WebServerPrivateKey: webServerPrivateKey, - WebServerPortForwardAddress: webServerPortForwardAddress, SSHPrivateKey: string(sshPrivateKey), SSHServerVersion: sshServerVersion, SSHUserName: sshUserName, @@ -1002,6 +1056,8 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, []byt TacticsConfigFilename: params.TacticsConfigFilename, LegacyPassthrough: params.LegacyPassthrough, EnableGQUIC: params.EnableGQUIC, + InproxyServerSessionPrivateKey: inproxyServerSessionPrivateKey, + InproxyServerObfuscationRootSecret: inproxyServerObfuscationRootSecret, } encodedConfig, err := json.MarshalIndent(config, "\n", " ") @@ -1084,15 +1140,9 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, []byt } } - capabilities := []string{} - - if params.EnableSSHAPIRequests { - capabilities = append(capabilities, protocol.CAPABILITY_SSH_API_REQUESTS) - } + // Capabilities - if params.WebServerPort != 0 { - capabilities = append(capabilities, protocol.CAPABILITY_UNTUNNELED_WEB_API_REQUESTS) - } + capabilities := []string{protocol.CAPABILITY_SSH_API_REQUESTS} var frontingProviderID string @@ -1100,24 +1150,34 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, []byt capability := protocol.GetCapability(tunnelProtocol) - // Note: do not add passthrough annotation if HTTP unfronted meek - // because it would result in an invalid capability. - if params.Passthrough && protocol.TunnelProtocolSupportsPassthrough(tunnelProtocol) && tunnelProtocol != protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK { - if !params.LegacyPassthrough { - capability += "-PASSTHROUGH-v2" - } else { - capability += "-PASSTHROUGH" + // In-proxy tunnel protocol capabilities don't include + // v1/-PASSTHROUGHv2 suffixes; see comments in ServerEntry.hasCapability. + if !protocol.TunnelProtocolUsesInproxy(tunnelProtocol) { + + // Note: do not add passthrough annotation if HTTP unfronted meek + // because it would result in an invalid capability. 
+ if params.Passthrough && + protocol.TunnelProtocolSupportsPassthrough(tunnelProtocol) && + tunnelProtocol != protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK { + + if !params.LegacyPassthrough { + capability += "-PASSTHROUGH-v2" + } else { + capability += "-PASSTHROUGH" + } } - } - if tunnelProtocol == protocol.TUNNEL_PROTOCOL_QUIC_OBFUSCATED_SSH && !params.EnableGQUIC { - capability += "v1" + if tunnelProtocol == protocol.TUNNEL_PROTOCOL_QUIC_OBFUSCATED_SSH && + !params.EnableGQUIC { + + capability += "v1" + } } capabilities = append(capabilities, capability) if params.TacticsRequestPublicKey != "" && params.TacticsRequestObfuscatedKey != "" && - protocol.TunnelProtocolUsesMeek(tunnelProtocol) { + protocol.TunnelProtocolSupportsTactics(tunnelProtocol) { capabilities = append(capabilities, protocol.GetTacticsCapability(tunnelProtocol)) } @@ -1127,64 +1187,109 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, []byt } } - sshPort := params.TunnelProtocolPorts[protocol.TUNNEL_PROTOCOL_SSH] - obfuscatedSSHPort := params.TunnelProtocolPorts[protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH] - obfuscatedSSHQUICPort := params.TunnelProtocolPorts[protocol.TUNNEL_PROTOCOL_QUIC_OBFUSCATED_SSH] - tlsOSSHPort := params.TunnelProtocolPorts[protocol.TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH] - - // Meek port limitations - // - fronted meek protocols are hard-wired in the client to be port 443 or 80. - // - only one other meek port may be specified. - meekPort := params.TunnelProtocolPorts[protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK] - if meekPort == 0 { - meekPort = params.TunnelProtocolPorts[protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS] - } - if meekPort == 0 { - meekPort = params.TunnelProtocolPorts[protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_SESSION_TICKET] + // Tunnel protocol ports + + // Limitations: + // - Only one meek port may be specified per server entry. + // - Neither fronted meek nor Conjuure protocols are supported here. + + var sshPort, obfuscatedSSHPort, meekPort, obfuscatedSSHQUICPort, tlsOSSHPort int + var inproxySSHPort, inproxyOSSHPort, inproxyQUICPort, inproxyMeekPort, inproxyTlsOSSHPort int + + for tunnelProtocol, port := range params.TunnelProtocolPorts { + + if !protocol.TunnelProtocolUsesInproxy(tunnelProtocol) { + switch tunnelProtocol { + case protocol.TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH: + tlsOSSHPort = port + case protocol.TUNNEL_PROTOCOL_SSH: + sshPort = port + case protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH: + obfuscatedSSHPort = port + case protocol.TUNNEL_PROTOCOL_QUIC_OBFUSCATED_SSH: + obfuscatedSSHQUICPort = port + case protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS, + protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_SESSION_TICKET, + protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK: + meekPort = port + } + } else { + switch protocol.TunnelProtocolMinusInproxy(tunnelProtocol) { + case protocol.TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH: + inproxyTlsOSSHPort = port + case protocol.TUNNEL_PROTOCOL_SSH: + inproxySSHPort = port + case protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH: + inproxyOSSHPort = port + case protocol.TUNNEL_PROTOCOL_QUIC_OBFUSCATED_SSH: + inproxyQUICPort = port + case protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS, + protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_SESSION_TICKET, + protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK: + inproxyMeekPort = port + } + } } // Note: fronting params are a stub; this server entry will exercise // client and server fronting code paths, but not actually traverse // a fronting hop. 
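A minimal sketch of the capability string assembly above, where passthrough and gQUIC-disabled suffixes are appended only for non-in-proxy tunnel protocols. The boolean predicates and protocol names are illustrative stand-ins for the protocol package helpers.

// Sketch only: conditional capability suffixes per tunnel protocol.
package main

import "fmt"

func makeCapability(
    baseCapability string,
    isInproxy, supportsPassthrough, legacyPassthrough, disableGQUIC, isQUIC bool) string {

    capability := baseCapability
    if !isInproxy {
        if supportsPassthrough {
            if !legacyPassthrough {
                capability += "-PASSTHROUGH-v2"
            } else {
                capability += "-PASSTHROUGH"
            }
        }
        if isQUIC && disableGQUIC {
            capability += "v1"
        }
    }
    return capability
}

func main() {
    fmt.Println(makeCapability("UNFRONTED-MEEK-HTTPS", false, true, false, false, false))
    fmt.Println(makeCapability("INPROXY-WEBRTC-OSSH", true, true, false, false, false))
}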
- serverEntryWebServerPort := "" - strippedWebServerCertificate := "" - - if params.WebServerPort != 0 { - serverEntryWebServerPort = fmt.Sprintf("%d", params.WebServerPort) - - // Server entry format omits the BEGIN/END lines and newlines - lines := strings.Split(webServerCertificate, "\n") - strippedWebServerCertificate = strings.Join(lines[1:len(lines)-2], "") + serverEntry := &protocol.ServerEntry{ + Tag: prng.Base64String(32), + IpAddress: params.ServerIPAddress, + WebServerSecret: webServerSecret, + TlsOSSHPort: tlsOSSHPort, + SshPort: sshPort, + SshUsername: sshUserName, + SshPassword: sshPassword, + SshHostKey: base64.RawStdEncoding.EncodeToString(sshPublicKey.Marshal()), + SshObfuscatedPort: obfuscatedSSHPort, + SshObfuscatedQUICPort: obfuscatedSSHQUICPort, + LimitQUICVersions: params.LimitQUICVersions, + SshObfuscatedKey: obfuscatedSSHKey, + Capabilities: capabilities, + Region: "US", + ProviderID: strings.ToUpper(prng.HexString(8)), + FrontingProviderID: frontingProviderID, + MeekServerPort: meekPort, + MeekCookieEncryptionPublicKey: meekCookieEncryptionPublicKey, + MeekObfuscatedKey: meekObfuscatedKey, + MeekFrontingHosts: []string{params.ServerIPAddress}, + MeekFrontingAddresses: []string{params.ServerIPAddress}, + MeekFrontingDisableSNI: false, + TacticsRequestPublicKey: tacticsRequestPublicKey, + TacticsRequestObfuscatedKey: tacticsRequestObfuscatedKey, + ConfigurationVersion: 1, + InproxySessionPublicKey: inproxyServerSessionPublicKey, + InproxySessionRootObfuscationSecret: inproxyServerObfuscationRootSecret, + InproxySSHPort: inproxySSHPort, + InproxyOSSHPort: inproxyOSSHPort, + InproxyQUICPort: inproxyQUICPort, + InproxyMeekPort: inproxyMeekPort, + InproxyTlsOSSHPort: inproxyTlsOSSHPort, } - serverEntry := &protocol.ServerEntry{ - IpAddress: params.ServerIPAddress, - WebServerPort: serverEntryWebServerPort, - WebServerSecret: webServerSecret, - WebServerCertificate: strippedWebServerCertificate, - TlsOSSHPort: tlsOSSHPort, - SshPort: sshPort, - SshUsername: sshUserName, - SshPassword: sshPassword, - SshHostKey: base64.RawStdEncoding.EncodeToString(sshPublicKey.Marshal()), - SshObfuscatedPort: obfuscatedSSHPort, - SshObfuscatedQUICPort: obfuscatedSSHQUICPort, - LimitQUICVersions: params.LimitQUICVersions, - SshObfuscatedKey: obfuscatedSSHKey, - Capabilities: capabilities, - Region: "US", - ProviderID: prng.HexString(8), - FrontingProviderID: frontingProviderID, - MeekServerPort: meekPort, - MeekCookieEncryptionPublicKey: meekCookieEncryptionPublicKey, - MeekObfuscatedKey: meekObfuscatedKey, - MeekFrontingHosts: []string{params.ServerIPAddress}, - MeekFrontingAddresses: []string{params.ServerIPAddress}, - MeekFrontingDisableSNI: false, - TacticsRequestPublicKey: tacticsRequestPublicKey, - TacticsRequestObfuscatedKey: tacticsRequestObfuscatedKey, - ConfigurationVersion: 1, + if params.ServerEntrySignaturePublicKey != "" { + serverEntryJSON, err := json.Marshal(serverEntry) + if err != nil { + return nil, nil, nil, nil, nil, errors.Trace(err) + } + var serverEntryFields protocol.ServerEntryFields + err = json.Unmarshal(serverEntryJSON, &serverEntryFields) + if err != nil { + return nil, nil, nil, nil, nil, errors.Trace(err) + } + err = serverEntryFields.AddSignature( + params.ServerEntrySignaturePublicKey, params.ServerEntrySignaturePrivateKey) + if err != nil { + return nil, nil, nil, nil, nil, errors.Trace(err) + } + + serverEntry, err = serverEntryFields.GetServerEntry() + if err != nil { + return nil, nil, nil, nil, nil, errors.Trace(err) + } } encodedServerEntry, err := 
protocol.EncodeServerEntry(serverEntry) diff --git a/psiphon/server/discovery/classic.go b/psiphon/server/discovery/classic.go index e6ad7d401..4f0fc144e 100644 --- a/psiphon/server/discovery/classic.go +++ b/psiphon/server/discovery/classic.go @@ -83,6 +83,12 @@ func (c *classicDiscovery) selectServers(clientIP net.IP) []*psinet.DiscoverySer // discoverServers selects new encoded server entries to be "discovered" by // the client, using the discoveryValue -- a function of the client's IP // address -- as the input into the discovery algorithm. +// +// Warning: if discoverServers is called as the set of discoverable servers +// changes, i.e. a new server becomes un/discoverable, then there's a remote +// possibility that discoverServers returns nil because of a race between +// the timer that updates c.buckets firing and discoverServers obtaining a +// reference to the value of c.buckets. func (c *classicDiscovery) discoverServers(discoveryValue int) []*psinet.DiscoveryServer { discoveryDate := c.clk.Now().UTC() @@ -96,6 +102,7 @@ func (c *classicDiscovery) discoverServers(discoveryValue int) []*psinet.Discove } timeInSeconds := int(discoveryDate.Unix()) + // TODO: ensure that each server in buckets is discoverable on discoveryDate. servers := selectServers(buckets, timeInSeconds, discoveryValue, discoveryDate) return servers @@ -116,6 +123,11 @@ func (c *classicDiscovery) discoverServers(discoveryValue int) []*psinet.Discove // both aspects determine which server is selected. IP address is given the // priority: if there are only a couple of servers, for example, IP address alone // determines the outcome. +// +// Warning: If discoveryDate does not fall within the discovery date range of the +// selected server, then nil will be returned. For this reason, an attempt should +// be made to ensure that buckets only contains discovery servers that are +// discoverable on discoveryDate. func selectServers( buckets [][]*psinet.DiscoveryServer, timeInSeconds, diff --git a/psiphon/server/geoip.go b/psiphon/server/geoip.go index d05d7d202..f088ed2f3 100644 --- a/psiphon/server/geoip.go +++ b/psiphon/server/geoip.go @@ -32,11 +32,9 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" maxminddb "github.com/oschwald/maxminddb-golang" - cache "github.com/patrickmn/go-cache" ) const ( - GEOIP_SESSION_CACHE_TTL = 60 * time.Minute GEOIP_UNKNOWN_VALUE = "None" GEOIP_DATABASE_TYPE_ISP = "GeoIP2-ISP" ) @@ -65,22 +63,35 @@ func NewGeoIPData() GeoIPData { } } -// SetLogFields adds the GeoIPData fields to LogFields, following Psiphon -// metric field name and format conventions. -func (g GeoIPData) SetLogFields(logFields LogFields) { - g.SetLogFieldsWithPrefix("", logFields) +// SetClientLogFields adds the GeoIPData fields to LogFields, following +// Psiphon field name and format conventions. For example, GeoIPData.Country +// is logged as client_region. +func (g GeoIPData) SetClientLogFields(logFields LogFields) { + g.SetClientLogFieldsWithPrefix("", logFields) } -func (g GeoIPData) SetLogFieldsWithPrefix(prefix string, logFields LogFields) { +// SetClientLogFieldsWithPrefix adds the GeoIPData fields to LogFields, +// following Psiphon field name and format conventions and with the specified +// prefix. For example, GeoIPData.Country is logged as +// duplicate_authorization_client_region for the prefix "duplicate_authorization_". 
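A minimal sketch of the prefix/name log field naming convention described here, where the same GeoIP data may be logged as client_* fields, as proxy_* fields, or with an additional prefix such as duplicate_authorization_. The GeoIPData shape below is a reduced stand-in.

// Sketch only: building prefixed GeoIP log field names.
package main

import (
    "fmt"
    "strings"
)

type geoIPData struct {
    Country, City, ISP string
}

func setLogFields(g geoIPData, prefix, name string, logFields map[string]interface{}) {
    // Space replacement is retained from legacy space-delimited logging so
    // downstream stats aggregation is not impacted.
    logFields[fmt.Sprintf("%s%s_region", prefix, name)] = strings.Replace(g.Country, " ", "_", -1)
    logFields[fmt.Sprintf("%s%s_city", prefix, name)] = strings.Replace(g.City, " ", "_", -1)
    logFields[fmt.Sprintf("%s%s_isp", prefix, name)] = strings.Replace(g.ISP, " ", "_", -1)
}

func main() {
    fields := make(map[string]interface{})
    g := geoIPData{Country: "CA", City: "Quebec City", ISP: "Example ISP Inc"}
    setLogFields(g, "", "client", fields)                         // client_region, client_city, ...
    setLogFields(g, "duplicate_authorization_", "client", fields) // duplicate_authorization_client_region, ...
    fmt.Println(fields)
}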
+func (g GeoIPData) SetClientLogFieldsWithPrefix(prefix string, logFields LogFields) { + g.SetLogFieldsWithPrefix(prefix, "client", logFields) +} + +// SetLogFieldsWithPrefix adds the GeoIPData fields to LogFields, following +// Psiphon field name and format conventions and with the specified prefix +// and name. For example, GeoIPData.Country is logged as proxy_region for the +// prefix "" and name "proxy". +func (g GeoIPData) SetLogFieldsWithPrefix(prefix string, name string, logFields LogFields) { // In psi_web, the space replacement was done to accommodate space // delimited logging, which is no longer required; we retain the // transformation so that stats aggregation isn't impacted. - logFields[prefix+"client_region"] = strings.Replace(g.Country, " ", "_", -1) - logFields[prefix+"client_city"] = strings.Replace(g.City, " ", "_", -1) - logFields[prefix+"client_isp"] = strings.Replace(g.ISP, " ", "_", -1) - logFields[prefix+"client_asn"] = strings.Replace(g.ASN, " ", "_", -1) - logFields[prefix+"client_aso"] = strings.Replace(g.ASO, " ", "_", -1) + logFields[fmt.Sprintf("%s%s_region", prefix, name)] = strings.Replace(g.Country, " ", "_", -1) + logFields[fmt.Sprintf("%s%s_city", prefix, name)] = strings.Replace(g.City, " ", "_", -1) + logFields[fmt.Sprintf("%s%s_isp", prefix, name)] = strings.Replace(g.ISP, " ", "_", -1) + logFields[fmt.Sprintf("%s%s_asn", prefix, name)] = strings.Replace(g.ASN, " ", "_", -1) + logFields[fmt.Sprintf("%s%s_aso", prefix, name)] = strings.Replace(g.ASO, " ", "_", -1) } // GeoIPService implements GeoIP lookup and session/GeoIP caching. @@ -88,8 +99,7 @@ func (g GeoIPData) SetLogFieldsWithPrefix(prefix string, logFields LogFields) { // supports hot reloading of MaxMind data while the server is // running. type GeoIPService struct { - databases []*geoIPDatabase - sessionCache *cache.Cache + databases []*geoIPDatabase } type geoIPDatabase struct { @@ -105,8 +115,7 @@ type geoIPDatabase struct { func NewGeoIPService(databaseFilenames []string) (*GeoIPService, error) { geoIP := &GeoIPService{ - databases: make([]*geoIPDatabase, len(databaseFilenames)), - sessionCache: cache.New(GEOIP_SESSION_CACHE_TTL, 1*time.Minute), + databases: make([]*geoIPDatabase, len(databaseFilenames)), } for i, filename := range databaseFilenames { @@ -279,44 +288,3 @@ func (geoIP *GeoIPService) lookupIP(IP net.IP, ISPOnly bool) GeoIPData { return result } - -// SetSessionCache adds the sessionID/geoIPData pair to the -// session cache. This value will not expire; the caller must -// call MarkSessionCacheToExpire to initiate expiry. -// Calling SetSessionCache for an existing sessionID will -// replace the previous value and reset any expiry. -func (geoIP *GeoIPService) SetSessionCache(sessionID string, geoIPData GeoIPData) { - geoIP.sessionCache.Set(sessionID, geoIPData, cache.NoExpiration) -} - -// MarkSessionCacheToExpire initiates expiry for an existing -// session cache entry, if the session ID is found in the cache. -// Concurrency note: SetSessionCache and MarkSessionCacheToExpire -// should not be called concurrently for a single session ID. -func (geoIP *GeoIPService) MarkSessionCacheToExpire(sessionID string) { - geoIPData, found := geoIP.sessionCache.Get(sessionID) - // Note: potential race condition between Get and Set. In practice, - // the tunnel server won't clobber a SetSessionCache value by calling - // MarkSessionCacheToExpire concurrently. 
- if found { - geoIP.sessionCache.Set(sessionID, geoIPData, cache.DefaultExpiration) - } -} - -// GetSessionCache returns the cached GeoIPData for the -// specified session ID; a blank GeoIPData is returned -// if the session ID is not found in the cache. -func (geoIP *GeoIPService) GetSessionCache(sessionID string) GeoIPData { - geoIPData, found := geoIP.sessionCache.Get(sessionID) - if !found { - return NewGeoIPData() - } - return geoIPData.(GeoIPData) -} - -// InSessionCache returns whether the session ID is present -// in the session cache. -func (geoIP *GeoIPService) InSessionCache(sessionID string) bool { - _, found := geoIP.sessionCache.Get(sessionID) - return found -} diff --git a/psiphon/server/listener.go b/psiphon/server/listener.go index e6d01de16..735cebd6e 100644 --- a/psiphon/server/listener.go +++ b/psiphon/server/listener.go @@ -38,6 +38,9 @@ var errRestrictedProvider = std_errors.New("restricted provider") // limited to GeoIP attributes as the client has not yet sent API parameters. // GeoIP uses the immediate peer IP, and so TacticsListener is suitable only // for tactics that do not require the original client GeoIP when fronted. +// +// In the case of inproxy tunnel protocols, the peer IP is the inproxy proxy +// IP, and the tactics apply to the 2nd hop. type TacticsListener struct { net.Listener support *SupportServices @@ -155,7 +158,7 @@ func (listener *TacticsListener) accept() (net.Conn, error) { replaySeed, doReplay := listener.support.ReplayCache.GetReplayFragmentor( listener.tunnelProtocol, geoIPData) - if listener.tunnelProtocol == protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH { + if protocol.TunnelProtocolIsObfuscatedSSH(listener.tunnelProtocol) { replaySeed = nil doReplay = true } diff --git a/psiphon/server/log.go b/psiphon/server/log.go index cf80abf10..c5c477161 100644 --- a/psiphon/server/log.go +++ b/psiphon/server/log.go @@ -163,6 +163,10 @@ func (logger *commonLogger) LogMetric(metric string, fields common.LogFields) { logger.traceLogger.LogRawFieldsWithTimestamp(LogFields(fields)) } +func (logger *commonLogger) IsLogLevelDebug() bool { + return logger.traceLogger.Level == logrus.DebugLevel +} + // CommonLogger wraps a TraceLogger instance with an interface that conforms // to common.Logger. This is used to make the TraceLogger available to other // packages that don't import the "server" package. 
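A minimal sketch of gating verbose log-field construction behind a level check, as the handshake debug log now does with IsLogLevelDebug. This uses logrus directly for illustration; the server wraps logrus in its own TraceLogger and commonLogger types.

// Sketch only: skip building large field sets unless debug logging is enabled.
package main

import (
    "github.com/sirupsen/logrus"
)

func main() {
    logger := logrus.New()
    logger.SetLevel(logrus.DebugLevel)

    if logger.IsLevelEnabled(logrus.DebugLevel) {
        // Only pay for building the (potentially large) field set when the
        // debug level is actually enabled.
        fields := logrus.Fields{"event_name": "handshake", "param_count": 42}
        logger.WithFields(fields).Debug("handshake")
    }
}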
@@ -332,7 +336,7 @@ func InitLogging(config *Config) (retErr error) { } func IsLogLevelDebug() bool { - return log.Logger.Level == logrus.DebugLevel + return log.Level == logrus.DebugLevel } func init() { diff --git a/psiphon/server/meek.go b/psiphon/server/meek.go index 859236fdd..a0997f2e5 100644 --- a/psiphon/server/meek.go +++ b/psiphon/server/meek.go @@ -23,13 +23,15 @@ import ( "bytes" "context" "crypto/rand" - std_tls "crypto/tls" + "crypto/subtle" + "crypto/tls" "encoding/base64" "encoding/hex" "encoding/json" std_errors "errors" "hash/crc64" "io" + "io/ioutil" "net" "net/http" "runtime" @@ -39,14 +41,16 @@ import ( "sync/atomic" "time" - tls "github.com/Psiphon-Labs/psiphon-tls" + psiphon_tls "github.com/Psiphon-Labs/psiphon-tls" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/monotime" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/obfuscator" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tactics" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/transforms" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/values" lrucache "github.com/cognusion/go-cache-lru" @@ -90,6 +94,9 @@ const ( MEEK_DEFAULT_RESPONSE_BUFFER_LENGTH = 65536 MEEK_DEFAULT_POOL_BUFFER_LENGTH = 65536 MEEK_DEFAULT_POOL_BUFFER_COUNT = 2048 + MEEK_DEFAULT_POOL_BUFFER_CLIENT_LIMIT = 32 + MEEK_ENDPOINT_MAX_REQUEST_PAYLOAD_LENGTH = 65536 + MEEK_MAX_SESSION_COUNT = 1000000 ) // MeekServer implements the meek protocol, which tunnels TCP traffic (in the case of Psiphon, @@ -115,10 +122,11 @@ type MeekServer struct { skipExtendedTurnAroundThreshold int maxSessionStaleness time.Duration httpClientIOTimeout time.Duration - tlsConfig *tls.Config + stdTLSConfig *tls.Config + psiphonTLSConfig *psiphon_tls.Config obfuscatorSeedHistory *obfuscator.SeedHistory clientHandler func(clientConn net.Conn, data *additionalTransportData) - openConns *common.Conns + openConns *common.Conns[net.Conn] stopBroadcast <-chan struct{} sessionsLock sync.RWMutex sessions map[string]*meekSession @@ -129,6 +137,7 @@ type MeekServer struct { rateLimitCount int rateLimitSignalGC chan struct{} normalizer *transforms.HTTPNormalizerListener + inproxyBroker *inproxy.Broker } // NewMeekServer initializes a new meek server. @@ -141,6 +150,17 @@ func NewMeekServer( clientHandler func(clientConn net.Conn, data *additionalTransportData), stopBroadcast <-chan struct{}) (*MeekServer, error) { + // With fronting, MeekRequiredHeaders can be used to ensure that the + // request is coming through a CDN that's configured to add the + // specified, secret header values. Configuring the MeekRequiredHeaders + // scheme is required when running an in-proxy broker. 
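For context, MeekRequiredHeaders maps header names to the secret values that the fronting CDN is configured to inject. A hedged sketch of such a configuration (the header name and value below are hypothetical):

// Hypothetical example; the fronting CDN must add the same header and value to
// every request it forwards to this meek server. ServeHTTP compares each
// configured entry against the request with subtle.ConstantTimeCompare and
// terminates the connection on a mismatch.
var exampleMeekRequiredHeaders = map[string]string{
	"X-Example-Fronting-Secret": "<long random per-deployment value>",
}

The check that follows refuses to run a fronted in-proxy broker unless at least one such required header is configured.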
+ if isFronted && + support.Config.MeekServerRunInproxyBroker && + len(support.Config.MeekRequiredHeaders) < 1 { + + return nil, errors.TraceNew("missing required header") + } + passthroughAddress := support.Config.TunnelProtocolPassthroughAddresses[listenerTunnelProtocol] turnAroundTimeout := MEEK_DEFAULT_TURN_AROUND_TIMEOUT @@ -198,6 +218,11 @@ func NewMeekServer( bufferCount = support.Config.MeekCachedResponsePoolBufferCount } + bufferPoolClientLimit := MEEK_DEFAULT_POOL_BUFFER_CLIENT_LIMIT + if support.Config.MeekCachedResponsePoolBufferClientLimit != 0 { + bufferPoolClientLimit = support.Config.MeekCachedResponsePoolBufferClientLimit + } + _, thresholdSeconds, _, _, _, _, _, _, reapFrequencySeconds, maxEntries := support.TrafficRulesSet.GetMeekRateLimiterConfig() @@ -206,7 +231,14 @@ func NewMeekServer( time.Duration(reapFrequencySeconds)*time.Second, maxEntries) - bufferPool := NewCachedResponseBufferPool(bufferLength, bufferCount) + bufferPool := NewCachedResponseBufferPool( + bufferLength, bufferCount, bufferPoolClientLimit) + + // Limitation: rate limiting and resource limiting are handled by external + // components, and MeekServer enforces only a sanity check limit on the + // number of entries in MeekServer.sessions. + // + // See comment in newSSHServer for more details. meekServer := &MeekServer{ support: support, @@ -222,7 +254,7 @@ func NewMeekServer( httpClientIOTimeout: httpClientIOTimeout, obfuscatorSeedHistory: obfuscator.NewSeedHistory(nil), clientHandler: clientHandler, - openConns: common.NewConns(), + openConns: common.NewConns[net.Conn](), stopBroadcast: stopBroadcast, sessions: make(map[string]*meekSession), checksumTable: checksumTable, @@ -232,12 +264,45 @@ func NewMeekServer( } if useTLS { - tlsConfig, err := meekServer.makeMeekTLSConfig( - isFronted, useObfuscatedSessionTickets) - if err != nil { - return nil, errors.Trace(err) + + // For fronted meek servers, crypto/tls is used to ensure that + // net/http.Server.Serve will find *crypto/tls.Conn types, as + // required for enabling HTTP/2. The fronted case does not + // support or require the TLS passthrough or obfuscated session + // ticket mechanisms, which are implemented in psiphon-tls. HTTP/2 is + // preferred for fronted meek servers in order to multiplex many + // concurrent requests, either from many tunnel clients or + // many/individual in-proxy broker clients, over a single network + // connection. + // + // For direct meek servers, psiphon-tls is used to provide the TLS + // passthrough or obfuscated session ticket obfuscation mechanisms. + // Direct meek servers do not enable HTTP/2. Each individual meek + // tunnel client will have its own network connection and each client + // has only a single in-flight meek request at a time.
+ + if isFronted { + + if useObfuscatedSessionTickets { + return nil, errors.TraceNew("obfuscated session tickets unsupported") + } + if meekServer.passthroughAddress != "" { + return nil, errors.TraceNew("passthrough unsupported") + } + tlsConfig, err := meekServer.makeFrontedMeekTLSConfig() + if err != nil { + return nil, errors.Trace(err) + } + meekServer.stdTLSConfig = tlsConfig + } else { + + tlsConfig, err := meekServer.makeDirectMeekTLSConfig( + useObfuscatedSessionTickets) + if err != nil { + return nil, errors.Trace(err) + } + meekServer.psiphonTLSConfig = tlsConfig } - meekServer.tlsConfig = tlsConfig } if useHTTPNormalizer && protocol.TunnelProtocolUsesMeekHTTPNormalizer(listenerTunnelProtocol) { @@ -247,9 +312,86 @@ func NewMeekServer( meekServer.listener = normalizer } + // Initialize in-proxy broker service + + if support.Config.MeekServerRunInproxyBroker { + + if !inproxy.Enabled() { + // Note that, technically, it may be possible to allow this case, + // since PSIPHON_ENABLE_INPROXY is currently required only for + // client/proxy-side WebRTC functionality, although that could change. + return nil, errors.TraceNew("inproxy implementation is not enabled") + } + + if support.Config.InproxyBrokerAllowCommonASNMatching { + inproxy.SetAllowCommonASNMatching(true) + } + + if support.Config.InproxyBrokerAllowBogonWebRTCConnections { + inproxy.SetAllowBogonWebRTCConnections(true) + } + + sessionPrivateKey, err := inproxy.SessionPrivateKeyFromString( + support.Config.InproxyBrokerSessionPrivateKey) + if err != nil { + return nil, errors.Trace(err) + } + + obfuscationRootSecret, err := inproxy.ObfuscationSecretFromString( + support.Config.InproxyBrokerObfuscationRootSecret) + if err != nil { + return nil, errors.Trace(err) + } + + lookupGeoIPData := func(IP string) common.GeoIPData { + return common.GeoIPData(support.GeoIPService.Lookup(IP)) + } + + inproxyBroker, err := inproxy.NewBroker( + &inproxy.BrokerConfig{ + Logger: CommonLogger(log), + AllowProxy: meekServer.inproxyBrokerAllowProxy, + AllowClient: meekServer.inproxyBrokerAllowClient, + AllowDomainFrontedDestinations: meekServer.inproxyBrokerAllowDomainFrontedDestinations, + LookupGeoIP: lookupGeoIPData, + APIParameterValidator: getInproxyBrokerAPIParameterValidator(support.Config), + APIParameterLogFieldFormatter: getInproxyBrokerAPIParameterLogFieldFormatter(), + IsValidServerEntryTag: support.PsinetDatabase.IsValidServerEntryTag, + GetTactics: meekServer.inproxyBrokerGetTactics, + PrivateKey: sessionPrivateKey, + ObfuscationRootSecret: obfuscationRootSecret, + ServerEntrySignaturePublicKey: support.Config.InproxyBrokerServerEntrySignaturePublicKey, + }) + if err != nil { + return nil, errors.Trace(err) + } + + meekServer.inproxyBroker = inproxyBroker + + // inproxyReloadTactics initializes compartment ID, timeouts, and + // other broker parameter values from tactics. + err = meekServer.inproxyReloadTactics() + if err != nil { + return nil, errors.Trace(err) + } + + } + return meekServer, nil } +// ReloadTactics signals components to reload tactics and reinitialize as +// required when tactics may have changed. 
+func (server *MeekServer) ReloadTactics() error { + if server.support.Config.MeekServerRunInproxyBroker { + err := server.inproxyReloadTactics() + if err != nil { + return errors.Trace(err) + } + } + return nil +} + type meekContextKey struct { key string } @@ -287,14 +429,30 @@ func (server *MeekServer) Run() error { server.rateLimitWorker() }() + if server.inproxyBroker != nil { + err := server.inproxyBroker.Start() + if err != nil { + return errors.Trace(err) + } + defer server.inproxyBroker.Stop() + } + // Serve HTTP or HTTPS // // - WriteTimeout may include time awaiting request, as per: // https://blog.cloudflare.com/the-complete-guide-to-golang-net-http-timeouts + // // - Legacy meek-server wrapped each client HTTP connection with an explicit idle // timeout net.Conn and didn't use http.Server timeouts. We could do the same // here (use ActivityMonitoredConn) but the stock http.Server timeouts should // now be sufficient. + // + // - HTTP/2 is enabled (the default), which is required for efficient + // in-proxy broker connection sharing. + // + // - Any CDN fronting a meek server running an in-proxy broker should be + // configured with timeouts that accommodate the proxy announcement + // request long polling. httpServer := &http.Server{ ReadTimeout: server.httpClientIOTimeout, @@ -304,19 +462,19 @@ func (server *MeekServer) Run() error { ConnContext: func(ctx context.Context, conn net.Conn) context.Context { return context.WithValue(ctx, meekNetConnContextKey, conn) }, - - // Disable auto HTTP/2 (https://golang.org/doc/go1.6) - TLSNextProto: make(map[string]func(*http.Server, *std_tls.Conn, http.Handler)), } - // Note: Serve() will be interrupted by listener.Close() call - var err error - if server.tlsConfig != nil { - httpsServer := HTTPSServer{Server: httpServer} - err = httpsServer.ServeTLS(server.listener, server.tlsConfig) - } else { - err = httpServer.Serve(server.listener) + // Note: Serve() will be interrupted by server.listener.Close() call + listener := server.listener + if server.stdTLSConfig != nil { + listener = tls.NewListener(server.listener, server.stdTLSConfig) + } else if server.psiphonTLSConfig != nil { + listener = psiphon_tls.NewListener(server.listener, server.psiphonTLSConfig) + + // Disable auto HTTP/2 + httpServer.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) + } + err := httpServer.Serve(listener) // Can't check for the exact error that Close() will cause in Accept(), // (see: https://code.google.com/p/go/issues/detail?id=4373). So using an @@ -355,31 +513,55 @@ func (server *MeekServer) ServeHTTP(responseWriter http.ResponseWriter, request if len(server.support.Config.MeekRequiredHeaders) > 0 { for header, value := range server.support.Config.MeekRequiredHeaders { requestValue := request.Header.Get(header) - if requestValue != value { + + // There's no ConstantTimeCompare for strings. While the + // conversion from string to byte slice may leak the length of + // the expected value, ConstantTimeCompare also takes time that's + // a function of the length of the input byte slices; leaking the + // expected value length isn't a vulnerability as long as the + // secret is long enough and random. + if subtle.ConstantTimeCompare([]byte(requestValue), []byte(value)) != 1 { log.WithTraceFields(LogFields{ "header": header, "value": requestValue, }).Warning("invalid required meek header") + common.TerminateHTTPConnection(responseWriter, request) return } } } - // Check for the expected meek/session ID cookie.
- // Also check for prohibited HTTP headers. + // Check for the expected meek/session ID cookie. In-proxy broker requests + // do not use or expect a meek cookie (the broker session protocol + // encapsulated in the HTTP request/response payloads has its own + // obfuscation and anti-replay mechanisms). + // + // TODO: log irregular tunnels for unexpected cookie cases? var meekCookie *http.Cookie for _, c := range request.Cookies() { meekCookie = c break } - if meekCookie == nil || len(meekCookie.Value) == 0 { + + if (meekCookie == nil || len(meekCookie.Value) == 0) && + !server.support.Config.MeekServerRunInproxyBroker { + log.WithTrace().Warning("missing meek cookie") common.TerminateHTTPConnection(responseWriter, request) return } + if meekCookie != nil && server.support.Config.MeekServerInproxyBrokerOnly { + + log.WithTrace().Warning("unexpected meek cookie") + common.TerminateHTTPConnection(responseWriter, request) + return + } + + // Check for prohibited HTTP headers. + if len(server.support.Config.MeekProhibitedHeaders) > 0 { for _, header := range server.support.Config.MeekProhibitedHeaders { value := request.Header.Get(header) @@ -403,11 +585,32 @@ func (server *MeekServer) ServeHTTP(responseWriter http.ResponseWriter, request // // 3. A request to an endpoint. This meek connection is not for relaying // tunnel traffic. Instead, the request is handed off to a custom handler. + // + // In the in-proxy broker case, there is no meek cookie, which avoids the + // size and resource overhead of sending and processing a meek cookie + // with each endpoint request. + // + // The broker session protocol encapsulated in the HTTP request/response + // payloads has its own obfuscation and anti-replay mechanisms. + // + // In RunInproxyBroker mode, non-meek cookie requests are routed to the + // in-proxy broker. getSessionOrEndpoint is still invoked in all cases, + // to process GeoIP headers, invoke the meek rate limiter, etc. + // + // Limitations: + // + // - Adding arbitrary cookies, as camouflage for plain HTTP for example, is + // not supported. + // + // - The HTTP normalizer depends on the meek cookie + // (see makeMeekHTTPNormalizerListener) so RunInproxyBroker mode is + // incompatible with the HTTP normalizer. sessionID, session, underlyingConn, endPoint, + endPointClientIP, endPointGeoIPData, err := server.getSessionOrEndpoint(request, meekCookie) @@ -421,15 +624,40 @@ func (server *MeekServer) ServeHTTP(responseWriter http.ResponseWriter, request if endPoint != "" { - // Endpoint mode. Currently, this means it's handled by the tactics - // request handler. + // Route to endpoint handlers and return. + + handled := false + + switch endPoint { + case tactics.TACTICS_END_POINT, tactics.SPEED_TEST_END_POINT: + handled = server.support.TacticsServer.HandleEndPoint( + endPoint, + common.GeoIPData(*endPointGeoIPData), + responseWriter, + request) + // Currently, TacticsServer.HandleEndPoint handles returning a 404 instead of + // leaving that up to server.handleError. + // + // TODO: call server.handleError, for its isFronting special case.
+ + case inproxy.BrokerEndPointName: + handled = true + err := server.inproxyBrokerHandler( + endPointClientIP, + common.GeoIPData(*endPointGeoIPData), + responseWriter, + request) + if err != nil { + log.WithTraceFields(LogFields{"error": err}).Warning("inproxyBrokerHandler failed") + server.handleError(responseWriter, request) + } + } - handled := server.support.TacticsServer.HandleEndPoint( - endPoint, common.GeoIPData(*endPointGeoIPData), responseWriter, request) if !handled { - log.WithTraceFields(LogFields{"endPoint": endPoint}).Info("unhandled endpoint") + log.WithTraceFields(LogFields{"endPoint": endPoint}).Warning("unhandled endpoint") server.handleError(responseWriter, request) } + return } @@ -570,12 +798,8 @@ func (server *MeekServer) ServeHTTP(responseWriter http.ResponseWriter, request responseWriter.WriteHeader(http.StatusPartialContent) - // TODO: - // - enforce a max extended buffer count per client, for - // fairness? Throttling may make this unnecessary. - // - cachedResponse can now start releasing extended buffers, - // as response bytes before "position" will never be requested - // again? + // TODO: cachedResponse can now start releasing extended buffers, as + // response bytes before "position" will never be requested again? responseSize, responseError = session.cachedResponse.CopyFromPosition(position, responseWriter) greaterThanSwapInt64(&session.metricPeakCachedResponseHitSize, int64(responseSize)) @@ -605,6 +829,19 @@ func (server *MeekServer) ServeHTTP(responseWriter http.ResponseWriter, request // pumpWrites causes a TunnelServer/SSH goroutine blocking on a Write to // write its downstream traffic through to the response body. + // Limitation: pumpWrites may write more response bytes than can be + // cached for future retries, either due to no extended buffers + // available, or exceeding the per-client extended buffer limit. In + // practice, with throttling in place and servers running under load + // limiting, metrics indicate that this rarely occurs. A potential + // future enhancement could be for pumpWrites to stop writing and + // send the response once there's no buffers remaining, favoring + // connection resilience over performance. + // + // TODO: use geo-targeted per-client extended buffer limit to reserve + // extended cache buffers for regions or ISPs with active or expected + // network connection interruptions? + responseSize, responseError = session.clientConn.pumpWrites(multiWriter, skipExtendedTurnAround) greaterThanSwapInt64(&session.metricPeakResponseSize, int64(responseSize)) greaterThanSwapInt64(&session.metricPeakCachedResponseSize, int64(session.cachedResponse.Available())) @@ -670,27 +907,39 @@ func checkRangeHeader(request *http.Request) (int, bool) { // meek cookie. A new session is created when the meek cookie indicates relay // mode; or the endpoint is returned when the meek cookie indicates endpoint // mode. +// +// For performance reasons, in-proxy broker requests are allowed to omit the +// meek cookie and pass in nil for meekCookie; getSessionOrEndpoint still +// performs rate limiting and header handling for the in-proxy broker case. func (server *MeekServer) getSessionOrEndpoint( - request *http.Request, meekCookie *http.Cookie) (string, *meekSession, net.Conn, string, *GeoIPData, error) { + request *http.Request, + meekCookie *http.Cookie) (string, *meekSession, net.Conn, string, string, *GeoIPData, error) { underlyingConn := request.Context().Value(meekNetConnContextKey).(net.Conn) - // Check for an existing session. 
+ // Check for an existing meek tunnel session. - server.sessionsLock.RLock() - existingSessionID := meekCookie.Value - session, ok := server.sessions[existingSessionID] - server.sessionsLock.RUnlock() - if ok { - // TODO: can multiple http client connections using same session cookie - // cause race conditions on session struct? - session.touch() - return existingSessionID, session, underlyingConn, "", nil, nil + if meekCookie != nil { + + server.sessionsLock.RLock() + existingSessionID := meekCookie.Value + session, ok := server.sessions[existingSessionID] + server.sessionsLock.RUnlock() + if ok { + // TODO: can multiple http client connections using same session cookie + // cause race conditions on session struct? + session.touch() + return existingSessionID, session, underlyingConn, "", "", nil, nil + } } - // Determine the client remote address, which is used for geolocation - // stats, rate limiting, anti-probing, discovery, and tactics selection - // logic. + // TODO: rename clientIP to peerIP to reflect the new terminology used in + // psiphon/server code where the immediate peer may be an in-proxy proxy, + // not the client. + + // Determine the client or peer remote address, which is used for + // geolocation stats, rate limiting, anti-probing, discovery, and tactics + // selection logic. // // When an intermediate proxy or CDN is in use, we may be // able to determine the original client address by inspecting HTTP @@ -703,10 +952,10 @@ func (server *MeekServer) getSessionOrEndpoint( clientIP, _, err := net.SplitHostPort(request.RemoteAddr) if err != nil { - return "", nil, nil, "", nil, errors.Trace(err) + return "", nil, nil, "", "", nil, errors.Trace(err) } if net.ParseIP(clientIP) == nil { - return "", nil, nil, "", nil, errors.TraceNew("invalid IP address") + return "", nil, nil, "", "", nil, errors.TraceNew("invalid IP address") } if server.isFronted && len(server.support.Config.MeekProxyForwardedForHeaders) > 0 { @@ -784,6 +1033,9 @@ func (server *MeekServer) getSessionOrEndpoint( if server.normalizer != nil { + // Limitation: RunInproxyBroker mode with no meek cookies is not + // compatible with the HTTP normalizer. + // NOTE: operates on the assumption that the normalizer is not wrapped // with a further conn. underlyingConn := request.Context().Value(meekNetConnContextKey).(net.Conn) @@ -792,9 +1044,12 @@ func (server *MeekServer) getSessionOrEndpoint( } else { - payloadJSON, err = server.getMeekCookiePayload(clientIP, meekCookie.Value) - if err != nil { - return "", nil, nil, "", nil, errors.Trace(err) + if meekCookie != nil { + + payloadJSON, err = server.getMeekCookiePayload(clientIP, meekCookie.Value) + if err != nil { + return "", nil, nil, "", "", nil, errors.Trace(err) + } } } @@ -802,9 +1057,17 @@ func (server *MeekServer) getSessionOrEndpoint( // and PsiphonServerAddress. var clientSessionData protocol.MeekCookieData - err = json.Unmarshal(payloadJSON, &clientSessionData) - if err != nil { - return "", nil, nil, "", nil, errors.Trace(err) + if meekCookie != nil { + + err = json.Unmarshal(payloadJSON, &clientSessionData) + if err != nil { + return "", nil, nil, "", "", nil, errors.Trace(err) + } + + } else { + + // Assume the in-proxy broker endpoint when there's no meek cookie. + clientSessionData.EndPoint = inproxy.BrokerEndPointName } // Any rate limit is enforced after the meek cookie is validated, so a prober @@ -812,9 +1075,16 @@ func (server *MeekServer) getSessionOrEndpoint( // based on response time combined with the rate limit configuration. 
The // rate limit is primarily intended to limit memory resource consumption and // not the overhead incurred by cookie validation. + // + // The meek rate limit is applied to new meek tunnel sessions and tactics + // requests, both of which may reasonably be limited to as low as 1 event + // per time period. The in-proxy broker is excluded from meek rate + // limiting since it has its own rate limiter and in-proxy requests are + // allowed to be more frequent. - if server.rateLimit(clientIP, geoIPData, server.listenerTunnelProtocol) { - return "", nil, nil, "", nil, errors.TraceNew("rate limit exceeded") + if clientSessionData.EndPoint != inproxy.BrokerEndPointName && + server.rateLimit(clientIP, geoIPData, server.listenerTunnelProtocol) { + return "", nil, nil, "", "", nil, errors.TraceNew("rate limit exceeded") } // Handle endpoints before enforcing CheckEstablishTunnels. @@ -822,7 +1092,13 @@ func (server *MeekServer) getSessionOrEndpoint( // handled by servers which would otherwise reject new tunnels. if clientSessionData.EndPoint != "" { - return "", nil, nil, clientSessionData.EndPoint, &geoIPData, nil + return "", nil, nil, clientSessionData.EndPoint, clientIP, &geoIPData, nil + } + + // After this point, for the meek tunnel new session case, a meek cookie + // is required and meekCookie must not be nil. + if meekCookie == nil { + return "", nil, nil, "", "", nil, errors.TraceNew("missing meek cookie") } // Don't create new sessions when not establishing. A subsequent SSH handshake @@ -830,7 +1106,7 @@ func (server *MeekServer) getSessionOrEndpoint( if server.support.TunnelServer != nil && !server.support.TunnelServer.CheckEstablishTunnels() { - return "", nil, nil, "", nil, errors.TraceNew("not establishing tunnels") + return "", nil, nil, "", "", nil, errors.TraceNew("not establishing tunnels") } // Disconnect immediately if the tactics for the client restricts usage of @@ -854,7 +1130,7 @@ func (server *MeekServer) getSessionOrEndpoint( p, err := server.support.ServerTacticsParametersCache.Get(geoIPData) if err != nil { - return "", nil, nil, "", nil, errors.Trace(err) + return "", nil, nil, "", "", nil, errors.Trace(err) } if !p.IsNil() && @@ -863,7 +1139,7 @@ func (server *MeekServer) getSessionOrEndpoint( server.support.Config.GetFrontingProviderID()) { if p.WeightedCoinFlip( parameters.RestrictFrontingProviderIDsServerProbability) { - return "", nil, nil, "", nil, errors.TraceNew("restricted fronting provider") + return "", nil, nil, "", "", nil, errors.TraceNew("restricted fronting provider") } } } @@ -883,7 +1159,7 @@ func (server *MeekServer) getSessionOrEndpoint( server.listenerTunnelProtocol, server.support.Config.GetRunningProtocols()) { - return "", nil, nil, "", nil, errors.Tracef( + return "", nil, nil, "", "", nil, errors.Tracef( "invalid client tunnel protocol: %s", clientSessionData.ClientTunnelProtocol) } @@ -898,12 +1174,17 @@ func (server *MeekServer) getSessionOrEndpoint( } cachedResponse := NewCachedResponse(bufferLength, server.bufferPool) - session = &meekSession{ + // The cookie name, Content-Type, and HTTP version of the first request in + // the session are recorded for stats. It's possible, but not expected, + // that later requests will have different values. 
+ + session := &meekSession{ meekProtocolVersion: clientSessionData.MeekProtocolVersion, sessionIDSent: false, cachedResponse: cachedResponse, cookieName: meekCookie.Name, contentType: request.Header.Get("Content-Type"), + httpVersion: request.Proto, } session.touch() @@ -939,11 +1220,22 @@ func (server *MeekServer) getSessionOrEndpoint( if clientSessionData.MeekProtocolVersion >= MEEK_PROTOCOL_VERSION_2 { sessionID, err = makeMeekSessionID() if err != nil { - return "", nil, nil, "", nil, errors.Trace(err) + return "", nil, nil, "", "", nil, errors.Trace(err) } } server.sessionsLock.Lock() + + // MEEK_MAX_SESSION_COUNT is a simple sanity check and failsafe. Load + // limiting tuned to each server's host resources is provided by external + // components. See comment in newSSHServer for more details. + if len(server.sessions) >= MEEK_MAX_SESSION_COUNT { + server.sessionsLock.Unlock() + err := std_errors.New("MEEK_MAX_SESSION_COUNT exceeded") + log.WithTrace().Warning(err.Error()) + return "", nil, nil, "", "", nil, errors.Trace(err) + } + server.sessions[sessionID] = session server.sessionsLock.Unlock() @@ -959,7 +1251,7 @@ func (server *MeekServer) getSessionOrEndpoint( // will close when session.delete calls Close() on the meekConn. server.clientHandler(session.clientConn, additionalData) - return sessionID, session, underlyingConn, "", nil, nil + return sessionID, session, underlyingConn, "", "", nil, nil } func (server *MeekServer) rateLimit( @@ -1181,6 +1473,10 @@ func (server *MeekServer) getMeekCookiePayload( errors.Trace(err), LogFields(logFields)) }, + + // To allow for meek retries, replay of the same meek cookie is + // permitted (but only from the same source IP). + DisableStrictHistoryMode: true, }, clientIP, reader) @@ -1219,14 +1515,30 @@ func (server *MeekServer) getMeekCookiePayload( return payload, nil } -// makeMeekTLSConfig creates a TLS config for a meek HTTPS listener. -// Currently, this config is optimized for fronted meek where the nature -// of the connection is non-circumvention; it's optimized for performance -// assuming the peer is an uncensored CDN. -func (server *MeekServer) makeMeekTLSConfig( - isFronted bool, useObfuscatedSessionTickets bool) (*tls.Config, error) { +func (server *MeekServer) getWebServerCertificate() ([]byte, []byte, error) { + + var certificate, privateKey string + + if server.support.Config.MeekServerCertificate != "" { + certificate = server.support.Config.MeekServerCertificate + privateKey = server.support.Config.MeekServerPrivateKey - certificate, privateKey, err := common.GenerateWebServerCertificate(values.GetHostName()) + } else { + var err error + certificate, privateKey, _, err = common.GenerateWebServerCertificate(values.GetHostName()) + if err != nil { + return nil, nil, errors.Trace(err) + } + } + + return []byte(certificate), []byte(privateKey), nil +} + +// makeFrontedMeekTLSConfig creates a TLS config for a fronted meek HTTPS +// listener. +func (server *MeekServer) makeFrontedMeekTLSConfig() (*tls.Config, error) { + + certificate, privateKey, err := server.getWebServerCertificate() if err != nil { return nil, errors.Trace(err) } @@ -1242,38 +1554,71 @@ func (server *MeekServer) makeMeekTLSConfig( minVersionCandidates := []uint16{tls.VersionTLS10, tls.VersionTLS11, tls.VersionTLS12} minVersion := minVersionCandidates[prng.Intn(len(minVersionCandidates))] + // This is a reordering of the supported CipherSuites in golang 1.6[*]. 
Non-ephemeral key + // CipherSuites greatly reduce server load, and we try to select these since the meek + // protocol is providing obfuscation, not privacy/integrity (this is provided by the + // tunneled SSH), so we don't benefit from the perfect forward secrecy property provided + // by ephemeral key CipherSuites. + // https://github.com/golang/go/blob/1cb3044c9fcd88e1557eca1bf35845a4108bc1db/src/crypto/tls/cipher_suites.go#L75 + // + // This optimization is applied only when there's a CDN in front of the meek server; in + // unfronted cases we prefer a more natural TLS handshake. + // + // [*] the list has since been updated, removing CipherSuites using RC4 and 3DES. + cipherSuites := []uint16{ + tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + } + config := &tls.Config{ Certificates: []tls.Certificate{tlsCertificate}, - NextProtos: []string{"http/1.1"}, + // Offer and prefer "h2" for HTTP/2 support. + NextProtos: []string{"h2", "http/1.1"}, MinVersion: minVersion, + CipherSuites: cipherSuites, } - if isFronted { - // This is a reordering of the supported CipherSuites in golang 1.6[*]. Non-ephemeral key - // CipherSuites greatly reduce server load, and we try to select these since the meek - // protocol is providing obfuscation, not privacy/integrity (this is provided by the - // tunneled SSH), so we don't benefit from the perfect forward secrecy property provided - // by ephemeral key CipherSuites. - // https://github.com/golang/go/blob/1cb3044c9fcd88e1557eca1bf35845a4108bc1db/src/crypto/tls/cipher_suites.go#L75 - // - // This optimization is applied only when there's a CDN in front of the meek server; in - // unfronted cases we prefer a more natural TLS handshake. - // - // [*] the list has since been updated, removing CipherSuites using RC4 and 3DES. - config.CipherSuites = []uint16{ - tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - } + return config, nil +} + +// makeDirectMeekTLSConfig creates a TLS config for a direct meek HTTPS +// listener. +func (server *MeekServer) makeDirectMeekTLSConfig( + useObfuscatedSessionTickets bool) (*psiphon_tls.Config, error) { + + certificate, privateKey, err := server.getWebServerCertificate() + if err != nil { + return nil, errors.Trace(err) + } + + tlsCertificate, err := psiphon_tls.X509KeyPair( + []byte(certificate), []byte(privateKey)) + if err != nil { + return nil, errors.Trace(err) + } + + // Vary the minimum version to frustrate scanning/fingerprinting of unfronted servers. + // Limitation: like the certificate, this value changes on restart. 
+ minVersionCandidates := []uint16{tls.VersionTLS10, tls.VersionTLS11, tls.VersionTLS12} + minVersion := minVersionCandidates[prng.Intn(len(minVersionCandidates))] + + config := &psiphon_tls.Config{ + Certificates: []psiphon_tls.Certificate{tlsCertificate}, + // Omit "h2", so HTTP/2 is not negotiated. Note that the + // negotiated-ALPN extension in the ServerHello is plaintext, even in + // TLS 1.3. + NextProtos: []string{"http/1.1"}, + MinVersion: minVersion, } if useObfuscatedSessionTickets { @@ -1471,6 +1816,182 @@ func (server *MeekServer) makeMeekHTTPNormalizerListener() *transforms.HTTPNorma return normalizer } +func (server *MeekServer) inproxyReloadTactics() error { + + // Assumes no GeoIP targeting for InproxyAllCommonCompartmentIDs and other + // general broker tactics. + + p, err := server.support.ServerTacticsParametersCache.Get(NewGeoIPData()) + if err != nil { + return errors.Trace(err) + } + if p.IsNil() { + return nil + } + + commonCompartmentIDs, err := inproxy.IDsFromStrings( + p.Strings(parameters.InproxyAllCommonCompartmentIDs)) + if err != nil { + return errors.Trace(err) + } + + server.inproxyBroker.SetCommonCompartmentIDs(commonCompartmentIDs) + + server.inproxyBroker.SetTimeouts( + p.Duration(parameters.InproxyBrokerProxyAnnounceTimeout), + p.Duration(parameters.InproxyBrokerClientOfferTimeout), + p.Duration(parameters.InproxyBrokerPendingServerRequestsTTL)) + + nonlimitedProxyIDs, err := inproxy.IDsFromStrings( + p.Strings(parameters.InproxyBrokerMatcherAnnouncementNonlimitedProxyIDs)) + if err != nil { + return errors.Trace(err) + } + server.inproxyBroker.SetLimits( + p.Int(parameters.InproxyBrokerMatcherAnnouncementLimitEntryCount), + p.Int(parameters.InproxyBrokerMatcherAnnouncementRateLimitQuantity), + p.Duration(parameters.InproxyBrokerMatcherAnnouncementRateLimitInterval), + nonlimitedProxyIDs, + p.Int(parameters.InproxyBrokerMatcherOfferLimitEntryCount), + p.Int(parameters.InproxyBrokerMatcherOfferRateLimitQuantity), + p.Duration(parameters.InproxyBrokerMatcherOfferRateLimitInterval), + p.Int(parameters.InproxyMaxCompartmentIDListLength)) + + return nil +} + +func (server *MeekServer) lookupAllowTactic(geoIPData common.GeoIPData, parameterName string) bool { + // Fallback to not-allow on failure or nil tactics. + p, err := server.support.ServerTacticsParametersCache.Get(GeoIPData(geoIPData)) + if err != nil { + log.WithTraceFields(LogFields{"error": err}).Warning("ServerTacticsParametersCache.Get failed") + return false + } + if p.IsNil() { + return false + } + return p.Bool(parameterName) +} + +func (server *MeekServer) inproxyBrokerAllowProxy(proxyGeoIPData common.GeoIPData) bool { + return server.lookupAllowTactic(proxyGeoIPData, parameters.InproxyAllowProxy) +} + +func (server *MeekServer) inproxyBrokerAllowClient(clientGeoIPData common.GeoIPData) bool { + return server.lookupAllowTactic(clientGeoIPData, parameters.InproxyAllowClient) +} + +func (server *MeekServer) inproxyBrokerAllowDomainFrontedDestinations(clientGeoIPData common.GeoIPData) bool { + return server.lookupAllowTactic(clientGeoIPData, parameters.InproxyAllowDomainFrontedDestinations) +} + +// inproxyBrokerGetTactics is a callback used by the in-proxy broker to +// provide tactics to proxies. +// +// The proxy sends its current tactics tag in apiParameters, and, when there +// are new tactics, inproxyBrokerGetTactics returns the payload and the new +// tactics tag. The broker should log new_tactics_tag in its ProxyAnnounce +// handler. 
+func (server *MeekServer) inproxyBrokerGetTactics( + geoIPData common.GeoIPData, + apiParameters common.APIParameters) ([]byte, string, error) { + + tacticsPayload, err := server.support.TacticsServer.GetTacticsPayload( + geoIPData, apiParameters) + if err != nil { + return nil, "", errors.Trace(err) + } + + var marshaledTacticsPayload []byte + newTacticsTag := "" + + if tacticsPayload != nil { + + marshaledTacticsPayload, err = json.Marshal(tacticsPayload) + if err != nil { + return nil, "", errors.Trace(err) + } + + if len(tacticsPayload.Tactics) > 0 { + newTacticsTag = tacticsPayload.Tag + } + } + + return marshaledTacticsPayload, newTacticsTag, nil +} + +// inproxyBrokerHandler reads an in-proxy broker session protocol message from +// the HTTP request body, dispatches the message to the broker, and writes +// the broker session response message to the HTTP response body. +// +// The HTTP response write timeout may be extended by the broker, as required. +// Error cases can return without writing any HTTP response. The caller +// should invoke server.handleError when an error is returned. +func (server *MeekServer) inproxyBrokerHandler( + clientIP string, + geoIPData common.GeoIPData, + w http.ResponseWriter, + r *http.Request) (retErr error) { + + // Don't read more than MEEK_ENDPOINT_MAX_REQUEST_PAYLOAD_LENGTH bytes, as + // a sanity check and defense against potential resource exhaustion. + packet, err := ioutil.ReadAll(http.MaxBytesReader( + w, r.Body, MEEK_ENDPOINT_MAX_REQUEST_PAYLOAD_LENGTH)) + if err != nil { + return errors.Trace(err) + } + + extendTimeout := func(timeout time.Duration) { + + // Extend the HTTP response write timeout to accommodate the timeout + // specified by the broker, such as in the case of the ProxyAnnounce + // request long poll. The base httpClientIOTimeout value is added, as + // it covers HTTP transport network operations, which are not + // necessarily included in the broker's timeouts. + // + // Note that any existing write timeout of httpClientIOTimeout would + // have been set before the body read, which may have consumed time, + // so adding the full httpClientIOTimeout value again may exceed the + // original httpClientIOTimeout target. + + http.NewResponseController(w).SetWriteDeadline( + time.Now().Add(server.httpClientIOTimeout + timeout)) + } + + // Per https://pkg.go.dev/net/http#Request.Context, the request context is + // canceled when the client's connection closes or an HTTP/2 request is + // canceled. So it is expected that the broker operation will abort and + // stop waiting (in the case of long polling) if the client disconnects + // for any reason before a response is sent. + // + // When fronted by a CDN using persistent connections to multiplex + // many clients, it is expected that CDNs will perform an HTTP/2 request + // cancellation in this scenario. + + transportLogFields := common.LogFields{ + "meek_server_http_version": r.Proto, + } + + packet, err = server.inproxyBroker.HandleSessionPacket( + r.Context(), + extendTimeout, + transportLogFields, + clientIP, + geoIPData, + packet) + if err != nil { + return errors.Trace(err) + } + + w.WriteHeader(http.StatusOK) + _, err = w.Write(packet) + if err != nil { + return errors.Trace(err) + } + + return nil +} + type meekSession struct { // Note: 64-bit ints used with atomic operations are placed // at the start of struct to ensure 64-bit alignment.
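As an aside, a hedged client-side sketch of the round trip that inproxyBrokerHandler expects (the endpoint URL, HTTP client setup, and helper name are assumptions; real in-proxy clients and proxies use the Psiphon fronted meek dialers rather than a bare http.Client). The broker session packet is POSTed as the raw request body, with no meek cookie, capped at MEEK_ENDPOINT_MAX_REQUEST_PAYLOAD_LENGTH, and the response packet is returned in the 200 response body:

// Assumes imports: bytes, context, fmt, io, net/http.
func postBrokerPacket(
	ctx context.Context, client *http.Client, brokerURL string, packet []byte) ([]byte, error) {

	request, err := http.NewRequestWithContext(
		ctx, http.MethodPost, brokerURL, bytes.NewReader(packet))
	if err != nil {
		return nil, err
	}
	response, err := client.Do(request)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()
	if response.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected response status: %d", response.StatusCode)
	}
	// Mirror the server-side cap on session packet size.
	return io.ReadAll(io.LimitReader(response.Body, MEEK_ENDPOINT_MAX_REQUEST_PAYLOAD_LENGTH))
}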
@@ -1492,6 +2013,7 @@ type meekSession struct { cachedResponse *CachedResponse cookieName string contentType string + httpVersion string } func (session *meekSession) touch() { @@ -1562,6 +2084,7 @@ func (session *meekSession) GetMetrics() common.LogFields { logFields["meek_underlying_connection_count"] = atomic.LoadInt64(&session.metricUnderlyingConnCount) logFields["meek_cookie_name"] = session.cookieName logFields["meek_content_type"] = session.contentType + logFields["meek_server_http_version"] = session.httpVersion return logFields } diff --git a/psiphon/server/meekBuffer.go b/psiphon/server/meekBuffer.go index 8828adf5f..271e6b92a 100644 --- a/psiphon/server/meekBuffer.go +++ b/psiphon/server/meekBuffer.go @@ -225,9 +225,12 @@ func (response *CachedResponse) Write(data []byte) (int, error) { if response.writeBufferIndex == len(response.buffers)-1 && !response.overwriting { - extendedBuffer := response.extendedBufferPool.Get() - if extendedBuffer != nil { - response.buffers = append(response.buffers, extendedBuffer) + extendedBufferCount := len(response.buffers) - 1 + if extendedBufferCount < response.extendedBufferPool.limit { + extendedBuffer := response.extendedBufferPool.Get() + if extendedBuffer != nil { + response.buffers = append(response.buffers, extendedBuffer) + } } } @@ -257,13 +260,14 @@ func (response *CachedResponse) Write(data []byte) (int, error) { type CachedResponseBufferPool struct { bufferSize int buffers chan []byte + limit int } // NewCachedResponseBufferPool creates a new CachedResponseBufferPool // with the specified number of buffers. Buffers are allocated on // demand and once allocated remain allocated. func NewCachedResponseBufferPool( - bufferSize, bufferCount int) *CachedResponseBufferPool { + bufferSize, bufferCount int, limit int) *CachedResponseBufferPool { buffers := make(chan []byte, bufferCount) for i := 0; i < bufferCount; i++ { @@ -273,6 +277,7 @@ func NewCachedResponseBufferPool( return &CachedResponseBufferPool{ bufferSize: bufferSize, buffers: buffers, + limit: limit, } } diff --git a/psiphon/server/meek_test.go b/psiphon/server/meek_test.go index e01e56783..ed22dc6c5 100755 --- a/psiphon/server/meek_test.go +++ b/psiphon/server/meek_test.go @@ -59,45 +59,54 @@ func TestCachedResponse(t *testing.T) { bufferSize int extendedBufferSize int extendedBufferCount int + extendedBufferLimit int minBytesPerWrite int maxBytesPerWrite int copyPosition int expectedSuccess bool }{ - {1, 16, 16, 0, 0, 1, 1, 0, true}, + {1, 16, 16, 0, 0, -1, 1, 1, 0, true}, - {1, 31, 16, 0, 0, 1, 1, 15, true}, + {1, 31, 16, 0, 0, -1, 1, 1, 15, true}, - {1, 16, 2, 2, 7, 1, 1, 0, true}, + {1, 16, 2, 2, 7, -1, 1, 1, 0, true}, - {1, 31, 15, 3, 5, 1, 1, 1, true}, + {1, 31, 15, 3, 5, -1, 1, 1, 1, true}, - {1, 16, 16, 0, 0, 1, 1, 16, true}, + {1, 16, 16, 0, 0, -1, 1, 1, 16, true}, - {1, 64*KB + 1, 64 * KB, 64 * KB, 1, 1, 1 * KB, 64 * KB, true}, + {1, 64*KB + 1, 64 * KB, 64 * KB, 1, -1, 1, 1 * KB, 64 * KB, true}, - {1, 10 * MB, 64 * KB, 64 * KB, 158, 1, 32 * KB, 0, false}, + {1, 10 * MB, 64 * KB, 64 * KB, 158, -1, 1, 32 * KB, 0, false}, - {1, 10 * MB, 64 * KB, 64 * KB, 159, 1, 32 * KB, 0, true}, + {1, 10 * MB, 64 * KB, 64 * KB, 159, -1, 1, 32 * KB, 0, true}, - {1, 10 * MB, 64 * KB, 64 * KB, 160, 1, 32 * KB, 0, true}, + {1, 10 * MB, 64 * KB, 64 * KB, 160, -1, 1, 32 * KB, 0, true}, - {1, 128 * KB, 64 * KB, 0, 0, 1, 1 * KB, 64 * KB, true}, + {1, 128 * KB, 64 * KB, 0, 0, -1, 1, 1 * KB, 64 * KB, true}, - {1, 128 * KB, 64 * KB, 0, 0, 1, 1 * KB, 63 * KB, false}, + {1, 128 * KB, 64 * KB, 0, 
0, -1, 1, 1 * KB, 63 * KB, false}, - {1, 200 * KB, 64 * KB, 0, 0, 1, 1 * KB, 136 * KB, true}, + {1, 200 * KB, 64 * KB, 0, 0, -1, 1, 1 * KB, 136 * KB, true}, - {10, 10 * MB, 64 * KB, 64 * KB, 1589, 1, 32 * KB, 0, false}, + {10, 10 * MB, 64 * KB, 64 * KB, 1589, -1, 1, 32 * KB, 0, false}, - {10, 10 * MB, 64 * KB, 64 * KB, 1590, 1, 32 * KB, 0, true}, + {10, 10 * MB, 64 * KB, 64 * KB, 1590, -1, 1, 32 * KB, 0, true}, + + {10, 10 * MB, 64 * KB, 64 * KB, 1590, 32, 1, 32 * KB, 0, false}, } for _, testCase := range testCases { description := fmt.Sprintf("test case: %+v", testCase) t.Run(description, func(t *testing.T) { - pool := NewCachedResponseBufferPool(testCase.extendedBufferSize, testCase.extendedBufferCount) + limit := testCase.extendedBufferCount + if testCase.extendedBufferLimit != -1 { + limit = testCase.extendedBufferLimit + } + + pool := NewCachedResponseBufferPool( + testCase.extendedBufferSize, testCase.extendedBufferCount, limit) responses := make([]*CachedResponse, testCase.concurrentResponses) for i := 0; i < testCase.concurrentResponses; i++ { diff --git a/psiphon/server/net.go b/psiphon/server/net.go deleted file mode 100644 index 44c416a0e..000000000 --- a/psiphon/server/net.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2016, Psiphon Inc. - * All rights reserved. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - */ - -// for HTTPSServer.ServeTLS: -/* -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -package server - -import ( - "net" - "net/http" - - tls "github.com/Psiphon-Labs/psiphon-tls" -) - -// HTTPSServer is a wrapper around http.Server which adds the -// ServeTLS function. -type HTTPSServer struct { - *http.Server -} - -// ServeTLS is similar to http.Serve, but uses TLS. -// -// The http package has both ListenAndServe and ListenAndServeTLS higher- -// level interfaces, but only Serve (not TLS) offers a lower-level interface that -// allows the caller to keep a refererence to the Listener, allowing for external -// shutdown. ListenAndServeTLS also requires the TLS cert and key to be in files -// and we avoid that here. -// -// Note that the http.Server.TLSConfig field is ignored and the tls.Config -// parameter is used intead. -func (server *HTTPSServer) ServeTLS(listener net.Listener, config *tls.Config) error { - tlsListener := tls.NewListener(listener, config) - return server.Serve(tlsListener) -} diff --git a/psiphon/server/passthrough_test.go b/psiphon/server/passthrough_test.go index 549c810f1..baa90730b 100644 --- a/psiphon/server/passthrough_test.go +++ b/psiphon/server/passthrough_test.go @@ -57,7 +57,7 @@ func testPassthrough(t *testing.T, legacy bool) { // Run passthrough web server - webServerCertificate, webServerPrivateKey, err := common.GenerateWebServerCertificate("example.org") + webServerCertificate, webServerPrivateKey, _, err := common.GenerateWebServerCertificate("example.org") if err != nil { t.Fatalf("common.GenerateWebServerCertificate failed: %s", err) } @@ -96,12 +96,10 @@ func testPassthrough(t *testing.T, legacy bool) { tunnelProtocol := protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_SESSION_TICKET generateConfigParams := &GenerateConfigParams{ - ServerIPAddress: "127.0.0.1", - EnableSSHAPIRequests: true, - WebServerPort: 8000, - TunnelProtocolPorts: map[string]int{tunnelProtocol: 4000}, - Passthrough: true, - LegacyPassthrough: legacy, + ServerIPAddress: "127.0.0.1", + TunnelProtocolPorts: map[string]int{tunnelProtocol: 4000}, + Passthrough: true, + LegacyPassthrough: legacy, } serverConfigJSON, _, _, _, encodedServerEntry, err := GenerateConfig(generateConfigParams) @@ -144,8 +142,8 @@ func testPassthrough(t *testing.T, legacy bool) { "DataRootDirectory" : "%s", "ClientPlatform" : "Windows", "ClientVersion" : "0", - "SponsorId" : "0", - "PropagationChannelId" : "0", + "SponsorId" : "0000000000000000", + "PropagationChannelId" : "0000000000000000", "TargetServerEntry" : "%s" }`, testDataDirName, string(encodedServerEntry)) diff --git a/psiphon/server/replay_test.go b/psiphon/server/replay_test.go index ba7425cb1..65602b93e 100644 --- a/psiphon/server/replay_test.go +++ b/psiphon/server/replay_test.go @@ -122,10 +122,8 @@ func runServerReplayTest( // Run Psiphon server generateConfigParams := &GenerateConfigParams{ - ServerIPAddress: "127.0.0.1", - EnableSSHAPIRequests: true, - WebServerPort: 8000, - TunnelProtocolPorts: map[string]int{tunnelProtocol: 4000}, + ServerIPAddress: "127.0.0.1", + TunnelProtocolPorts: map[string]int{tunnelProtocol: 4000}, } serverConfigJSON, _, _, _, encodedServerEntry, err := GenerateConfig(generateConfigParams) @@ -282,8 +280,8 @@ func runServerReplayClient( "DataRootDirectory" : "%s", "ClientPlatform" : "Windows", "ClientVersion" : "0", - "SponsorId" : "0", - "PropagationChannelId" : "0", + "SponsorId" : "0000000000000000", + "PropagationChannelId" : "0000000000000000", "TargetServerEntry" : "%s" }`, dataRootDir, string(encodedServerEntry)) diff --git a/psiphon/server/server_packetman_test.go 
b/psiphon/server/server_packetman_test.go index 718e40344..cb318c985 100644 --- a/psiphon/server/server_packetman_test.go +++ b/psiphon/server/server_packetman_test.go @@ -1,3 +1,4 @@ +//go:build PSIPHON_RUN_PACKET_MANIPULATOR_TEST // +build PSIPHON_RUN_PACKET_MANIPULATOR_TEST /* @@ -29,7 +30,6 @@ func TestServerPacketManipulation(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "UNFRONTED-MEEK-SESSION-TICKET-OSSH", - enableSSHAPIRequests: true, doHotReload: false, doDefaultSponsorID: false, denyTrafficRules: false, diff --git a/psiphon/server/server_test.go b/psiphon/server/server_test.go index dbedcac2f..3985a0df0 100644 --- a/psiphon/server/server_test.go +++ b/psiphon/server/server_test.go @@ -51,6 +51,7 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/accesscontrol" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" @@ -64,24 +65,13 @@ import ( "golang.org/x/net/proxy" ) -var serverIPAddress, testDataDirName string -var mockWebServerURL, mockWebServerExpectedResponse string -var mockWebServerPort = "8080" +var testDataDirName string +var mockWebServerURL, mockWebServerPort, mockWebServerExpectedResponse string func TestMain(m *testing.M) { flag.Parse() - serverIPv4Address, serverIPv6Address, err := common.GetRoutableInterfaceIPAddresses() - if err != nil { - fmt.Printf("error getting server IP address: %s\n", err) - os.Exit(1) - } - if serverIPv4Address != nil { - serverIPAddress = serverIPv4Address.String() - } else { - serverIPAddress = serverIPv6Address.String() - } - + var err error testDataDirName, err = ioutil.TempDir("", "psiphon-server-test") if err != nil { fmt.Printf("TempDir failed: %s\n", err) @@ -104,16 +94,23 @@ func runMockWebServer() (string, string) { serveMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(responseBody)) }) - webServerAddress := net.JoinHostPort(serverIPAddress, mockWebServerPort) server := &http.Server{ - Addr: webServerAddress, Handler: serveMux, } + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + fmt.Printf("net.Listen failed: %s\n", err) + os.Exit(1) + } + + listenAddress := listener.Addr().String() + _, mockWebServerPort, _ = net.SplitHostPort(listenAddress) + go func() { - err := server.ListenAndServe() + err := server.Serve(listener) if err != nil { - fmt.Printf("error running mock web server: %s\n", err) + fmt.Printf("http.Server.Serve failed: %s\n", err) os.Exit(1) } }() @@ -121,7 +118,7 @@ func runMockWebServer() (string, string) { // TODO: properly synchronize with web server readiness time.Sleep(1 * time.Second) - return fmt.Sprintf("http://%s/", webServerAddress), responseBody + return fmt.Sprintf("http://%s/", listenAddress), responseBody } // Note: not testing fronted meek protocols, which client is @@ -131,7 +128,6 @@ func TestSSH(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "SSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -144,7 +140,6 @@ func TestOSSH(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, 
doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -157,7 +152,6 @@ func TestFragmentedOSSH(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -171,7 +165,6 @@ func TestPrefixedOSSH(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -186,7 +179,6 @@ func TestFragmentedPrefixedOSSH(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -206,7 +198,6 @@ func TestTLSOSSH(t *testing.T) { &runServerConfig{ tunnelProtocol: "TLS-OSSH", passthrough: true, - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -218,7 +209,6 @@ func TestUnfrontedMeek(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "UNFRONTED-MEEK-OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -231,7 +221,6 @@ func TestFragmentedUnfrontedMeek(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "UNFRONTED-MEEK-OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -246,7 +235,6 @@ func TestUnfrontedMeekHTTPS(t *testing.T) { &runServerConfig{ tunnelProtocol: "UNFRONTED-MEEK-HTTPS-OSSH", tlsProfile: protocol.TLS_PROFILE_RANDOMIZED, - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -260,7 +248,6 @@ func TestFragmentedUnfrontedMeekHTTPS(t *testing.T) { &runServerConfig{ tunnelProtocol: "UNFRONTED-MEEK-HTTPS-OSSH", tlsProfile: protocol.TLS_PROFILE_RANDOMIZED, - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -275,7 +262,6 @@ func TestUnfrontedMeekHTTPSTLS13(t *testing.T) { &runServerConfig{ tunnelProtocol: "UNFRONTED-MEEK-HTTPS-OSSH", tlsProfile: protocol.TLS_PROFILE_CHROME_70, - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -289,7 +275,6 @@ func TestUnfrontedMeekSessionTicket(t *testing.T) { &runServerConfig{ tunnelProtocol: "UNFRONTED-MEEK-SESSION-TICKET-OSSH", tlsProfile: protocol.TLS_PROFILE_CHROME_58, - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -303,7 +288,6 @@ func TestUnfrontedMeekSessionTicketTLS13(t *testing.T) { &runServerConfig{ tunnelProtocol: "UNFRONTED-MEEK-SESSION-TICKET-OSSH", tlsProfile: protocol.TLS_PROFILE_CHROME_70, - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -319,7 +303,6 @@ func TestTLSOSSHOverUnfrontedMeekHTTPSDemux(t *testing.T) { clientTunnelProtocol: "TLS-OSSH", passthrough: true, tlsProfile: protocol.TLS_PROFILE_CHROME_96, // TLS-OSSH requires TLS 1.3 support - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -334,7 +317,6 @@ func TestTLSOSSHOverUnfrontedMeekSessionTicketDemux(t *testing.T) { clientTunnelProtocol: "TLS-OSSH", passthrough: true, tlsProfile: protocol.TLS_PROFILE_CHROME_96, // TLS-OSSH requires TLS 1.3 support - enableSSHAPIRequests: true, 
requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -349,7 +331,6 @@ func TestQUICOSSH(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "QUIC-OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -364,7 +345,6 @@ func TestLimitedQUICOSSH(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "QUIC-OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -373,13 +353,65 @@ func TestLimitedQUICOSSH(t *testing.T) { }) } -func TestWebTransportAPIRequests(t *testing.T) { +func TestInproxyOSSH(t *testing.T) { + if !inproxy.Enabled() { + t.Skip("inproxy is not enabled") + } runServer(t, &runServerConfig{ - tunnelProtocol: "OSSH", - omitAuthorization: true, + tunnelProtocol: "INPROXY-WEBRTC-OSSH", + requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, + doDanglingTCPConn: true, + doLogHostProvider: true, + doTargetBrokerSpecs: true, + }) +} + +func TestInproxyQUICOSSH(t *testing.T) { + if !quic.Enabled() { + t.Skip("QUIC is not enabled") + } + if !inproxy.Enabled() { + t.Skip("inproxy is not enabled") + } + runServer(t, + &runServerConfig{ + tunnelProtocol: "INPROXY-WEBRTC-QUIC-OSSH", + requireAuthorization: true, + doTunneledWebRequest: true, + doTunneledNTPRequest: true, + doLogHostProvider: true, + }) +} + +func TestInproxyUnfrontedMeekHTTPS(t *testing.T) { + if !inproxy.Enabled() { + t.Skip("inproxy is not enabled") + } + runServer(t, + &runServerConfig{ + tunnelProtocol: "INPROXY-WEBRTC-UNFRONTED-MEEK-HTTPS-OSSH", + requireAuthorization: true, + doTunneledWebRequest: true, + doTunneledNTPRequest: true, + doDanglingTCPConn: true, + doLogHostProvider: true, + }) +} + +func TestInproxyTLSOSSH(t *testing.T) { + if !inproxy.Enabled() { + t.Skip("inproxy is not enabled") + } + runServer(t, + &runServerConfig{ + tunnelProtocol: "INPROXY-WEBRTC-TLS-OSSH", + requireAuthorization: true, + doTunneledWebRequest: true, + doTunneledNTPRequest: true, + doDanglingTCPConn: true, doLogHostProvider: true, }) } @@ -388,7 +420,6 @@ func TestHotReload(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, doHotReload: true, requireAuthorization: true, doTunneledWebRequest: true, @@ -401,7 +432,6 @@ func TestHotReloadWithTactics(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "UNFRONTED-MEEK-OSSH", - enableSSHAPIRequests: true, doHotReload: true, requireAuthorization: true, doTunneledWebRequest: true, @@ -414,7 +444,6 @@ func TestDefaultSponsorID(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, doHotReload: true, doDefaultSponsorID: true, requireAuthorization: true, @@ -428,7 +457,6 @@ func TestDenyTrafficRules(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, doHotReload: true, denyTrafficRules: true, requireAuthorization: true, @@ -442,7 +470,6 @@ func TestOmitAuthorization(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, doHotReload: true, requireAuthorization: true, omitAuthorization: true, @@ -456,7 +483,6 @@ func TestNoAuthorization(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, doHotReload: true, omitAuthorization: true, doTunneledWebRequest: true, @@ -469,7 +495,6 @@ func TestUnusedAuthorization(t *testing.T) { 
runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, doHotReload: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -481,7 +506,6 @@ func TestTCPOnlySLOK(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doLogHostProvider: true, @@ -492,7 +516,6 @@ func TestUDPOnlySLOK(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledNTPRequest: true, doLogHostProvider: true, @@ -503,7 +526,6 @@ func TestLivenessTest(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -516,7 +538,6 @@ func TestPruneServerEntries(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -530,7 +551,6 @@ func TestBurstMonitorAndDestinationBytes(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -545,7 +565,6 @@ func TestChangeBytesConfig(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -560,7 +579,6 @@ func TestSplitTunnel(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -574,12 +592,10 @@ func TestOmitProvider(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, doDanglingTCPConn: true, - doSplitTunnel: true, }) } @@ -587,7 +603,6 @@ func TestSteeringIP(t *testing.T) { runServer(t, &runServerConfig{ tunnelProtocol: "FRONTED-MEEK-OSSH", - enableSSHAPIRequests: true, requireAuthorization: true, doTunneledWebRequest: true, doTunneledNTPRequest: true, @@ -598,12 +613,24 @@ func TestSteeringIP(t *testing.T) { }) } +func TestLegacyAPIEncoding(t *testing.T) { + runServer(t, + &runServerConfig{ + tunnelProtocol: "OSSH", + requireAuthorization: true, + doTunneledWebRequest: true, + doTunneledNTPRequest: true, + doDanglingTCPConn: true, + doLogHostProvider: true, + useLegacyAPIEncoding: true, + }) +} + type runServerConfig struct { tunnelProtocol string clientTunnelProtocol string passthrough bool tlsProfile string - enableSSHAPIRequests bool doHotReload bool doDefaultSponsorID bool denyTrafficRules bool @@ -625,6 +652,8 @@ type runServerConfig struct { doLogHostProvider bool inspectFlows bool doSteeringIP bool + doTargetBrokerSpecs bool + useLegacyAPIEncoding bool } var ( @@ -645,6 +674,46 @@ func runServer(t *testing.T, runConfig *runServerConfig) { serverRuns += 1 + psiphonServerIPAddress := "127.0.0.1" + psiphonServerPort := 4000 + + // initialize server entry signing + + serverEntrySignaturePublicKey, + serverEntrySignaturePrivateKey, err := protocol.NewServerEntrySignatureKeyPair() + if err != nil { + t.Fatalf("error generating server entry signature key pair: %s", err) + } + + // generate inproxy configuration + + doInproxy := protocol.TunnelProtocolUsesInproxy(runConfig.tunnelProtocol) + + 
var inproxyTestConfig *inproxyTestConfig + if doInproxy { + + addMeekServerForBroker := true + brokerIPAddress := "127.0.0.1" + brokerPort := 4001 + + if protocol.TunnelProtocolUsesMeek(runConfig.tunnelProtocol) { + // Use the existing meek server as the broker server + addMeekServerForBroker = false + brokerPort = 4000 + } + + var err error + inproxyTestConfig, err = generateInproxyTestConfig( + addMeekServerForBroker, + runConfig.doTargetBrokerSpecs, + brokerIPAddress, + brokerPort, + serverEntrySignaturePublicKey) + if err != nil { + t.Fatalf("error generating inproxy test config: %s", err) + } + } + // configure authorized access accessType := "test-access-type" @@ -670,13 +739,15 @@ func runServer(t *testing.T, runConfig *runServerConfig) { authorizationIDStr := base64.StdEncoding.EncodeToString(authorizationID) - // Enable tactics when the test protocol is meek. Both the client and the - // server will be configured to support tactics. The client config will be - // set with a nonfunctional config so that the tactics request must - // succeed, overriding the nonfunctional values, for the tunnel to - // establish. + // Enable tactics when the test protocol is meek or uses inproxy. Both the + // client and the server will be configured to support tactics. The + // client config will be set with a nonfunctional config so that the + // tactics request must succeed, overriding the nonfunctional values, for + // the tunnel to establish. + + doClientTactics := protocol.TunnelProtocolUsesMeek(runConfig.tunnelProtocol) || + doInproxy - doClientTactics := protocol.TunnelProtocolUsesMeek(runConfig.tunnelProtocol) doServerTactics := doClientTactics || runConfig.applyPrefix || runConfig.forceFragmenting || @@ -697,13 +768,6 @@ func runServer(t *testing.T, runConfig *runServerConfig) { // create a server - psiphonServerIPAddress := serverIPAddress - if protocol.TunnelProtocolUsesQUIC(runConfig.tunnelProtocol) { - // Workaround for macOS firewall. - psiphonServerIPAddress = "127.0.0.1" - } - psiphonServerPort := 4000 - var limitQUICVersions protocol.QUICVersions if runConfig.limitQUICVersions { @@ -730,11 +794,16 @@ func runServer(t *testing.T, runConfig *runServerConfig) { } } + tunnelProtocolPorts := map[string]int{runConfig.tunnelProtocol: psiphonServerPort} + if doInproxy && inproxyTestConfig.addMeekServerForBroker { + tunnelProtocolPorts["UNFRONTED-MEEK-HTTPS-OSSH"] = inproxyTestConfig.brokerPort + } + generateConfigParams := &GenerateConfigParams{ + ServerEntrySignaturePublicKey: serverEntrySignaturePublicKey, + ServerEntrySignaturePrivateKey: serverEntrySignaturePrivateKey, ServerIPAddress: psiphonServerIPAddress, - EnableSSHAPIRequests: runConfig.enableSSHAPIRequests, - WebServerPort: 8000, - TunnelProtocolPorts: map[string]int{runConfig.tunnelProtocol: psiphonServerPort}, + TunnelProtocolPorts: tunnelProtocolPorts, TunnelProtocolPassthroughAddresses: tunnelProtocolPassthroughAddresses, Passthrough: runConfig.passthrough, LimitQUICVersions: limitQUICVersions, @@ -791,6 +860,7 @@ func runServer(t *testing.T, runConfig *runServerConfig) { var tacticsConfigFilename string var tacticsTunnelProtocol string + var inproxyTacticsParametersJSON string // Only pave the tactics config when tactics are required. This exercises the // case where the tactics config is omitted. 
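For readers following the in-proxy additions in this hunk: when the tunnel protocol under test is itself meek-based, the broker shares the existing meek listener and port; otherwise the test adds a dedicated UNFRONTED-MEEK-HTTPS-OSSH listener for the broker and records it in TunnelProtocolPorts. A minimal, hypothetical sketch of that placement decision (simplified names, hard-coded 4000/4001 mirroring the test; not taken verbatim from the change):

package main

import "fmt"

// brokerPlacement condenses the placement logic added to runServer. The
// usesMeek argument stands in for protocol.TunnelProtocolUsesMeek.
func brokerPlacement(usesMeek bool) (addMeekServerForBroker bool, brokerPort int) {
	if usesMeek {
		// The tunnel protocol already runs a meek server; the broker
		// shares its listener and port.
		return false, 4000
	}
	// Otherwise a dedicated meek listener hosts the broker.
	return true, 4001
}

func main() {
	tunnelProtocolPorts := map[string]int{"INPROXY-WEBRTC-OSSH": 4000}
	addBroker, brokerPort := brokerPlacement(false)
	if addBroker {
		tunnelProtocolPorts["UNFRONTED-MEEK-HTTPS-OSSH"] = brokerPort
	}
	fmt.Println(tunnelProtocolPorts)
}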
@@ -803,6 +873,10 @@ func runServer(t *testing.T, runConfig *runServerConfig) { tacticsTunnelProtocol = runConfig.tunnelProtocol } + if doInproxy { + inproxyTacticsParametersJSON = inproxyTestConfig.tacticsParametersJSON + } + paveTacticsConfigFile( t, tacticsConfigFilename, @@ -817,7 +891,7 @@ func runServer(t *testing.T, runConfig *runServerConfig) { runConfig.applyPrefix, runConfig.forceFragmenting, "classic", - ) + inproxyTacticsParametersJSON) } blocklistFilename := filepath.Join(testDataDirName, "blocklist.csv") @@ -881,6 +955,35 @@ func runServer(t *testing.T, runConfig *runServerConfig) { serverConfig["EnableSteeringIPs"] = true } + // In-proxy setup. + + if doInproxy { + + serverConfig["MeekServerRunInproxyBroker"] = true + + // Limitation: can't exercise MeekServerInproxyBrokerOnly, as the + // single meek server must also provide a tactics endpoint. + + serverConfig["MeekServerCertificate"] = inproxyTestConfig.brokerServerCertificate + serverConfig["MeekServerPrivateKey"] = inproxyTestConfig.brokerServerPrivateKey + serverConfig["MeekRequiredHeaders"] = inproxyTestConfig.brokerMeekRequiredHeaders + + serverConfig["InproxyBrokerSessionPrivateKey"] = + inproxyTestConfig.brokerSessionPrivateKey + + serverConfig["InproxyBrokerObfuscationRootSecret"] = + inproxyTestConfig.brokerObfuscationRootSecret + + serverConfig["InproxyBrokerServerEntrySignaturePublicKey"] = + inproxyTestConfig.brokerServerEntrySignaturePublicKey + + serverConfig["InproxyBrokerAllowCommonASNMatching"] = true + serverConfig["InproxyBrokerAllowBogonWebRTCConnections"] = true + } + + // Uncomment to enable SIGUSR2 profile dumps + //serverConfig["ProcessProfileOutputDirectory"] = "/tmp" + serverConfigJSON, _ = json.Marshal(serverConfig) uniqueUserLog := make(chan map[string]interface{}, 1) @@ -960,7 +1063,7 @@ func runServer(t *testing.T, runConfig *runServerConfig) { err := RunServices(serverConfigJSON) if err != nil { // TODO: wrong goroutine for t.FatalNow() - t.Errorf("error running server: %s", err) + t.Fatalf("error running server: %s", err) } }() @@ -982,7 +1085,7 @@ func runServer(t *testing.T, runConfig *runServerConfig) { select { case <-shutdownOk: case <-shutdownTimeout.C: - t.Errorf("server shutdown timeout exceeded") + t.Fatalf("server shutdown timeout exceeded") } } @@ -1042,7 +1145,7 @@ func runServer(t *testing.T, runConfig *runServerConfig) { runConfig.applyPrefix, runConfig.forceFragmenting, "consistent", - ) + inproxyTacticsParametersJSON) } p, _ := os.FindProcess(os.Getpid()) @@ -1094,8 +1197,8 @@ func runServer(t *testing.T, runConfig *runServerConfig) { "ClientPlatform" : "Android_10_com.test.app", "ClientVersion" : "0", "ClientFeatures" : %s, - "SponsorId" : "0", - "PropagationChannelId" : "0", + "SponsorId" : "0000000000000000", + "PropagationChannelId" : "0000000000000000", "DeviceLocation" : "gzzzz", "DeviceRegion" : "US", "DisableRemoteServerListFetcher" : true, @@ -1111,6 +1214,9 @@ func runServer(t *testing.T, runConfig *runServerConfig) { jsonLimitTLSProfiles, jsonNetworkID) + // Don't print initial config setup notices + psiphon.SetNoticeWriter(io.Discard) + clientConfig, err := psiphon.LoadConfig([]byte(clientConfigJSON)) if err != nil { t.Fatalf("error processing configuration file: %s", err) @@ -1174,6 +1280,46 @@ func runServer(t *testing.T, runConfig *runServerConfig) { clientConfig.MeekAdditionalHeaders = headers } + if runConfig.useLegacyAPIEncoding { + clientConfig.TargetAPIEncoding = protocol.PSIPHON_API_ENCODING_JSON + } + + if doInproxy { + + // Limitation: can't exercise 
DisableTunnels = true since the client + // is a singleton and so the single instance must act as both a + // client and proxy. This self-proxy scheme also requires setting + // InproxySkipAwaitFullyConnected. + + clientConfig.DisableTunnels = false + clientConfig.InproxyEnableProxy = true + clientConfig.InproxySkipAwaitFullyConnected = true + + clientConfig.InproxyProxySessionPrivateKey = inproxyTestConfig.proxySessionPrivateKey + clientConfig.InproxyMaxClients = 1 + clientConfig.InproxyLimitUpstreamBytesPerSecond = 0 + clientConfig.InproxyLimitDownstreamBytesPerSecond = 0 + clientConfig.ServerEntrySignaturePublicKey = inproxyTestConfig.brokerServerEntrySignaturePublicKey + + // Simulate a CDN adding required HTTP headers by injecting them at + // the client. + headers := make(http.Header) + for name, value := range inproxyTestConfig.brokerMeekRequiredHeaders { + headers.Add(name, value) + } + clientConfig.MeekAdditionalHeaders = headers + + // Configure the CAs required to verify the broker TLS certificate. + clientConfig.TrustedCACertificatesFilename = filepath.Join(testDataDirName, "rootCAs") + err = ioutil.WriteFile( + clientConfig.TrustedCACertificatesFilename, + []byte(inproxyTestConfig.brokerServerCertificate), + 0600) + if err != nil { + t.Fatalf("WriteFile failed: %s", err) + } + } + err = clientConfig.Commit(false) if err != nil { t.Fatalf("error committing configuration file: %s", err) @@ -1282,6 +1428,7 @@ func runServer(t *testing.T, runConfig *runServerConfig) { } connectedServer := make(chan struct{}, 1) + inproxyActivity := make(chan struct{}, 1) tunnelsEstablished := make(chan struct{}, 1) homepageReceived := make(chan struct{}, 1) slokSeeded := make(chan struct{}, 1) @@ -1293,13 +1440,13 @@ func runServer(t *testing.T, runConfig *runServerConfig) { psiphon.SetNoticeWriter(psiphon.NewNoticeReceiver( func(notice []byte) { - //fmt.Printf("%s\n", string(notice)) - noticeType, payload, err := psiphon.GetNotice(notice) if err != nil { return } + printNotice := false + switch noticeType { case "ConnectedServer": @@ -1335,7 +1482,6 @@ func runServer(t *testing.T, runConfig *runServerConfig) { } case "ServerAlert": - reason := payload["reason"].(string) actionURLsPayload := payload["actionURLs"].([]interface{}) actionURLs := make([]string, len(actionURLsPayload)) @@ -1350,6 +1496,21 @@ func runServer(t *testing.T, runConfig *runServerConfig) { case "Untunneled": sendNotificationReceived(untunneledPortForward) + case "InproxyProxyTotalActivity": + + // This assumes that both non-zero bytes up and down are + // reported in at least same notice, although there's some + // unlikely chance it's only one or the other. 
+ connectedClients := int(payload["connectedClients"].(float64)) + bytesUp := int(payload["totalBytesUp"].(float64)) + bytesDown := int(payload["totalBytesDown"].(float64)) + if connectedClients == 1 && bytesUp > 0 && bytesDown > 0 { + sendNotificationReceived(inproxyActivity) + } + } + + if printNotice { + fmt.Printf("%s\n", string(notice)) } })) @@ -1399,6 +1560,9 @@ func runServer(t *testing.T, runConfig *runServerConfig) { }() waitOnNotification(t, connectedServer, timeoutSignal, "connected server timeout exceeded") + if doInproxy { + waitOnNotification(t, inproxyActivity, timeoutSignal, "inproxy activity timeout exceeded") + } waitOnNotification(t, tunnelsEstablished, timeoutSignal, "tunnel established timeout exceeded") waitOnNotification(t, homepageReceived, timeoutSignal, "homepage received timeout exceeded") @@ -1430,8 +1594,10 @@ func runServer(t *testing.T, runConfig *runServerConfig) { livenessTestSize, runConfig.doBurstMonitor, false, - false, false, - "consistent") + runConfig.applyPrefix, + runConfig.forceFragmenting, + "consistent", + inproxyTacticsParametersJSON) p, _ := os.FindProcess(os.Getpid()) p.Signal(syscall.SIGUSR1) @@ -1442,9 +1608,6 @@ func runServer(t *testing.T, runConfig *runServerConfig) { expectTrafficFailure := runConfig.denyTrafficRules || (runConfig.omitAuthorization && runConfig.requireAuthorization) - // The client still reports zero domain_bytes when no port forwards are allowed (expectTrafficFailure) - expectDomainBytes := !runConfig.doChangeBytesConfig - if runConfig.doTunneledWebRequest { // Test: tunneled web site fetch @@ -1564,7 +1727,8 @@ func runServer(t *testing.T, runConfig *runServerConfig) { // without this delay. time.Sleep(100 * time.Millisecond) - expectClientBPFField := psiphon.ClientBPFEnabled() && doClientTactics + // For in-proxy tunnel protocols, client BPF tactics are currently ignored and not applied by the 2nd hop. + expectClientBPFField := psiphon.ClientBPFEnabled() && doClientTactics && !protocol.TunnelProtocolUsesInproxy(runConfig.tunnelProtocol) expectServerBPFField := ServerBPFEnabled() && protocol.TunnelProtocolIsDirect(runConfig.tunnelProtocol) && doServerTactics expectServerPacketManipulationField := runConfig.doPacketManipulation expectBurstFields := runConfig.doBurstMonitor @@ -1577,11 +1741,28 @@ func runServer(t *testing.T, runConfig *runServerConfig) { expectQUICVersion = limitQUICVersions[0] } expectDestinationBytesFields := runConfig.doDestinationBytes && !runConfig.doChangeBytesConfig + expectMeekHTTPVersion := "" + if protocol.TunnelProtocolUsesMeek(runConfig.tunnelProtocol) { + if protocol.TunnelProtocolUsesFrontedMeek(runConfig.tunnelProtocol) { + expectMeekHTTPVersion = "HTTP/2.0" + } else { + expectMeekHTTPVersion = "HTTP/1.1" + } + } + + // The client still reports zero domain_bytes when no port forwards are + // allowed (expectTrafficFailure). + // + // Limitation: this check is disabled in the in-proxy case since, in the + // self-proxy scheme, the proxy shuts down before the client can send its + // final status request. 
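A side note on the InproxyProxyTotalActivity handler earlier in this hunk: notice payloads are decoded into map[string]interface{}, and encoding/json represents every JSON number as float64, which is why connectedClients and the byte counters are converted through float64 before comparison. A standalone illustration; the field names come from the notice, everything else is invented for the example:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	notice := []byte(`{"connectedClients": 1, "totalBytesUp": 4096, "totalBytesDown": 8192}`)
	var payload map[string]interface{}
	if err := json.Unmarshal(notice, &payload); err != nil {
		panic(err)
	}
	// JSON numbers arrive as float64 when unmarshaled into interface{}.
	connectedClients := int(payload["connectedClients"].(float64))
	bytesUp := int(payload["totalBytesUp"].(float64))
	bytesDown := int(payload["totalBytesDown"].(float64))
	fmt.Println(connectedClients, bytesUp, bytesDown) // 1 4096 8192
}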
+ expectDomainBytes := !runConfig.doChangeBytesConfig && !doInproxy select { case logFields := <-serverTunnelLog: err := checkExpectedServerTunnelLogFields( runConfig, + doClientTactics, expectClientBPFField, expectServerBPFField, expectServerPacketManipulationField, @@ -1592,6 +1773,8 @@ func runServer(t *testing.T, runConfig *runServerConfig) { expectQUICVersion, expectDestinationBytesFields, passthroughAddress, + expectMeekHTTPVersion, + inproxyTestConfig, logFields) if err != nil { t.Fatalf("invalid server tunnel log fields: %s", err) @@ -1827,6 +2010,7 @@ func waitOnNotification(t *testing.T, c, timeoutSignal <-chan struct{}, timeoutM func checkExpectedServerTunnelLogFields( runConfig *runServerConfig, + expectAppliedTacticsTag bool, expectClientBPFField bool, expectServerBPFField bool, expectServerPacketManipulationField bool, @@ -1837,6 +2021,8 @@ func checkExpectedServerTunnelLogFields( expectQUICVersion string, expectDestinationBytesFields bool, expectPassthroughAddress *string, + expectMeekHTTPVersion string, + inproxyTestConfig *inproxyTestConfig, fields map[string]interface{}) error { // Limitations: @@ -1882,6 +2068,11 @@ func checkExpectedServerTunnelLogFields( } } + appliedTacticsTag := len(fields[tactics.APPLIED_TACTICS_TAG_PARAMETER_NAME].(string)) > 0 + if expectAppliedTacticsTag != appliedTacticsTag { + return fmt.Errorf("unexpected applied_tactics_tag") + } + if fields["host_id"].(string) != "example-host-id" { return fmt.Errorf("unexpected host_id '%s'", fields["host_id"]) } @@ -2022,7 +2213,8 @@ func checkExpectedServerTunnelLogFields( } } - if protocol.TunnelProtocolUsesMeek(runConfig.tunnelProtocol) && (runConfig.clientTunnelProtocol == "" || protocol.TunnelProtocolUsesMeekHTTPS(runConfig.clientTunnelProtocol)) { + if protocol.TunnelProtocolUsesMeek(runConfig.tunnelProtocol) && + (runConfig.clientTunnelProtocol == "" || protocol.TunnelProtocolUsesMeekHTTPS(runConfig.clientTunnelProtocol)) { for _, name := range []string{ "user_agent", @@ -2030,7 +2222,7 @@ func checkExpectedServerTunnelLogFields( "meek_cookie_size", "meek_limit_request", "meek_underlying_connection_count", - tactics.APPLIED_TACTICS_TAG_PARAMETER_NAME, + "meek_server_http_version", } { if fields[name] == nil || fmt.Sprintf("%s", fields[name]) == "" { return fmt.Errorf("missing expected field '%s'", name) @@ -2040,6 +2232,10 @@ func checkExpectedServerTunnelLogFields( if !common.Contains(testUserAgents, fields["user_agent"].(string)) { return fmt.Errorf("unexpected user_agent '%s'", fields["user_agent"]) } + + if fields["meek_server_http_version"].(string) != expectMeekHTTPVersion { + return fmt.Errorf("unexpected meek_server_http_version '%s'", fields["meek_server_http_version"]) + } } if protocol.TunnelProtocolUsesMeekHTTP(runConfig.tunnelProtocol) { @@ -2073,7 +2269,8 @@ func checkExpectedServerTunnelLogFields( } } - if protocol.TunnelProtocolUsesMeekHTTPS(runConfig.tunnelProtocol) && (runConfig.clientTunnelProtocol == "" || protocol.TunnelProtocolUsesMeekHTTPS(runConfig.clientTunnelProtocol)) { + if protocol.TunnelProtocolUsesMeekHTTPS(runConfig.tunnelProtocol) && + (runConfig.clientTunnelProtocol == "" || protocol.TunnelProtocolUsesMeekHTTPS(runConfig.clientTunnelProtocol)) { for _, name := range []string{ "tls_profile", @@ -2149,6 +2346,50 @@ func checkExpectedServerTunnelLogFields( } } + if protocol.TunnelProtocolUsesInproxy(runConfig.tunnelProtocol) { + + for _, name := range []string{ + + // Fields sent by the broker and populated via + // inproxy.ServerBrokerSessions.HandlePacket + + 
"inproxy_broker_id", + "inproxy_connection_id", + "inproxy_proxy_id", + "inproxy_matched_common_compartments", + "inproxy_proxy_nat_type", + "inproxy_client_nat_type", + + // Fields sent by the client + + "inproxy_broker_transport", + "inproxy_broker_fronting_provider_id", + "inproxy_broker_dial_address", + "inproxy_broker_resolved_ip_address", + "inproxy_webrtc_randomize_dtls", + "inproxy_webrtc_padded_messages_sent", + "inproxy_webrtc_padded_messages_received", + "inproxy_webrtc_decoy_messages_sent", + "inproxy_webrtc_decoy_messages_received", + } { + if fields[name] == nil || fmt.Sprintf("%s", fields[name]) == "" { + return fmt.Errorf("missing expected field '%s'", name) + } + } + + if fields["inproxy_broker_id"].(string) != inproxyTestConfig.brokerSessionPublicKeyCurve25519 { + return fmt.Errorf("unexpected inproxy_broker_id '%s'", fields["inproxy_broker_id"]) + } + + if fields["inproxy_proxy_id"].(string) != inproxyTestConfig.proxySessionPublicKeyCurve25519 { + return fmt.Errorf("unexpected inproxy_proxy_id '%s'", fields["inproxy_proxy_id"]) + } + + if fields["inproxy_broker_fronting_provider_id"].(string) != inproxyTestConfig.brokerFrontingProviderID { + return fmt.Errorf("unexpected inproxy_broker_fronting_provider_id '%s'", fields["inproxy_broker_fronting_provider_id"]) + } + } + if runConfig.applyPrefix { if fields["ossh_prefix"] == nil || fmt.Sprintf("%s", fields["ossh_prefix"]) == "" { @@ -2687,7 +2928,7 @@ func pavePsinetDatabaseFile( discoveryServers []*psinet.DiscoveryServer) (string, string) { if sponsorID == "" { - sponsorID = prng.HexString(8) + sponsorID = strings.ToUpper(prng.HexString(8)) } defaultSponsorID := "" @@ -2941,7 +3182,7 @@ func paveOSLConfigFile(t *testing.T, oslConfigFilename string) string { } ` - propagationChannelID := prng.HexString(8) + propagationChannelID := strings.ToUpper(prng.HexString(8)) now := time.Now().UTC() epoch := now.Truncate(720 * time.Hour) @@ -2961,8 +3202,11 @@ func paveOSLConfigFile(t *testing.T, oslConfigFilename string) string { } func paveTacticsConfigFile( - t *testing.T, tacticsConfigFilename string, - tacticsRequestPublicKey, tacticsRequestPrivateKey, tacticsRequestObfuscatedKey string, + t *testing.T, + tacticsConfigFilename string, + tacticsRequestPublicKey string, + tacticsRequestPrivateKey string, + tacticsRequestObfuscatedKey string, tunnelProtocol string, propagationChannelID string, livenessTestSize int, @@ -2970,7 +3214,8 @@ func paveTacticsConfigFile( doDestinationBytes bool, applyOsshPrefix bool, enableOsshPrefixFragmenting bool, - discoveryStategy string) { + discoveryStategy string, + inproxyParametersJSON string) { // Setting LimitTunnelProtocols passively exercises the // server-side LimitTunnelProtocols enforcement. 
@@ -2986,7 +3231,8 @@ func paveTacticsConfigFile( "Parameters" : { %s %s - %s + %s + %s "LimitTunnelProtocols" : ["%s"], "FragmentorLimitProtocols" : ["%s"], "FragmentorProbability" : 1.0, @@ -3021,7 +3267,7 @@ func paveTacticsConfigFile( "ServerPacketManipulationSpecs" : [{"Name": "test-packetman-spec", "PacketSpecs": [["TCP-flags S"]]}], "ServerPacketManipulationProbability" : 1.0, "ServerProtocolPacketManipulations": {"All" : ["test-packetman-spec"]}, - "ServerDiscoveryStrategy": "%s" + "ServerDiscoveryStrategy": "%s" } }, "FilteredTactics" : [ @@ -3089,14 +3335,20 @@ func paveTacticsConfigFile( tacticsConfigJSON := fmt.Sprintf( tacticsConfigJSONFormat, - tacticsRequestPublicKey, tacticsRequestPrivateKey, tacticsRequestObfuscatedKey, + tacticsRequestPublicKey, + tacticsRequestPrivateKey, + tacticsRequestObfuscatedKey, burstParameters, destinationBytesParameters, osshPrefix, + inproxyParametersJSON, tunnelProtocol, tunnelProtocol, tunnelProtocol, - livenessTestSize, livenessTestSize, livenessTestSize, livenessTestSize, + livenessTestSize, + livenessTestSize, + livenessTestSize, + livenessTestSize, discoveryStategy, propagationChannelID, strings.ReplaceAll(testCustomHostNameRegex, `\`, `\\`), @@ -3119,6 +3371,229 @@ func paveBlocklistFile(t *testing.T, blocklistFilename string) { } } +type inproxyTestConfig struct { + tacticsParametersJSON string + + addMeekServerForBroker bool + brokerIPAddress string + brokerPort int + brokerSessionPublicKey string + brokerSessionPublicKeyCurve25519 string + brokerSessionPrivateKey string + brokerObfuscationRootSecret string + brokerServerEntrySignaturePublicKey string + brokerFrontingProviderID string + brokerServerCertificate string + brokerServerPrivateKey string + brokerMeekRequiredHeaders map[string]string + + proxySessionPublicKey string + proxySessionPublicKeyCurve25519 string + proxySessionPrivateKey string +} + +func generateInproxyTestConfig( + addMeekServerForBroker bool, + doTargetBrokerSpecs bool, + brokerIPAddress string, + brokerPort int, + serverEntrySignaturePublicKey string) (*inproxyTestConfig, error) { + + // Generate in-proxy configuration. + // + // In this test, a single common compartment ID is issued to all clients; + // the test client will get it via tactics. + // + // TODO: exercise personal compartment IDs + // + // Because of singletons in the Psiphon client, there can only be a single + // Psiphon client instance in this test process, and so it must act as + // it's own in-proxy proxy. + // + // To minimize external dependencies, STUN testing is disabled here; it is + // exercised in the common/inproxy package tests. + // + // InproxyBrokerAllowCommonASNMatching and + // InproxyBrokerAllowBogonWebRTCConnections must be set to true in the + // server/broker config, to allow matches with the same local network + // address. InproxyDisableIPv6ICECandidates is turned on, in tactics, + // since the test GeoIP database is IPv4-only (see paveGeoIPDatabaseFiles). 
+ + commonCompartmentID, err := inproxy.MakeID() + if err != nil { + return nil, errors.Trace(err) + } + commonCompartmentIDStr := commonCompartmentID.String() + + brokerSessionPrivateKey, err := inproxy.GenerateSessionPrivateKey() + if err != nil { + return nil, errors.Trace(err) + } + brokerSessionPrivateKeyStr := brokerSessionPrivateKey.String() + + brokerSessionPublicKey, err := brokerSessionPrivateKey.GetPublicKey() + if err != nil { + return nil, errors.Trace(err) + } + brokerSessionPublicKeyStr := brokerSessionPublicKey.String() + + brokerSessionPublicKeyCurve25519, err := brokerSessionPublicKey.ToCurve25519() + if err != nil { + return nil, errors.Trace(err) + } + brokerSessionPublicKeyCurve25519Str := brokerSessionPublicKeyCurve25519.String() + + brokerRootObfuscationSecret, err := inproxy.GenerateRootObfuscationSecret() + if err != nil { + return nil, errors.Trace(err) + } + brokerRootObfuscationSecretStr := brokerRootObfuscationSecret.String() + + brokerFrontingProviderID := strings.ToUpper(prng.HexString(8)) + + brokerFrontingHostName := values.GetHostName() + + brokerServerCertificate, brokerServerPrivateKey, brokerVerifyPin, err := + common.GenerateWebServerCertificate(brokerFrontingHostName) + if err != nil { + return nil, errors.Trace(err) + } + + brokerMeekRequiredHeaders := map[string]string{"X-MeekRequiredHeader": prng.HexString(32)} + + proxySessionPrivateKey, err := inproxy.GenerateSessionPrivateKey() + if err != nil { + return nil, errors.Trace(err) + } + proxySessionPrivateKeyStr := proxySessionPrivateKey.String() + + proxySessionPublicKey, err := proxySessionPrivateKey.GetPublicKey() + if err != nil { + return nil, errors.Trace(err) + } + proxySessionPublicKeyStr := proxySessionPublicKey.String() + + proxySessionPublicKeyCurve25519, err := proxySessionPublicKey.ToCurve25519() + if err != nil { + return nil, errors.Trace(err) + } + proxySessionPublicKeyCurve25519Str := proxySessionPublicKeyCurve25519.String() + + address := net.JoinHostPort(brokerIPAddress, strconv.Itoa(brokerPort)) + addressRegex := strings.ReplaceAll(address, ".", "\\\\.") + + skipVerify := false + verifyServerName := brokerFrontingHostName + verifyPins := fmt.Sprintf("[\"%s\"]", brokerVerifyPin) + if prng.FlipCoin() { + skipVerify = true + verifyServerName = "" + verifyPins = "[]" + } + + brokerSpecsJSONFormat := ` + [{ + "BrokerPublicKey": "%s", + "BrokerRootObfuscationSecret": "%s", + "BrokerFrontingSpecs": [{ + "FrontingProviderID": "%s", + "Addresses": ["%s"], + "DisableSNI": true, + "SkipVerify": %v, + "VerifyServerName": "%s", + "VerifyPins": %s, + "Host": "%s" + }] + }] + ` + + validBrokerSpecsJSON := fmt.Sprintf( + brokerSpecsJSONFormat, + brokerSessionPublicKeyStr, + brokerRootObfuscationSecretStr, + brokerFrontingProviderID, + addressRegex, + skipVerify, + verifyServerName, + verifyPins, + brokerFrontingHostName) + + otherSessionPrivateKey, _ := inproxy.GenerateSessionPrivateKey() + otherSessionPublicKey, _ := otherSessionPrivateKey.GetPublicKey() + otherRootObfuscationSecret, _ := inproxy.GenerateRootObfuscationSecret() + + invalidBrokerSpecsJSON := fmt.Sprintf( + brokerSpecsJSONFormat, + otherSessionPublicKey.String(), + otherRootObfuscationSecret.String(), + prng.HexString(16), + prng.HexString(16), + false, + prng.HexString(16), + fmt.Sprintf("[\"%s\"]", prng.HexString(16)), + prng.HexString(16)) + + var brokerSpecsJSON, proxyBrokerSpecsJSON, clientBrokerSpecsJSON string + if doTargetBrokerSpecs { + // invalidBrokerSpecsJSON should be ignored when specific proxy/client + // broker 
specs are set. + brokerSpecsJSON = invalidBrokerSpecsJSON + proxyBrokerSpecsJSON = validBrokerSpecsJSON + clientBrokerSpecsJSON = validBrokerSpecsJSON + } else { + brokerSpecsJSON = validBrokerSpecsJSON + proxyBrokerSpecsJSON = "[]" + clientBrokerSpecsJSON = "[]" + } + + tacticsParametersJSONFormat := ` + "InproxyAllowProxy": true, + "InproxyAllowClient": true, + "InproxyTunnelProtocolSelectionProbability": 1.0, + "InproxyAllBrokerPublicKeys": ["%s", "%s"], + "InproxyBrokerSpecs": %s, + "InproxyProxyBrokerSpecs": %s, + "InproxyClientBrokerSpecs": %s, + "InproxyAllCommonCompartmentIDs": ["%s"], + "InproxyCommonCompartmentIDs": ["%s"], + "InproxyClientDiscoverNATProbability": 0.0, + "InproxyDisableSTUN": true, + "InproxyDisablePortMapping": true, + "InproxyDisableIPv6ICECandidates": true, + ` + + tacticsParametersJSON := fmt.Sprintf( + tacticsParametersJSONFormat, + brokerSessionPublicKeyStr, + otherSessionPublicKey.String(), + brokerSpecsJSON, + proxyBrokerSpecsJSON, + clientBrokerSpecsJSON, + commonCompartmentIDStr, + commonCompartmentIDStr) + + config := &inproxyTestConfig{ + tacticsParametersJSON: tacticsParametersJSON, + addMeekServerForBroker: addMeekServerForBroker, + brokerIPAddress: brokerIPAddress, + brokerPort: brokerPort, + brokerSessionPrivateKey: brokerSessionPrivateKeyStr, + brokerSessionPublicKey: brokerSessionPublicKeyStr, + brokerSessionPublicKeyCurve25519: brokerSessionPublicKeyCurve25519Str, + brokerObfuscationRootSecret: brokerRootObfuscationSecretStr, + brokerServerEntrySignaturePublicKey: serverEntrySignaturePublicKey, + brokerFrontingProviderID: brokerFrontingProviderID, + brokerServerCertificate: brokerServerCertificate, + brokerServerPrivateKey: brokerServerPrivateKey, + brokerMeekRequiredHeaders: brokerMeekRequiredHeaders, + proxySessionPublicKey: proxySessionPublicKeyStr, + proxySessionPublicKeyCurve25519: proxySessionPublicKeyCurve25519Str, + proxySessionPrivateKey: proxySessionPrivateKeyStr, + } + + return config, nil +} + type pruneServerEntryTestCase struct { IPAddress string ExplicitTag bool @@ -3177,7 +3652,6 @@ func initializePruneServerEntriesTest( _, _, _, _, encodedServerEntry, err := GenerateConfig( &GenerateConfigParams{ ServerIPAddress: testCase.IPAddress, - WebServerPort: 8000, TunnelProtocolPorts: map[string]int{runConfig.tunnelProtocol: dialPort}, }) if err != nil { @@ -3197,6 +3671,10 @@ func initializePruneServerEntriesTest( t.Fatalf("DecodeServerEntryFields failed: %s", err) } + // GenerateConfig now generates an explict tag for each server entry. + // To test the legacy case with no tag, delete it here. 
+ delete(serverEntryFields, "tag") + if testCase.ExplicitTag { testCase.ExpectedTag = prng.Base64String(32) serverEntryFields.SetTag(testCase.ExpectedTag) @@ -3303,6 +3781,8 @@ func storePruneServerEntriesTest( return runConfig.tunnelProtocol, true }, serverEntry, + nil, + nil, false, 0, 0) diff --git a/psiphon/server/services.go b/psiphon/server/services.go index 25694e294..7a6de60e8 100644 --- a/psiphon/server/services.go +++ b/psiphon/server/services.go @@ -42,6 +42,7 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tactics" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tun" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/server/psinet" + "github.com/shirou/gopsutil/v4/cpu" ) // RunServices initializes support functions including logging and GeoIP services; @@ -182,10 +183,12 @@ func RunServices(configJSON []byte) (retErr error) { defer ticker.Stop() logNetworkBytes := true + logCPU := true previousNetworkBytesReceived, previousNetworkBytesSent, err := getNetworkBytesTransferred() if err != nil { - log.WithTraceFields(LogFields{"error": errors.Trace(err)}).Error("failed to get initial network bytes transferred") + log.WithTraceFields(LogFields{"error": errors.Trace(err)}).Error( + "failed to get initial network bytes transferred") // If getNetworkBytesTransferred fails, stop logging network // bytes for the lifetime of this process, in case there's a @@ -194,24 +197,49 @@ func RunServices(configJSON []byte) (retErr error) { logNetworkBytes = false } + // Establish initial previous CPU stats. The previous CPU stats + // are stored internally by gopsutil/cpu. + _, err = getCPUPercent() + if err != nil { + log.WithTraceFields(LogFields{"error": errors.Trace(err)}).Error( + "failed to get initial CPU percent") + + logCPU = false + } + for { select { case <-shutdownBroadcast: return case <-ticker.C: - var networkBytesReceived, networkBytesSent int64 + var networkBytesReceived, networkBytesSent int64 if logNetworkBytes { currentNetworkBytesReceived, currentNetworkBytesSent, err := getNetworkBytesTransferred() if err != nil { - log.WithTraceFields(LogFields{"error": errors.Trace(err)}).Error("failed to get current network bytes transferred") + log.WithTraceFields(LogFields{"error": errors.Trace(err)}).Error( + "failed to get current network bytes transferred") logNetworkBytes = false } else { networkBytesReceived = currentNetworkBytesReceived - previousNetworkBytesReceived networkBytesSent = currentNetworkBytesSent - previousNetworkBytesSent - previousNetworkBytesReceived, previousNetworkBytesSent = currentNetworkBytesReceived, currentNetworkBytesSent + previousNetworkBytesReceived, previousNetworkBytesSent = + currentNetworkBytesReceived, currentNetworkBytesSent + } + } + + var CPUPercent float64 + if logCPU { + recentCPUPercent, err := getCPUPercent() + if err != nil { + log.WithTraceFields(LogFields{"error": errors.Trace(err)}).Error( + "failed to get recent CPU percent") + logCPU = false + + } else { + CPUPercent = recentCPUPercent } } @@ -220,7 +248,8 @@ func RunServices(configJSON []byte) (retErr error) { // networkBytesSent may be < 0. logServerLoad will not // log these negative values. 
- logServerLoad(support, logNetworkBytes, networkBytesReceived, networkBytesSent) + logServerLoad( + support, logNetworkBytes, networkBytesReceived, networkBytesSent, logCPU, CPUPercent) } } }() @@ -243,18 +272,6 @@ func RunServices(configJSON []byte) (retErr error) { }() } - if config.RunWebServer() { - waitGroup.Add(1) - go func() { - defer waitGroup.Done() - err := RunWebServer(support, shutdownBroadcast) - select { - case errorChannel <- err: - default: - } - }() - } - // The tunnel server is always run; it launches multiple // listeners, depending on which tunnel protocols are enabled. waitGroup.Add(1) @@ -336,7 +353,7 @@ loop: case signalProcessProfiles <- struct{}{}: default: } - logServerLoad(support, false, 0, 0) + logServerLoad(support, false, 0, 0, false, 0) case <-systemStopSignal: log.WithTrace().Info("shutdown by system") @@ -414,7 +431,26 @@ func outputProcessProfiles(config *Config, filenameSuffix string) { } } -func logServerLoad(support *SupportServices, logNetworkBytes bool, networkBytesReceived int64, networkBytesSent int64) { +// getCPUPercent returns the overall system CPU percent (not the percent used +// by this process), across all cores. +func getCPUPercent() (float64, error) { + values, err := cpu.Percent(0, false) + if err != nil { + return 0, errors.Trace(err) + } + if len(values) != 1 { + return 0, errors.TraceNew("unexpected cpu.Percent return value") + } + return values[0], nil +} + +func logServerLoad( + support *SupportServices, + logNetworkBytes bool, + networkBytesReceived int64, + networkBytesSent int64, + logCPU bool, + CPUPercent float64) { serverLoad := getRuntimeMetrics() @@ -433,6 +469,10 @@ func logServerLoad(support *SupportServices, logNetworkBytes bool, networkBytesR } } + if logCPU { + serverLoad["cpu_percent"] = CPUPercent + } + establishTunnels, establishLimitedCount := support.TunnelServer.GetEstablishTunnelsMetrics() serverLoad["establish_tunnels"] = establishTunnels @@ -474,7 +514,7 @@ func logIrregularTunnel( support *SupportServices, listenerTunnelProtocol string, listenerPort int, - clientIP string, + peerIP string, tunnelError error, logFields LogFields) { @@ -485,8 +525,12 @@ func logIrregularTunnel( logFields["event_name"] = "irregular_tunnel" logFields["listener_protocol"] = listenerTunnelProtocol logFields["listener_port_number"] = listenerPort - support.GeoIPService.Lookup(clientIP).SetLogFields(logFields) logFields["tunnel_error"] = tunnelError.Error() + + // Note: logging with the "client_" prefix for legacy compatibility; it + // would be more correct to use the prefix "peer_". 
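Backing up to the services.go hunk: getCPUPercent relies on gopsutil's stateful zero-interval mode, where cpu.Percent(0, false) compares against the times recorded on the previous call, so the first call only seeds a baseline (as the new initialization code does) and later calls report system-wide utilization since then. A minimal usage sketch, assuming the same github.com/shirou/gopsutil/v4/cpu import used by the change:

package main

import (
	"fmt"
	"time"

	"github.com/shirou/gopsutil/v4/cpu"
)

func main() {
	// First call: establishes the internal baseline; only the error is
	// checked, mirroring the initialization added to RunServices.
	if _, err := cpu.Percent(0, false); err != nil {
		fmt.Println("failed to seed CPU baseline:", err)
		return
	}

	time.Sleep(5 * time.Second)

	// Subsequent call: overall (percpu=false) CPU percent since the
	// previous call, returned as a single-element slice.
	values, err := cpu.Percent(0, false)
	if err != nil || len(values) != 1 {
		fmt.Println("failed to get CPU percent:", err)
		return
	}
	fmt.Printf("system CPU over the last interval: %.1f%%\n", values[0])
}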
+ support.GeoIPService.Lookup(peerIP).SetClientLogFields(logFields) + log.LogRawFieldsWithTimestamp(logFields) } @@ -610,6 +654,13 @@ func (support *SupportServices) Reload() { support.ReplayCache.Flush() support.ServerTacticsParametersCache.Flush() + err := support.TunnelServer.ReloadTactics() + if err != nil { + log.WithTraceFields( + LogFields{"error": errors.Trace(err)}).Warning( + "failed to reload tunnel server tactics") + } + if support.Config.RunPacketManipulator { err := reloadPacketManipulationSpecs(support) if err != nil { diff --git a/psiphon/server/sessionID_test.go b/psiphon/server/sessionID_test.go index 97296274a..0f97c8f5f 100644 --- a/psiphon/server/sessionID_test.go +++ b/psiphon/server/sessionID_test.go @@ -49,10 +49,8 @@ func TestDuplicateSessionID(t *testing.T) { // Configure server generateConfigParams := &GenerateConfigParams{ - ServerIPAddress: "127.0.0.1", - EnableSSHAPIRequests: true, - WebServerPort: 8000, - TunnelProtocolPorts: map[string]int{"OSSH": 4000}, + ServerIPAddress: "127.0.0.1", + TunnelProtocolPorts: map[string]int{"OSSH": 4000}, } serverConfigJSON, _, _, _, encodedServerEntry, err := GenerateConfig(generateConfigParams) @@ -126,8 +124,8 @@ func TestDuplicateSessionID(t *testing.T) { clientConfigJSONTemplate := ` { "DataRootDirectory" : "%s", - "SponsorId" : "0", - "PropagationChannelId" : "0", + "SponsorId" : "0000000000000000", + "PropagationChannelId" : "0000000000000000", "SessionID" : "00000000000000000000000000000000" }` @@ -171,6 +169,8 @@ func TestDuplicateSessionID(t *testing.T) { func(_ *protocol.ServerEntry, _ string) bool { return false }, func(_ *protocol.ServerEntry) (string, bool) { return "OSSH", true }, serverEntry, + nil, + nil, false, 0, 0) diff --git a/psiphon/server/tactics.go b/psiphon/server/tactics.go index 4b5483ba9..0445fa824 100644 --- a/psiphon/server/tactics.go +++ b/psiphon/server/tactics.go @@ -152,7 +152,8 @@ func (c *ServerTacticsParametersCache) Get( if err != nil { return nilAccessor, errors.Trace(err) } - _, err = params.Set("", false, tactics.Parameters) + _, err = params.Set( + "", parameters.ValidationServerSide, tactics.Parameters) if err != nil { return nilAccessor, errors.Trace(err) } diff --git a/psiphon/server/tlsTunnel.go b/psiphon/server/tlsTunnel.go index e840617c7..57e78452e 100644 --- a/psiphon/server/tlsTunnel.go +++ b/psiphon/server/tlsTunnel.go @@ -95,7 +95,7 @@ func (server *TLSTunnelServer) makeTLSTunnelConfig() (*tls.Config, error) { // Limitation: certificate value changes on restart. - certificate, privateKey, err := common.GenerateWebServerCertificate(values.GetHostName()) + certificate, privateKey, _, err := common.GenerateWebServerCertificate(values.GetHostName()) if err != nil { return nil, errors.Trace(err) } @@ -172,9 +172,10 @@ func (server *TLSTunnelServer) makeTLSTunnelConfig() (*tls.Config, error) { // strictMode is true as legitimate clients never retry TLS // connections using a previous random value. 
+ strictMode := true ok, logFields := server.obfuscatorSeedHistory.AddNewWithTTL( - true, + strictMode, clientIP, "client-random", clientRandom, diff --git a/psiphon/server/tunnelServer.go b/psiphon/server/tunnelServer.go index cf640589b..f49d27d6d 100644 --- a/psiphon/server/tunnelServer.go +++ b/psiphon/server/tunnelServer.go @@ -32,6 +32,7 @@ import ( "io/ioutil" "net" "strconv" + "strings" "sync" "sync/atomic" "syscall" @@ -41,6 +42,7 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/accesscontrol" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/crypto/ssh" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/monotime" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/obfuscator" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/osl" @@ -67,11 +69,13 @@ const ( SSH_KEEP_ALIVE_PAYLOAD_MAX_BYTES = 256 SSH_SEND_OSL_INITIAL_RETRY_DELAY = 30 * time.Second SSH_SEND_OSL_RETRY_FACTOR = 2 + GEOIP_SESSION_CACHE_TTL = 60 * time.Minute OSL_SESSION_CACHE_TTL = 5 * time.Minute MAX_AUTHORIZATIONS = 16 PRE_HANDSHAKE_RANDOM_STREAM_MAX_COUNT = 1 RANDOM_STREAM_MAX_BYTES = 10485760 ALERT_REQUEST_QUEUE_BUFFER_SIZE = 16 + SSH_MAX_CLIENT_COUNT = 100000 ) // TunnelServer is the main server that accepts Psiphon client @@ -163,17 +167,20 @@ func (server *TunnelServer) Run() error { } else if protocol.TunnelProtocolUsesQUIC(tunnelProtocol) { + // in-proxy QUIC tunnel protocols don't support gQUIC. + enableGQUIC := support.Config.EnableGQUIC && !protocol.TunnelProtocolUsesInproxy(tunnelProtocol) + logTunnelProtocol := tunnelProtocol listener, err = quic.Listen( CommonLogger(log), - func(clientAddress string, err error, logFields common.LogFields) { + func(peerAddress string, err error, logFields common.LogFields) { logIrregularTunnel( - support, logTunnelProtocol, listenPort, clientAddress, + support, logTunnelProtocol, listenPort, peerAddress, errors.Trace(err), LogFields(logFields)) }, localAddress, support.Config.ObfuscatedSSHKey, - support.Config.EnableGQUIC) + enableGQUIC) } else if protocol.TunnelProtocolUsesRefractionNetworking(tunnelProtocol) { @@ -299,58 +306,10 @@ func (server *TunnelServer) ResetAllClientOSLConfigs() { server.sshServer.resetAllClientOSLConfigs() } -// SetClientHandshakeState sets the handshake state -- that it completed and -// what parameters were passed -- in sshClient. This state is used for allowing -// port forwards and for future traffic rule selection. SetClientHandshakeState -// also triggers an immediate traffic rule re-selection, as the rules selected -// upon tunnel establishment may no longer apply now that handshake values are -// set. -// -// The authorizations received from the client handshake are verified and the -// resulting list of authorized access types are applied to the client's tunnel -// and traffic rules. -// -// A list of active authorization IDs, authorized access types, and traffic -// rate limits are returned for responding to the client and logging. -func (server *TunnelServer) SetClientHandshakeState( - sessionID string, - state handshakeState, - authorizations []string) (*handshakeStateInfo, error) { - - return server.sshServer.setClientHandshakeState(sessionID, state, authorizations) -} - -// GetClientHandshaked indicates whether the client has completed a handshake -// and whether its traffic rules are immediately exhausted. 
-func (server *TunnelServer) GetClientHandshaked( - sessionID string) (bool, bool, error) { - - return server.sshServer.getClientHandshaked(sessionID) -} - -// GetClientDisableDiscovery indicates whether discovery is disabled for the -// client corresponding to sessionID. -func (server *TunnelServer) GetClientDisableDiscovery( - sessionID string) (bool, error) { - - return server.sshServer.getClientDisableDiscovery(sessionID) -} - -// UpdateClientAPIParameters updates the recorded handshake API parameters for -// the client corresponding to sessionID. -func (server *TunnelServer) UpdateClientAPIParameters( - sessionID string, - apiParams common.APIParameters) error { - - return server.sshServer.updateClientAPIParameters(sessionID, apiParams) -} - -// AcceptClientDomainBytes indicates whether to accept domain bytes reported -// by the client. -func (server *TunnelServer) AcceptClientDomainBytes( - sessionID string) (bool, error) { - - return server.sshServer.acceptClientDomainBytes(sessionID) +// ReloadTactics signals components that use server-side tactics for one-time +// initialization to reload and use potentially changed parameters. +func (server *TunnelServer) ReloadTactics() error { + return errors.Trace(server.sshServer.reloadTactics()) } // SetEstablishTunnels sets whether new tunnels may be established or not. @@ -376,23 +335,32 @@ type sshServer struct { // Note: 64-bit ints used with atomic operations are placed // at the start of struct to ensure 64-bit alignment. // (https://golang.org/pkg/sync/atomic/#pkg-note-BUG) - lastAuthLog int64 - authFailedCount int64 - establishLimitedCount int64 - support *SupportServices - establishTunnels int32 - concurrentSSHHandshakes semaphore.Semaphore - shutdownBroadcast <-chan struct{} - sshHostKey ssh.Signer - clientsMutex sync.Mutex - stoppingClients bool - acceptedClientCounts map[string]map[string]int64 - clients map[string]*sshClient - oslSessionCacheMutex sync.Mutex - oslSessionCache *cache.Cache + lastAuthLog int64 + authFailedCount int64 + establishLimitedCount int64 + support *SupportServices + establishTunnels int32 + concurrentSSHHandshakes semaphore.Semaphore + shutdownBroadcast <-chan struct{} + sshHostKey ssh.Signer + obfuscatorSeedHistory *obfuscator.SeedHistory + inproxyBrokerSessions *inproxy.ServerBrokerSessions + + clientsMutex sync.Mutex + stoppingClients bool + acceptedClientCounts map[string]map[string]int64 + clients map[string]*sshClient + + geoIPSessionCache *cache.Cache + + oslSessionCacheMutex sync.Mutex + oslSessionCache *cache.Cache + authorizationSessionIDsMutex sync.Mutex authorizationSessionIDs map[string]string - obfuscatorSeedHistory *obfuscator.SeedHistory + + meekServersMutex sync.Mutex + meekServers []*MeekServer } func newSSHServer( @@ -415,6 +383,18 @@ func newSSHServer( concurrentSSHHandshakes = semaphore.New(support.Config.MaxConcurrentSSHHandshakes) } + // The geoIPSessionCache replaces the legacy cache that used to be in + // GeoIPServices and was used for the now-retired web API. That cache was + // also used for, and now geoIPSessionCache provides: + // - Determining first-tunnel-in-session (from a single server's point of + // view) + // - GeoIP for duplicate authorizations logic. + // + // TODO: combine geoIPSessionCache with oslSessionCache; need to deal with + // OSL flush on hot reload and reconcile differing TTLs. + + geoIPSessionCache := cache.New(GEOIP_SESSION_CACHE_TTL, 1*time.Minute) + // The OSL session cache temporarily retains OSL seed state // progress for disconnected clients. 
This enables clients // that disconnect and immediately reconnect to the same @@ -428,7 +408,72 @@ func newSSHServer( // were known, infer some activity. oslSessionCache := cache.New(OSL_SESSION_CACHE_TTL, 1*time.Minute) - return &sshServer{ + // inproxyBrokerSessions are the secure in-proxy broker/server sessions + // used to relay information from the broker to the server, including the + // original in-proxy client IP and the in-proxy proxy ID. + // + // Only brokers with public keys configured in the + // InproxyAllBrokerPublicKeys tactic parameter are allowed to connect to + // the server, and brokers verify the server's public key via the + // InproxySessionPublicKey server entry field. + // + // Sessions are initialized and run for all psiphond instances running any + // in-proxy tunnel protocol. + + var inproxyBrokerSessions *inproxy.ServerBrokerSessions + + runningInproxy := false + for tunnelProtocol, _ := range support.Config.TunnelProtocolPorts { + if protocol.TunnelProtocolUsesInproxy(tunnelProtocol) { + runningInproxy = true + break + } + } + + if runningInproxy { + + inproxyPrivateKey, err := inproxy.SessionPrivateKeyFromString( + support.Config.InproxyServerSessionPrivateKey) + if err != nil { + return nil, errors.Trace(err) + } + + inproxyObfuscationSecret, err := inproxy.ObfuscationSecretFromString( + support.Config.InproxyServerObfuscationRootSecret) + if err != nil { + return nil, errors.Trace(err) + } + + // The expected broker public keys are set in reloadTactics directly + // below, so none are set here. + inproxyBrokerSessions, err = inproxy.NewServerBrokerSessions( + inproxyPrivateKey, inproxyObfuscationSecret, nil) + if err != nil { + return nil, errors.Trace(err) + } + } + + // Limitation: rate limiting and resource limiting are handled by external + // components, and sshServer enforces only a sanity check limit on the + // number of entries in sshServer.clients; and no limit on the number of + // entries in sshServer.geoIPSessionCache or sshServer.oslSessionCache. + // + // To avoid resource exhaustion, this implementation relies on: + // + // - Per-peer IP address and/or overall network connection rate limiting, + // provided by iptables as configured by Psiphon automation + // (https://github.com/Psiphon-Inc/psiphon-automation/blob/ + // 4d913d13339d7d54c053a01e5a928e343045cde8/Automation/psi_ops_install.py#L1451). + // + // - Host CPU/memory/network monitoring and signalling, installed Psiphon + // automation + // (https://github.com/Psiphon-Inc/psiphon-automation/blob/ + // 4d913d13339d7d54c053a01e5a928e343045cde8/Automation/psi_ops_install.py#L935). + // When resource usage meets certain thresholds, the monitoring signals + // this process with SIGTSTP or SIGCONT, and handlers call + // sshServer.setEstablishTunnels to stop or resume accepting new clients. + + sshServer := &sshServer{ support: support, establishTunnels: 1, concurrentSSHHandshakes: concurrentSSHHandshakes, @@ -436,10 +481,21 @@ func newSSHServer( sshHostKey: signer, acceptedClientCounts: make(map[string]map[string]int64), clients: make(map[string]*sshClient), + geoIPSessionCache: geoIPSessionCache, oslSessionCache: oslSessionCache, authorizationSessionIDs: make(map[string]string), obfuscatorSeedHistory: obfuscator.NewSeedHistory(nil), - }, nil + inproxyBrokerSessions: inproxyBrokerSessions, + } + + // Initialize components that use server-side tactics and which reload on + // tactics change events. 
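The new geoIPSessionCache, like the existing oslSessionCache, follows the cache.New(TTL, cleanupInterval) pattern. Assuming this is the patrickmn/go-cache API that the call shape suggests, the session-scoped behaviour looks roughly like the sketch below: state stored for a session ID survives for the TTL, so a prompt reconnect finds it, while a long-gone client's entry expires.

package main

import (
	"fmt"
	"time"

	cache "github.com/patrickmn/go-cache"
)

func main() {
	// A short TTL stands in for GEOIP_SESSION_CACHE_TTL; the second
	// argument is the eviction sweep interval, as in the cache.New calls
	// in this hunk.
	sessions := cache.New(2*time.Second, 1*time.Second)

	// Record per-session state keyed by session ID (an invented value here).
	sessions.Set("session-1", "geoip-or-osl-state", cache.DefaultExpiration)

	if _, found := sessions.Get("session-1"); found {
		fmt.Println("reconnect within TTL: existing session state reused")
	}

	time.Sleep(3 * time.Second)

	if _, found := sessions.Get("session-1"); !found {
		fmt.Println("after TTL: entry expired, next connection starts a new session")
	}
}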
+ err = sshServer.reloadTactics() + if err != nil { + return nil, errors.Trace(err) + } + + return sshServer, nil } func (sshServer *sshServer) setEstablishTunnels(establish bool) { @@ -487,15 +543,17 @@ type additionalTransportData struct { // occurs, it will send the error to the listenerError channel. func (sshServer *sshServer) runListener(sshListener *sshListener, listenerError chan<- error) { - handleClient := func(clientConn net.Conn, transportData *additionalTransportData) { + handleClient := func(conn net.Conn, transportData *additionalTransportData) { // Note: establish tunnel limiter cannot simply stop TCP // listeners in all cases (e.g., meek) since SSH tunnels can // span multiple TCP connections. if !sshServer.checkEstablishTunnels() { - log.WithTrace().Debug("not establishing tunnels") - clientConn.Close() + if IsLogLevelDebug() { + log.WithTrace().Debug("not establishing tunnels") + } + conn.Close() return } @@ -517,7 +575,7 @@ func (sshServer *sshServer) runListener(sshListener *sshListener, listenerError // client may dial a different port for its first hop. // Process each client connection concurrently. - go sshServer.handleClient(sshListener, clientConn, transportData) + go sshServer.handleClient(sshListener, conn, transportData) } // Note: when exiting due to a unrecoverable error, be sure @@ -525,7 +583,8 @@ func (sshServer *sshServer) runListener(sshListener *sshListener, listenerError // TunnelServer.Run will properly shut down instead of remaining // running. - if protocol.TunnelProtocolUsesMeekHTTP(sshListener.tunnelProtocol) || protocol.TunnelProtocolUsesMeekHTTPS(sshListener.tunnelProtocol) { + if protocol.TunnelProtocolUsesMeekHTTP(sshListener.tunnelProtocol) || + protocol.TunnelProtocolUsesMeekHTTPS(sshListener.tunnelProtocol) { if sshServer.tunnelProtocolUsesTLSDemux(sshListener.tunnelProtocol) { @@ -545,6 +604,7 @@ func (sshServer *sshServer) runListener(sshListener *sshListener, listenerError sshServer.shutdownBroadcast) if err == nil { + sshServer.registerMeekServer(meekServer) err = meekServer.Run() } @@ -565,7 +625,10 @@ func (sshServer *sshServer) runListener(sshListener *sshListener, listenerError // runMeekTLSOSSHDemuxListener blocks running a listener which demuxes meek and // TLS-OSSH connections received on the same port. 
-func (sshServer *sshServer) runMeekTLSOSSHDemuxListener(sshListener *sshListener, listenerError chan<- error, handleClient func(clientConn net.Conn, transportData *additionalTransportData)) { +func (sshServer *sshServer) runMeekTLSOSSHDemuxListener( + sshListener *sshListener, + listenerError chan<- error, + handleClient func(conn net.Conn, transportData *additionalTransportData)) { meekClassifier := protocolClassifier{ minBytesToMatch: 4, @@ -589,7 +652,11 @@ func (sshServer *sshServer) runMeekTLSOSSHDemuxListener(sshListener *sshListener }, } - listener, err := ListenTLSTunnel(sshServer.support, sshListener.Listener, sshListener.tunnelProtocol, sshListener.port) + listener, err := ListenTLSTunnel( + sshServer.support, + sshListener.Listener, + sshListener.tunnelProtocol, + sshListener.port) if err != nil { select { case listenerError <- errors.Trace(err): @@ -598,7 +665,11 @@ func (sshServer *sshServer) runMeekTLSOSSHDemuxListener(sshListener *sshListener return } - mux, listeners := newProtocolDemux(context.Background(), listener, []protocolClassifier{meekClassifier, tlsClassifier}, sshServer.support.Config.sshHandshakeTimeout) + mux, listeners := newProtocolDemux( + context.Background(), + listener, + []protocolClassifier{meekClassifier, tlsClassifier}, + sshServer.support.Config.sshHandshakeTimeout) var wg sync.WaitGroup @@ -644,7 +715,11 @@ func (sshServer *sshServer) runMeekTLSOSSHDemuxListener(sshListener *sshListener defer wg.Done() // Override the listener tunnel protocol to report TLS-OSSH instead. - runListener(listeners[1], sshServer.shutdownBroadcast, listenerError, protocol.TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH, handleClient) + runListener( + listeners[1], + sshServer.shutdownBroadcast, + listenerError, + protocol.TUNNEL_PROTOCOL_TLS_OBFUSCATED_SSH, handleClient) }() wg.Add(1) @@ -669,6 +744,7 @@ func (sshServer *sshServer) runMeekTLSOSSHDemuxListener(sshListener *sshListener sshServer.shutdownBroadcast) if err == nil { + sshServer.registerMeekServer(meekServer) err = meekServer.Run() } @@ -684,7 +760,13 @@ func (sshServer *sshServer) runMeekTLSOSSHDemuxListener(sshListener *sshListener wg.Wait() } -func runListener(listener net.Listener, shutdownBroadcast <-chan struct{}, listenerError chan<- error, overrideTunnelProtocol string, handleClient func(clientConn net.Conn, transportData *additionalTransportData)) { +func runListener( + listener net.Listener, + shutdownBroadcast <-chan struct{}, + listenerError chan<- error, + overrideTunnelProtocol string, + handleClient func(conn net.Conn, transportData *additionalTransportData)) { + for { conn, err := listener.Accept() @@ -726,8 +808,32 @@ func runListener(listener net.Listener, shutdownBroadcast <-chan struct{}, liste } } -// An accepted client has completed a direct TCP or meek connection and has a net.Conn. Registration -// is for tracking the number of connections. +// registerMeekServer registers a MeekServer instance to receive tactics +// reload signals. +func (sshServer *sshServer) registerMeekServer(meekServer *MeekServer) { + sshServer.meekServersMutex.Lock() + defer sshServer.meekServersMutex.Unlock() + + sshServer.meekServers = append(sshServer.meekServers, meekServer) +} + +// reloadMeekServerTactics signals each registered MeekServer instance that +// tactics have reloaded and may have changed. 
+func (sshServer *sshServer) reloadMeekServerTactics() error { + sshServer.meekServersMutex.Lock() + defer sshServer.meekServersMutex.Unlock() + + for _, meekServer := range sshServer.meekServers { + err := meekServer.ReloadTactics() + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +// An accepted client has completed a direct TCP or meek connection and has a +// net.Conn. Registration is for tracking the number of connections. func (sshServer *sshServer) registerAcceptedClient(tunnelProtocol, region string) { sshServer.clientsMutex.Lock() @@ -787,7 +893,7 @@ func (sshServer *sshServer) registerEstablishedClient(client *sshClient) bool { // - existingClient is invoking handshakeAPIRequestHandler // - sshServer.clients[client.sessionID] is updated to point to new client // - existingClient's handshakeAPIRequestHandler invokes - // SetClientHandshakeState but sets the handshake parameters for new + // setHandshakeState but sets the handshake parameters for new // client // - as a result, the new client handshake will fail (only a single handshake // is permitted) and the new client server_tunnel log will contain an @@ -834,6 +940,14 @@ func (sshServer *sshServer) registerEstablishedClient(client *sshClient) bool { return false } + // SSH_MAX_CLIENT_COUNT is a simple sanity check and failsafe. Load + // limiting tuned to each server's host resources is provided by external + // components. See comment in newSSHServer for more details. + if len(sshServer.clients) >= SSH_MAX_CLIENT_COUNT { + log.WithTrace().Warning("SSH_MAX_CLIENT_COUNT exceeded") + return false + } + sshServer.clients[client.sessionID] = client return true @@ -947,6 +1061,10 @@ func (sshServer *sshServer) getLoadStats() ( // Note: as currently tracked/counted, each established client is also an accepted client + // Accepted client counts use peer GeoIP data, which in the case of + // in-proxy tunnel protocols is the proxy, not the client. The original + // client IP is only obtained after the tunnel handshake has completed. + for tunnelProtocol, regionAcceptedClientCounts := range sshServer.acceptedClientCounts { for region, acceptedClientCount := range regionAcceptedClientCounts { @@ -968,8 +1086,13 @@ func (sshServer *sshServer) getLoadStats() ( client.Lock() + // Limitation: registerEstablishedClient is called before the + // handshake API completes; as a result, in the case of in-proxy + // tunnel protocol, clientGeoIPData may not yet be initialized and + // will count as None. + tunnelProtocol := client.tunnelProtocol - region := client.geoIPData.Country + region := client.clientGeoIPData.Country if regionStats[region] == nil { regionStats[region] = zeroProtocolStats() @@ -1174,9 +1297,11 @@ func (sshServer *sshServer) getLoadStats() ( // session_id. Concurrent proximate clients may be considered an // exact number of other _network connections_, even from the same // client. + // + // - For in-proxy tunnel protocols, the same GeoIP caveats + // (see comments above) apply. 
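Since the peer/client GeoIP distinction noted above recurs throughout this change, here is a minimal illustrative sketch of the convention; the helper function is hypothetical, while the field and method names (peerGeoIPData, clientGeoIPData, getClientGeoIPData) follow the sshClient changes later in this diff.

// geoIPDataForStage is an illustrative helper, not part of this change: it
// summarizes which GeoIP data the server uses at each stage, per the
// comments in this diff.
func (sshClient *sshClient) geoIPDataForStage(handshakeCompleted bool) GeoIPData {
	if !handshakeCompleted {
		// Pre-handshake (accepted-client counts, replay, burst monitoring,
		// server-side OSSH tactics): only the network peer is known; for
		// in-proxy tunnel protocols the peer is the proxy, not the client.
		return sshClient.peerGeoIPData
	}
	// Post-handshake (traffic rules, OSL seeding, destination-bytes tactics,
	// server_tunnel logs): the client GeoIP is available; for in-proxy
	// tunnel protocols it is derived from the original client IP relayed by
	// the broker, otherwise it equals the peer GeoIP.
	return sshClient.getClientGeoIPData()
}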
- region := client.geoIPData.Country - stats := regionStats[region]["ALL"] + stats := regionStats[client.peerGeoIPData.Country]["ALL"] n := stats["accepted_clients"].(int64) - 1 if n >= 0 { @@ -1191,6 +1316,8 @@ func (sshServer *sshServer) getLoadStats() ( } } + stats = regionStats[client.clientGeoIPData.Country]["ALL"] + n = stats["established_clients"].(int64) - 1 if n >= 0 { if client.peakMetrics.concurrentProximateEstablishedClients == nil { @@ -1252,72 +1379,58 @@ func (sshServer *sshServer) resetAllClientOSLConfigs() { } } -func (sshServer *sshServer) setClientHandshakeState( - sessionID string, - state handshakeState, - authorizations []string) (*handshakeStateInfo, error) { - - sshServer.clientsMutex.Lock() - client := sshServer.clients[sessionID] - sshServer.clientsMutex.Unlock() - - if client == nil { - return nil, errors.TraceNew("unknown session ID") - } - - handshakeStateInfo, err := client.setHandshakeState( - state, authorizations) - if err != nil { - return nil, errors.Trace(err) - } +// reloadTactics signals/invokes components that use server-side tactics for +// one-time initialization to reload and use potentially changed parameters. +func (sshServer *sshServer) reloadTactics() error { - return handshakeStateInfo, nil -} + // The following in-proxy components use server-side tactics with a + // one-time initialization: + // + // - For servers running in-proxy tunnel protocols, + // sshServer.inproxyBrokerSessions are the broker/server sessions and + // the set of expected broker public keys is set from tactics. + // - For servers running a broker within MeekServer, broker operational + // configuration is set from tactics. + // + // For these components, one-time initialization is more efficient than + // constantly fetching tactics. Instead, these components reinitialize + // when tactics change. -func (sshServer *sshServer) getClientHandshaked( - sessionID string) (bool, bool, error) { + // sshServer.inproxyBrokerSessions is not nil when the server is running + // in-proxy tunnel protocols. + if sshServer.inproxyBrokerSessions != nil { - sshServer.clientsMutex.Lock() - client := sshServer.clients[sessionID] - sshServer.clientsMutex.Unlock() - - if client == nil { - return false, false, errors.TraceNew("unknown session ID") - } + // Get InproxyAllBrokerPublicKeys from tactics. + // + // Limitation: assumes no GeoIP targeting for InproxyAllBrokerPublicKeys. - completed, exhausted := client.getHandshaked() + p, err := sshServer.support.ServerTacticsParametersCache.Get(NewGeoIPData()) + if err != nil { + return errors.Trace(err) + } - return completed, exhausted, nil -} + if !p.IsNil() { -func (sshServer *sshServer) getClientDisableDiscovery( - sessionID string) (bool, error) { + brokerPublicKeys, err := inproxy.SessionPublicKeysFromStrings( + p.Strings(parameters.InproxyAllBrokerPublicKeys)) + if err != nil { + return errors.Trace(err) + } - sshServer.clientsMutex.Lock() - client := sshServer.clients[sessionID] - sshServer.clientsMutex.Unlock() + // SetKnownBrokerPublicKeys will terminate any existing sessions + // for broker public keys no longer in the known/expected list; + // but will retain any existing sessions for broker public keys + // that remain in the list. 
+ sshServer.inproxyBrokerSessions.SetKnownBrokerPublicKeys(brokerPublicKeys) - if client == nil { - return false, errors.TraceNew("unknown session ID") + } } - return client.getDisableDiscovery(), nil -} - -func (sshServer *sshServer) updateClientAPIParameters( - sessionID string, - apiParams common.APIParameters) error { - - sshServer.clientsMutex.Lock() - client := sshServer.clients[sessionID] - sshServer.clientsMutex.Unlock() - - if client == nil { - return errors.TraceNew("unknown session ID") + err := sshServer.reloadMeekServerTactics() + if err != nil { + return errors.Trace(err) } - client.updateAPIParameters(apiParams) - return nil } @@ -1346,20 +1459,6 @@ func (sshServer *sshServer) revokeClientAuthorizations(sessionID string) { client.setTrafficRules() } -func (sshServer *sshServer) acceptClientDomainBytes( - sessionID string) (bool, error) { - - sshServer.clientsMutex.Lock() - client := sshServer.clients[sessionID] - sshServer.clientsMutex.Unlock() - - if client == nil { - return false, errors.TraceNew("unknown session ID") - } - - return client.acceptDomainBytes(), nil -} - func (sshServer *sshServer) stopClients() { sshServer.clientsMutex.Lock() @@ -1375,7 +1474,7 @@ func (sshServer *sshServer) stopClients() { func (sshServer *sshServer) handleClient( sshListener *sshListener, - clientConn net.Conn, + conn net.Conn, transportData *additionalTransportData) { // overrideTunnelProtocol sets the tunnel protocol to a value other than @@ -1388,10 +1487,10 @@ func (sshServer *sshServer) handleClient( tunnelProtocol = transportData.overrideTunnelProtocol } - // Calling clientConn.RemoteAddr at this point, before any Read calls, + // Calling conn.RemoteAddr at this point, before any Read calls, // satisfies the constraint documented in tapdance.Listen. - clientAddr := clientConn.RemoteAddr() + peerAddr := conn.RemoteAddr() // Check if there were irregularities during the network connection // establishment. When present, log and then behave as Obfuscated SSH does @@ -1400,7 +1499,7 @@ func (sshServer *sshServer) handleClient( // One concrete irregular case is failure to send a PROXY protocol header for // TAPDANCE-OSSH. - if indicator, ok := clientConn.(common.IrregularIndicator); ok { + if indicator, ok := conn.(common.IrregularIndicator); ok { tunnelErr := indicator.IrregularTunnelError() @@ -1410,18 +1509,18 @@ func (sshServer *sshServer) handleClient( sshServer.support, sshListener.tunnelProtocol, sshListener.port, - common.IPAddressFromAddr(clientAddr), + common.IPAddressFromAddr(peerAddr), errors.Trace(tunnelErr), nil) var afterFunc *time.Timer if sshServer.support.Config.sshHandshakeTimeout > 0 { afterFunc = time.AfterFunc(sshServer.support.Config.sshHandshakeTimeout, func() { - clientConn.Close() + conn.Close() }) } - io.Copy(ioutil.Discard, clientConn) - clientConn.Close() + io.Copy(ioutil.Discard, conn) + conn.Close() afterFunc.Stop() return @@ -1430,6 +1529,10 @@ func (sshServer *sshServer) handleClient( // Get any packet manipulation values from GetAppliedSpecName as soon as // possible due to the expiring TTL. + // + // In the case of in-proxy tunnel protocols, the remote address will be + // the proxy, not the client, and GeoIP targeted packet manipulation will + // apply to the 2nd hop. 
serverPacketManipulation := "" replayedServerPacketManipulation := false @@ -1448,13 +1551,13 @@ func (sshServer *sshServer) handleClient( var localAddr, remoteAddr *net.TCPAddr var ok bool - underlying, ok := clientConn.(common.UnderlyingTCPAddrSource) + underlying, ok := conn.(common.UnderlyingTCPAddrSource) if ok { localAddr, remoteAddr, ok = underlying.GetUnderlyingTCPAddrs() } else { - localAddr, ok = clientConn.LocalAddr().(*net.TCPAddr) + localAddr, ok = conn.LocalAddr().(*net.TCPAddr) if ok { - remoteAddr, ok = clientConn.RemoteAddr().(*net.TCPAddr) + remoteAddr, ok = conn.RemoteAddr().(*net.TCPAddr) } } @@ -1468,11 +1571,14 @@ func (sshServer *sshServer) handleClient( } } - geoIPData := sshServer.support.GeoIPService.Lookup( - common.IPAddressFromAddr(clientAddr)) + // For in-proxy tunnel protocols, accepted client GeoIP reflects the proxy + // address, not the client. + + peerGeoIPData := sshServer.support.GeoIPService.Lookup( + common.IPAddressFromAddr(peerAddr)) - sshServer.registerAcceptedClient(tunnelProtocol, geoIPData.Country) - defer sshServer.unregisterAcceptedClient(tunnelProtocol, geoIPData.Country) + sshServer.registerAcceptedClient(tunnelProtocol, peerGeoIPData.Country) + defer sshServer.unregisterAcceptedClient(tunnelProtocol, peerGeoIPData.Country) // When configured, enforce a cap on the number of concurrent SSH // handshakes. This limits load spikes on busy servers when many clients @@ -1507,7 +1613,7 @@ func (sshServer *sshServer) handleClient( err := sshServer.concurrentSSHHandshakes.Acquire(ctx, 1) if err != nil { - clientConn.Close() + conn.Close() // This is a debug log as the only possible error is context timeout. log.WithTraceFields(LogFields{"error": err}).Debug( "acquire SSH handshake semaphore failed") @@ -1526,14 +1632,14 @@ func (sshServer *sshServer) handleClient( transportData, serverPacketManipulation, replayedServerPacketManipulation, - clientAddr, - geoIPData) + peerAddr, + peerGeoIPData) // sshClient.run _must_ call onSSHHandshakeFinished to release the semaphore: // in any error case; or, as soon as the SSH handshake phase has successfully // completed. - sshClient.run(clientConn, onSSHHandshakeFinished) + sshClient.run(conn, onSSHHandshakeFinished) } func (sshServer *sshServer) monitorPortForwardDialError(err error) { @@ -1566,26 +1672,73 @@ func (sshServer *sshServer) monitorPortForwardDialError(err error) { // tunnelProtocolUsesTLSDemux returns true if the server demultiplexes the given // protocol and TLS-OSSH over the same port. func (sshServer *sshServer) tunnelProtocolUsesTLSDemux(tunnelProtocol string) bool { - // Only use meek/TLS-OSSH demux if unfronted meek HTTPS with non-legacy passthrough. - if protocol.TunnelProtocolUsesMeekHTTPS(tunnelProtocol) && !protocol.TunnelProtocolUsesFrontedMeek(tunnelProtocol) { + + // Only use meek/TLS-OSSH demux if unfronted meek HTTPS with non-legacy + // passthrough, and not in-proxy. + if protocol.TunnelProtocolUsesMeekHTTPS(tunnelProtocol) && + !protocol.TunnelProtocolUsesFrontedMeek(tunnelProtocol) && + !protocol.TunnelProtocolUsesInproxy(tunnelProtocol) { _, passthroughEnabled := sshServer.support.Config.TunnelProtocolPassthroughAddresses[tunnelProtocol] return passthroughEnabled && !sshServer.support.Config.LegacyPassthrough } return false } +// setGeoIPSessionCache adds the sessionID/geoIPData pair to the session +// cache. This value will not expire; the caller must call +// markGeoIPSessionCacheToExpire to initiate expiry. 
Calling +// setGeoIPSessionCache for an existing sessionID will replace the previous +// value and reset any expiry. +func (sshServer *sshServer) setGeoIPSessionCache(sessionID string, geoIPData GeoIPData) { + sshServer.geoIPSessionCache.Set(sessionID, geoIPData, cache.NoExpiration) +} + +// markGeoIPSessionCacheToExpire initiates expiry for an existing session +// cache entry, if the session ID is found in the cache. Concurrency note: +// setGeoIPSessionCache and markGeoIPSessionCacheToExpire should not be +// called concurrently for a single session ID. +func (sshServer *sshServer) markGeoIPSessionCacheToExpire(sessionID string) { + geoIPData, found := sshServer.geoIPSessionCache.Get(sessionID) + // Note: potential race condition between Get and Set. In practice, + // the tunnel server won't clobber a SetSessionCache value by calling + // MarkSessionCacheToExpire concurrently. + if found { + sshServer.geoIPSessionCache.Set(sessionID, geoIPData, cache.DefaultExpiration) + } +} + +// getGeoIPSessionCache returns the cached GeoIPData for the specified session +// ID; a blank GeoIPData is returned if the session ID is not found in the +// cache. +func (sshServer *sshServer) getGeoIPSessionCache(sessionID string) GeoIPData { + geoIPData, found := sshServer.geoIPSessionCache.Get(sessionID) + if !found { + return NewGeoIPData() + } + return geoIPData.(GeoIPData) +} + +// inGeoIPSessionCache returns whether the session ID is present in the +// session cache. +func (sshServer *sshServer) inGeoIPSessionCache(sessionID string) bool { + _, found := sshServer.geoIPSessionCache.Get(sessionID) + return found +} + type sshClient struct { sync.Mutex sshServer *sshServer sshListener *sshListener tunnelProtocol string + isInproxyTunnelProtocol bool additionalTransportData *additionalTransportData sshConn ssh.Conn throttledConn *common.ThrottledConn serverPacketManipulation string replayedServerPacketManipulation bool - clientAddr net.Addr - geoIPData GeoIPData + peerAddr net.Addr + peerGeoIPData GeoIPData + clientGeoIPData GeoIPData sessionID string isFirstTunnelInSession bool supportsServerRequests bool @@ -1738,6 +1891,10 @@ type handshakeState struct { establishedTunnelsCount int splitTunnelLookup *splitTunnelLookup deviceRegion string + newTacticsTag string + inproxyClientIP string + inproxyClientGeoIPData GeoIPData + inproxyRelayLogFields common.LogFields } type destinationBytesMetrics struct { @@ -1827,8 +1984,8 @@ func newSshClient( transportData *additionalTransportData, serverPacketManipulation string, replayedServerPacketManipulation bool, - clientAddr net.Addr, - geoIPData GeoIPData) *sshClient { + peerAddr net.Addr, + peerGeoIPData GeoIPData) *sshClient { runCtx, stopRunning := context.WithCancel(context.Background()) @@ -1840,11 +1997,11 @@ func newSshClient( sshServer: sshServer, sshListener: sshListener, tunnelProtocol: tunnelProtocol, + isInproxyTunnelProtocol: protocol.TunnelProtocolUsesInproxy(tunnelProtocol), additionalTransportData: transportData, serverPacketManipulation: serverPacketManipulation, replayedServerPacketManipulation: replayedServerPacketManipulation, - clientAddr: clientAddr, - geoIPData: geoIPData, + peerAddr: peerAddr, isFirstTunnelInSession: true, qualityMetrics: newQualityMetrics(), tcpPortForwardLRU: common.NewLRUConns(), @@ -1859,9 +2016,30 @@ func newSshClient( client.tcpTrafficState.availablePortForwardCond = sync.NewCond(new(sync.Mutex)) client.udpTrafficState.availablePortForwardCond = sync.NewCond(new(sync.Mutex)) + // In the case of in-proxy tunnel protocols, 
clientGeoIPData is not set + // until the original client IP is relayed from the broker during the + // handshake. In other cases, clientGeoIPData is the peerGeoIPData + // (this includes fronted meek). + + client.peerGeoIPData = peerGeoIPData + if !client.isInproxyTunnelProtocol { + client.clientGeoIPData = peerGeoIPData + } + return client } +// getClientGeoIPData gets sshClient.clientGeoIPData. Use this helper when +// accessing this field without already holding a lock on the sshClient +// mutex. Unlike older code and unlike with client.peerGeoIPData, +// sshClient.clientGeoIPData is not static and may get set during the +// handshake, and it is not safe to access it without a lock. +func (sshClient *sshClient) getClientGeoIPData() GeoIPData { + sshClient.Lock() + defer sshClient.Unlock() + return sshClient.clientGeoIPData +} + func (sshClient *sshClient) run( baseConn net.Conn, onSSHHandshakeFinished func()) { @@ -1904,12 +2082,18 @@ func (sshClient *sshClient) run( // Further wrap the connection with burst monitoring, when enabled. // - // Limitation: burst parameters are fixed for the duration of the tunnel - // and do not change after a tactics hot reload. + // Limitations: + // + // - Burst parameters are fixed for the duration of the tunnel and do not + // change after a tactics hot reload. + // + // - In the case of in-proxy tunnel protocols, the original client IP is + // not yet known, and so burst monitoring GeoIP targeting uses the peer + // IP, which is the proxy, not the client. var burstConn *common.BurstMonitoredConn - p, err := sshClient.sshServer.support.ServerTacticsParametersCache.Get(sshClient.geoIPData) + p, err := sshClient.sshServer.support.ServerTacticsParametersCache.Get(sshClient.peerGeoIPData) if err != nil { log.WithTraceFields(LogFields{"error": errors.Trace(err)}).Warning( "ServerTacticsParametersCache.Get failed") @@ -1964,9 +2148,13 @@ func (sshClient *sshClient) run( // // A tunnel which fails to meet the targets but successfully completes any // liveness test and the API handshake is ignored in terms of replay scoring. + // + // In the case of in-proxy tunnel protocols, the peer address will be the + // proxy, not the client, and GeoIP targeted replay will apply to the 2nd + // hop. isReplayCandidate, replayWaitDuration, replayTargetDuration := - sshClient.sshServer.support.ReplayCache.GetReplayTargetDuration(sshClient.geoIPData) + sshClient.sshServer.support.ReplayCache.GetReplayTargetDuration(sshClient.peerGeoIPData) if isReplayCandidate { @@ -1991,7 +2179,7 @@ func (sshClient *sshClient) run( sshClient.sshServer.support.ReplayCache.SetReplayParameters( sshClient.tunnelProtocol, - sshClient.geoIPData, + sshClient.peerGeoIPData, sshClient.serverPacketManipulation, getFragmentorSeed(), bytesUp, @@ -2020,7 +2208,7 @@ func (sshClient *sshClient) run( if usedReplay { sshClient.sshServer.support.ReplayCache.FailedReplayParameters( sshClient.tunnelProtocol, - sshClient.geoIPData, + sshClient.peerGeoIPData, sshClient.serverPacketManipulation, getFragmentorSeed()) } @@ -2087,7 +2275,14 @@ func (sshClient *sshClient) run( if err == nil && protocol.TunnelProtocolUsesObfuscatedSSH(sshClient.tunnelProtocol) { - p, err := sshClient.sshServer.support.ServerTacticsParametersCache.Get(sshClient.geoIPData) + // In the case of in-proxy tunnel protocols, the peer address will + // be the proxy, not the client, and GeoIP targeted server-side + // OSSH tactics, including prefixes, will apply to the 2nd hop. 
+ // + // It is recommended to set ServerOSSHPrefixSpecs, etc., in default + // tactics. + + p, err := sshClient.sshServer.support.ServerTacticsParametersCache.Get(sshClient.peerGeoIPData) // Log error, but continue. A default prefix spec will be used by the server. if err != nil { @@ -2114,12 +2309,12 @@ func (sshClient *sshClient) run( sshClient.sshServer.support.Config.ObfuscatedSSHKey, sshClient.sshServer.obfuscatorSeedHistory, serverOsshPrefixSpecs, - func(clientIP string, err error, logFields common.LogFields) { + func(peerIP string, err error, logFields common.LogFields) { logIrregularTunnel( sshClient.sshServer.support, sshClient.sshListener.tunnelProtocol, sshClient.sshListener.port, - clientIP, + peerIP, errors.Trace(err), LogFields(logFields)) }) @@ -2144,7 +2339,7 @@ func (sshClient *sshClient) run( // obfuscator message. See tactics.Listener.Accept. This must preceed // ssh.NewServerConn to ensure fragmentor is seeded before downstream bytes // are written. - if err == nil && sshClient.tunnelProtocol == protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH { + if err == nil && protocol.TunnelProtocolIsObfuscatedSSH(sshClient.tunnelProtocol) { fragmentor, ok := baseConn.(common.FragmentorAccessor) if ok { var fragmentorPRNG *prng.PRNG @@ -2299,10 +2494,11 @@ func (sshClient *sshClient) run( } sshClient.Unlock() - // Initiate cleanup of the GeoIP session cache. To allow for post-tunnel - // final status requests, the lifetime of cached GeoIP records exceeds the - // lifetime of the sshClient. - sshClient.sshServer.support.GeoIPService.MarkSessionCacheToExpire(sshClient.sessionID) + // Set the GeoIP session cache to expire; up to this point, the entry for + // this session ID has no expiry; retaining entries after the tunnel + // disconnects supports first-tunnel-in-session and duplicate + // authorization logic. + sshClient.sshServer.markGeoIPSessionCacheToExpire(sshClient.sessionID) } func (sshClient *sshClient) passwordCallback(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) { @@ -2344,11 +2540,8 @@ func (sshClient *sshClient) passwordCallback(conn ssh.ConnMetadata, password []b sessionID := sshPasswordPayload.SessionId // The GeoIP session cache will be populated if there was a previous tunnel - // with this session ID. This will be true up to GEOIP_SESSION_CACHE_TTL, which - // is currently much longer than the OSL session cache, another option to use if - // the GeoIP session cache is retired (the GeoIP session cache currently only - // supports legacy use cases). - isFirstTunnelInSession := !sshClient.sshServer.support.GeoIPService.InSessionCache(sessionID) + // with this session ID. This will be true up to GEOIP_SESSION_CACHE_TTL. + isFirstTunnelInSession := !sshClient.sshServer.inGeoIPSessionCache(sessionID) supportsServerRequests := common.Contains( sshPasswordPayload.ClientCapabilities, protocol.CLIENT_CAPABILITY_SERVER_REQUESTS) @@ -2361,17 +2554,12 @@ func (sshClient *sshClient) passwordCallback(conn ssh.ConnMetadata, password []b sshClient.isFirstTunnelInSession = isFirstTunnelInSession sshClient.supportsServerRequests = supportsServerRequests - geoIPData := sshClient.geoIPData - sshClient.Unlock() - // Store the GeoIP data associated with the session ID. This makes - // the GeoIP data available to the web server for web API requests. - // A cache that's distinct from the sshClient record is used to allow - // for or post-tunnel final status requests. 
- // If the client is reconnecting with the same session ID, this call - // will undo the expiry set by MarkSessionCacheToExpire. - sshClient.sshServer.support.GeoIPService.SetSessionCache(sessionID, geoIPData) + // Initially, in the case of in-proxy tunnel protocols, the GeoIP session + // cache entry will be the proxy's GeoIPData. This is updated to be the + // client's GeoIPData in setHandshakeState. + sshClient.sshServer.setGeoIPSessionCache(sessionID, sshClient.peerGeoIPData) return nil, nil } @@ -2559,23 +2747,9 @@ func (sshClient *sshClient) handleSSHRequests(requests <-chan *ssh.Request) { // All other requests are assumed to be API requests. - sshClient.Lock() - authorizedAccessTypes := sshClient.handshakeState.authorizedAccessTypes - sshClient.Unlock() - - // Note: unlock before use is only safe as long as referenced sshClient data, - // such as slices in handshakeState, is read-only after initially set. - - clientAddr := "" - if sshClient.clientAddr != nil { - clientAddr = sshClient.clientAddr.String() - } - responsePayload, err = sshAPIRequestHandler( sshClient.sshServer.support, - clientAddr, - sshClient.geoIPData, - authorizedAccessTypes, + sshClient, request.Type, request.Payload) } @@ -3078,19 +3252,35 @@ var serverTunnelStatParams = append( []requestParamSpec{ {"last_connected", isLastConnected, requestParamOptional}, {"establishment_duration", isIntString, requestParamOptional}}, - baseSessionAndDialParams...) + baseAndDialParams...) func (sshClient *sshClient) logTunnel(additionalMetrics []LogFields) { sshClient.Lock() + // For in-proxy tunnel protocols, two sets of GeoIP fields are logged, one + // for the client and one for the proxy. The client GeoIP fields will + // be "None" if handshake did not complete. + logFields := getRequestLogFields( "server_tunnel", - sshClient.geoIPData, + sshClient.sessionID, + sshClient.clientGeoIPData, sshClient.handshakeState.authorizedAccessTypes, sshClient.handshakeState.apiParams, serverTunnelStatParams) + if sshClient.isInproxyTunnelProtocol { + sshClient.peerGeoIPData.SetLogFieldsWithPrefix("", "proxy", logFields) + logFields.Add( + LogFields(sshClient.handshakeState.inproxyRelayLogFields)) + } + + // new_tactics_tag indicates that the handshake returned new tactics. + if sshClient.handshakeState.newTacticsTag != "" { + logFields["new_tactics_tag"] = sshClient.handshakeState.newTacticsTag + } + // "relay_protocol" is sent with handshake API parameters. In pre- // handshake logTunnel cases, this value is not yet known. As // sshClient.tunnelProtocol is authoritative, set this value @@ -3103,7 +3293,6 @@ func (sshClient *sshClient) logTunnel(additionalMetrics []LogFields) { if sshClient.sshListener.BPFProgramName != "" { logFields["server_bpf"] = sshClient.sshListener.BPFProgramName } - logFields["session_id"] = sshClient.sessionID logFields["is_first_tunnel_in_session"] = sshClient.isFirstTunnelInSession logFields["handshake_completed"] = sshClient.handshakeState.completed logFields["bytes_up_tcp"] = sshClient.tcpTrafficState.bytesUp @@ -3145,7 +3334,13 @@ func (sshClient *sshClient) logTunnel(additionalMetrics []LogFields) { logDestBytes := true if sshClient.sshServer.support.ServerTacticsParametersCache != nil { - p, err := sshClient.sshServer.support.ServerTacticsParametersCache.Get(sshClient.geoIPData) + + // Target this using the client, not peer, GeoIP. In the case of + // in-proxy tunnel protocols, the client GeoIP fields will be None + // if the handshake does not complete. In that case, no bytes will + // have transferred. 
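The GeoIP session cache calls are spread across passwordCallback, setHandshakeState, and sshClient.run; a minimal sketch of the assumed lifecycle of one entry follows. The walkthrough function is hypothetical, and the underlying cache construction is not shown in this diff (the new helpers only reference cache.NoExpiration and cache.DefaultExpiration, consistent with a go-cache style store).

// exampleGeoIPSessionCacheLifecycle is an illustrative walkthrough, not part
// of this change; every call except the walkthrough itself appears in this
// diff.
func exampleGeoIPSessionCacheLifecycle(
	sshServer *sshServer, sshClient *sshClient, sessionID string) {

	// 1. passwordCallback: record the peer GeoIP with no expiry; for
	//    in-proxy tunnel protocols this is the proxy's GeoIP.
	sshServer.setGeoIPSessionCache(sessionID, sshClient.peerGeoIPData)

	// 2. setHandshakeState (in-proxy only): replace the entry with the
	//    client GeoIP obtained from the original client IP relayed by the
	//    broker.
	sshServer.setGeoIPSessionCache(sessionID, sshClient.getClientGeoIPData())

	// 3. sshClient.run, on exit: start expiry, retaining the entry long
	//    enough to support first-tunnel-in-session and duplicate
	//    authorization checks after the tunnel disconnects.
	sshServer.markGeoIPSessionCacheToExpire(sessionID)
}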
+ + p, err := sshClient.sshServer.support.ServerTacticsParametersCache.Get(sshClient.clientGeoIPData) if err != nil || p.IsNil() || sshClient.destinationBytesMetricsASN != p.String(parameters.DestinationBytesMetricsASN) { logDestBytes = false @@ -3223,7 +3418,6 @@ var blocklistHitsStatParams = []requestParamSpec{ {"device_region", isAnyString, requestParamOptional}, {"device_location", isGeoHashString, requestParamOptional}, {"egress_region", isRegionCode, requestParamOptional}, - {"session_id", isHexDigits, 0}, {"last_connected", isLastConnected, requestParamOptional}, } @@ -3231,15 +3425,19 @@ func (sshClient *sshClient) logBlocklistHits(IP net.IP, domain string, tags []Bl sshClient.Lock() + // Log this using the client, not peer, GeoIP. In the case of in-proxy + // tunnel protocols, the client GeoIP fields will be None if the + // handshake does not complete. In that case, no port forwarding will + // occur and there will not be any blocklist hits. + logFields := getRequestLogFields( "server_blocklist_hit", - sshClient.geoIPData, + sshClient.sessionID, + sshClient.clientGeoIPData, sshClient.handshakeState.authorizedAccessTypes, sshClient.handshakeState.apiParams, blocklistHitsStatParams) - logFields["session_id"] = sshClient.sessionID - // Note: see comment in logTunnel regarding unlock and concurrent access. sshClient.Unlock() @@ -3413,14 +3611,16 @@ func (sshClient *sshClient) getAlertActionURLs(alertReason string) []string { sshClient.Lock() sponsorID, _ := getStringRequestParam( sshClient.handshakeState.apiParams, "sponsor_id") + clientGeoIPData := sshClient.clientGeoIPData + deviceRegion := sshClient.handshakeState.deviceRegion sshClient.Unlock() return sshClient.sshServer.support.PsinetDatabase.GetAlertActionURLs( alertReason, sponsorID, - sshClient.geoIPData.Country, - sshClient.geoIPData.ASN, - sshClient.handshakeState.deviceRegion) + clientGeoIPData.Country, + clientGeoIPData.ASN, + deviceRegion) } func (sshClient *sshClient) rejectNewChannel(newChannel ssh.NewChannel, logMessage string) { @@ -3433,22 +3633,35 @@ func (sshClient *sshClient) rejectNewChannel(newChannel ssh.NewChannel, logMessa reason := ssh.Prohibited // Note: Debug level, as logMessage may contain user traffic destination address information - log.WithTraceFields( - LogFields{ - "channelType": newChannel.ChannelType(), - "logMessage": logMessage, - "rejectReason": reason.String(), - }).Debug("reject new channel") + if IsLogLevelDebug() { + log.WithTraceFields( + LogFields{ + "channelType": newChannel.ChannelType(), + "logMessage": logMessage, + "rejectReason": reason.String(), + }).Debug("reject new channel") + } // Note: logMessage is internal, for logging only; just the reject reason is sent to the client. newChannel.Reject(reason, reason.String()) } -// setHandshakeState records that a client has completed a handshake API request. -// Some parameters from the handshake request may be used in future traffic rule -// selection. Port forwards are disallowed until a handshake is complete. The -// handshake parameters are included in the session summary log recorded in -// sshClient.stop(). +// setHandshakeState sets the handshake state -- that it completed and +// what parameters were passed -- in sshClient. This state is used for allowing +// port forwards and for future traffic rule selection. setHandshakeState +// also triggers an immediate traffic rule re-selection, as the rules selected +// upon tunnel establishment may no longer apply now that handshake values are +// set. 
+// +// The authorizations received from the client handshake are verified and the +// resulting list of authorized access types are applied to the client's tunnel +// and traffic rules. +// +// A list of active authorization IDs, authorized access types, and traffic +// rate limits are returned for responding to the client and logging. +// +// All slices in the returned handshakeStateInfo are read-only, as readers may +// reference slice contents outside of locks. func (sshClient *sshClient) setHandshakeState( state handshakeState, authorizations []string) (*handshakeStateInfo, error) { @@ -3457,6 +3670,19 @@ func (sshClient *sshClient) setHandshakeState( completed := sshClient.handshakeState.completed if !completed { sshClient.handshakeState = state + + if sshClient.isInproxyTunnelProtocol { + + // Set the client GeoIP data to the value obtained using the + // original client IP, from the broker, in the handshake. Also + // update the GeoIP session cache to use the client GeoIP data. + + sshClient.clientGeoIPData = + sshClient.handshakeState.inproxyClientGeoIPData + + sshClient.sshServer.setGeoIPSessionCache( + sshClient.sessionID, sshClient.clientGeoIPData) + } } sshClient.Unlock() @@ -3547,10 +3773,16 @@ func (sshClient *sshClient) setHandshakeState( "tunnel_error": "duplicate active authorization", "duplicate_authorization_id": authorizationID, } - sshClient.geoIPData.SetLogFields(logFields) - duplicateGeoIPData := sshClient.sshServer.support.GeoIPService.GetSessionCache(sessionID) - if duplicateGeoIPData != sshClient.geoIPData { - duplicateGeoIPData.SetLogFieldsWithPrefix("duplicate_authorization_", logFields) + + // Log this using client, not peer, GeoIP data. In the case of + // in-proxy tunnel protocols, the client GeoIP fields will be None + // if a handshake does not complete. However, presence of a + // (duplicate) authorization implies that the handshake completed. + + sshClient.getClientGeoIPData().SetClientLogFields(logFields) + duplicateClientGeoIPData := sshClient.sshServer.getGeoIPSessionCache(sessionID) + if duplicateClientGeoIPData != sshClient.getClientGeoIPData() { + duplicateClientGeoIPData.SetClientLogFieldsWithPrefix("duplicate_authorization_", logFields) } log.LogRawFieldsWithTimestamp(logFields) @@ -3755,8 +3987,12 @@ func (sshClient *sshClient) setOSLConfig() { // port forwards will not send progress to the new client // seed state. + // Use the client, not peer, GeoIP data. In the case of in-proxy tunnel + // protocols, the client GeoIP fields will be populated using the + // original client IP already received, from the broker, in the handshake. + sshClient.oslClientSeedState = sshClient.sshServer.support.OSLConfig.NewClientSeedState( - sshClient.geoIPData.Country, + sshClient.clientGeoIPData.Country, propagationChannelID, sshClient.signalIssueSLOKs) } @@ -3816,7 +4052,11 @@ func (sshClient *sshClient) setDestinationBytesMetrics() { return } - p, err := tacticsCache.Get(sshClient.geoIPData) + // Use the client, not peer, GeoIP data. In the case of in-proxy tunnel + // protocols, the client GeoIP fields will be populated using the + // original client IP already received, from the broker, in the handshake.
+ + p, err := tacticsCache.Get(sshClient.clientGeoIPData) if err != nil { log.WithTraceFields(LogFields{"error": err}).Warning("get tactics failed") return } @@ -3873,10 +4113,19 @@ func (sshClient *sshClient) setTrafficRules() (int64, int64) { isFirstTunnelInSession := sshClient.isFirstTunnelInSession && sshClient.handshakeState.establishedTunnelsCount == 0 + // In the case of in-proxy tunnel protocols, the client GeoIP data is None + // until the handshake completes. Pre-handshake, the rate limit is + // determined by EstablishmentRead/WriteBytesPerSecond, which default to + // unthrottled (the recommended setting); in addition, no port forwards + // are permitted until after the handshake completes, at which time + // setTrafficRules will be called again with the client GeoIP data + // populated using the original client IP received from the in-proxy + // broker. + sshClient.trafficRules = sshClient.sshServer.support.TrafficRulesSet.GetTrafficRules( isFirstTunnelInSession, sshClient.tunnelProtocol, - sshClient.geoIPData, + sshClient.clientGeoIPData, sshClient.handshakeState) if sshClient.throttledConn != nil { @@ -3997,11 +4246,13 @@ func (sshClient *sshClient) isPortForwardPermitted( sshClient.enqueueDisallowedTrafficAlertRequest() - log.WithTraceFields( - LogFields{ - "type": portForwardType, - "port": port, - }).Debug("port forward denied by traffic rules") + if IsLogLevelDebug() { + log.WithTraceFields( + LogFields{ + "type": portForwardType, + "port": port, + }).Debug("port forward denied by traffic rules") + } return false } @@ -4019,6 +4270,13 @@ func (sshClient *sshClient) isDomainPermitted(domain string) (bool, string) { return false, "invalid domain name" } + // Don't even attempt to resolve the default mDNS top-level domain. + // Non-default cases won't be caught here but should fail to resolve due + // to the PreferGo setting in net.Resolver. + if strings.HasSuffix(domain, ".local") { + return false, "port forward not permitted" + } + tags := sshClient.sshServer.support.Blocklist.LookupDomain(domain) if len(tags) > 0 { @@ -4207,7 +4465,10 @@ func (sshClient *sshClient) establishedPortForward( if !sshClient.allocatePortForward(portForwardType) { portForwardLRU.CloseOldest() - log.WithTrace().Debug("closed LRU port forward") + + if IsLogLevelDebug() { + log.WithTrace().Debug("closed LRU port forward") + } state.availablePortForwardCond.L.Lock() for !sshClient.allocatePortForward(portForwardType) { @@ -4338,24 +4599,6 @@ func (sshClient *sshClient) handleTCPChannel( } }() - // Transparently redirect web API request connections. - - isWebServerPortForward := false - config := sshClient.sshServer.support.Config - if config.WebServerPortForwardAddress != "" { - destination := net.JoinHostPort(hostToConnect, strconv.Itoa(portToConnect)) - if destination == config.WebServerPortForwardAddress { - isWebServerPortForward = true - if config.WebServerPortForwardRedirectAddress != "" { - // Note: redirect format is validated when config is loaded - host, portStr, _ := net.SplitHostPort(config.WebServerPortForwardRedirectAddress) - port, _ := strconv.Atoi(portStr) - hostToConnect = host - portToConnect = port - } - } - } - - // Validate the domain name and check the domain blocklist before dialing. // // The IP blocklist is checked in isPortForwardPermitted, which also provides @@ -4369,8 +4612,7 @@ func (sshClient *sshClient) handleTCPChannel( // handle DNS-over-TCP; in the DNS-over-TCP case, a client may bypass the // block list check.
- if !isWebServerPortForward && - net.ParseIP(hostToConnect) == nil { + if net.ParseIP(hostToConnect) == nil { ok, rejectMessage := sshClient.isDomainPermitted(hostToConnect) if !ok { @@ -4397,10 +4639,19 @@ func (sshClient *sshClient) handleTCPChannel( // Resolve the hostname - log.WithTraceFields(LogFields{"hostToConnect": hostToConnect}).Debug("resolving") + // PreferGo, equivalent to GODEBUG=netdns=go, is specified in order to + // avoid any cases where Go's resolver fails over to the cgo-based + // resolver (see https://pkg.go.dev/net#hdr-Name_Resolution). Such + // cases, if they resolve at all, may be expected to resolve to bogon + // IPs that won't be permitted; but the cgo invocation will consume + // an OS thread, which is a performance hit we can avoid. + + if IsLogLevelDebug() { + log.WithTraceFields(LogFields{"hostToConnect": hostToConnect}).Debug("resolving") + } ctx, cancelCtx := context.WithTimeout(sshClient.runCtx, remainingDialTimeout) - IPs, err := (&net.Resolver{}).LookupIPAddr(ctx, hostToConnect) + IPs, err := (&net.Resolver{PreferGo: true}).LookupIPAddr(ctx, hostToConnect) cancelCtx() // "must be called or the new context will remain live until its parent context is cancelled" resolveElapsedTime := time.Since(dialStartTime) @@ -4480,7 +4731,13 @@ func (sshClient *sshClient) handleTCPChannel( destinationGeoIPData := sshClient.sshServer.support.GeoIPService.LookupIP(IP) - if sshClient.geoIPData.Country != GEOIP_UNKNOWN_VALUE && + // Use the client, not peer, GeoIP data. In the case of in-proxy tunnel + // protocols, the client GeoIP fields will be populated using the + // original client IP already received, from the broker, in the handshake. + + clientGeoIPData := sshClient.getClientGeoIPData() + + if clientGeoIPData.Country != GEOIP_UNKNOWN_VALUE && sshClient.handshakeState.splitTunnelLookup.lookup( destinationGeoIPData.Country) { @@ -4499,11 +4756,9 @@ func (sshClient *sshClient) handleTCPChannel( // Enforce traffic rules, using the resolved IP address. - if !isWebServerPortForward && - !sshClient.isPortForwardPermitted( - portForwardTypeTCP, - IP, - portToConnect) { + if !sshClient.isPortForwardPermitted( + portForwardTypeTCP, IP, portToConnect) { + // Note: not recording a port forward failure in this case sshClient.rejectNewChannel(newChannel, "port forward not permitted") return @@ -4513,7 +4768,9 @@ func (sshClient *sshClient) handleTCPChannel( remoteAddr := net.JoinHostPort(IP.String(), strconv.Itoa(portToConnect)) - log.WithTraceFields(LogFields{"remoteAddr": remoteAddr}).Debug("dialing") + if IsLogLevelDebug() { + log.WithTraceFields(LogFields{"remoteAddr": remoteAddr}).Debug("dialing") + } ctx, cancelCtx := context.WithTimeout(sshClient.runCtx, remainingDialTimeout) fwdConn, err := (&net.Dialer{}).DialContext(ctx, "tcp", remoteAddr) @@ -4590,7 +4847,9 @@ func (sshClient *sshClient) handleTCPChannel( // Relay channel to forwarded connection. - log.WithTraceFields(LogFields{"remoteAddr": remoteAddr}).Debug("relaying") + if IsLogLevelDebug() { + log.WithTraceFields(LogFields{"remoteAddr": remoteAddr}).Debug("relaying") + } // TODO: relay errors to fwdChannel.Stderr()? 
relayWaitGroup := new(sync.WaitGroup) @@ -4605,7 +4864,9 @@ func (sshClient *sshClient) handleTCPChannel( atomic.AddInt64(&bytesDown, bytes) if err != nil && err != io.EOF { // Debug since errors such as "connection reset by peer" occur during normal operation - log.WithTraceFields(LogFields{"error": err}).Debug("downstream TCP relay failed") + if IsLogLevelDebug() { + log.WithTraceFields(LogFields{"error": err}).Debug("downstream TCP relay failed") + } } // Interrupt upstream io.Copy when downstream is shutting down. // TODO: this is done to quickly cleanup the port forward when @@ -4617,7 +4878,9 @@ func (sshClient *sshClient) handleTCPChannel( fwdConn, fwdChannel, make([]byte, SSH_TCP_PORT_FORWARD_COPY_BUFFER_SIZE)) atomic.AddInt64(&bytesUp, bytes) if err != nil && err != io.EOF { - log.WithTraceFields(LogFields{"error": err}).Debug("upstream TCP relay failed") + if IsLogLevelDebug() { + log.WithTraceFields(LogFields{"error": err}).Debug("upstream TCP relay failed") + } } // Shutdown special case: fwdChannel will be closed and return EOF when // the SSH connection is closed, but we need to explicitly close fwdConn @@ -4627,9 +4890,11 @@ func (sshClient *sshClient) handleTCPChannel( relayWaitGroup.Wait() - log.WithTraceFields( - LogFields{ - "remoteAddr": remoteAddr, - "bytesUp": atomic.LoadInt64(&bytesUp), - "bytesDown": atomic.LoadInt64(&bytesDown)}).Debug("exiting") + if IsLogLevelDebug() { + log.WithTraceFields( + LogFields{ + "remoteAddr": remoteAddr, + "bytesUp": atomic.LoadInt64(&bytesUp), + "bytesDown": atomic.LoadInt64(&bytesDown)}).Debug("exiting") + } } diff --git a/psiphon/server/udp.go b/psiphon/server/udp.go index 27dfe9987..f16fcb0bc 100644 --- a/psiphon/server/udp.go +++ b/psiphon/server/udp.go @@ -365,6 +365,12 @@ type udpgwPortForward struct { mux *udpgwPortForwardMultiplexer } +var udpgwBufferPool = &sync.Pool{ + New: func() any { + return make([]byte, udpgwProtocolMaxMessageSize) + }, +} + func (portForward *udpgwPortForward) relayDownstream() { defer portForward.relayWaitGroup.Done() defer portForward.mux.relayWaitGroup.Done() @@ -378,7 +384,13 @@ func (portForward *udpgwPortForward) relayDownstream() { // Note: there is one downstream buffer per UDP port forward, // while for upstream there is one buffer per client. // TODO: is the buffer size larger than necessary? - buffer := make([]byte, udpgwProtocolMaxMessageSize) + + // Use a buffer pool to minimize GC churn resulting from frequent, + // short-lived UDP flows, including DNS requests. + buffer := udpgwBufferPool.Get().([]byte) + clear(buffer) + defer udpgwBufferPool.Put(buffer) + packetBuffer := buffer[portForward.preambleSize:udpgwProtocolMaxMessageSize] for { // TODO: if read buffer is too small, excess bytes are discarded? @@ -389,7 +401,9 @@ func (portForward *udpgwPortForward) relayDownstream() { if err != nil { if err != io.EOF { // Debug since errors such as "use of closed network connection" occur during normal operation - log.WithTraceFields(LogFields{"error": err}).Debug("downstream UDP relay failed") + if IsLogLevelDebug() { + log.WithTraceFields(LogFields{"error": err}).Debug("downstream UDP relay failed") + } } break } diff --git a/psiphon/server/webServer.go b/psiphon/server/webServer.go deleted file mode 100644 index 6dade79b1..000000000 --- a/psiphon/server/webServer.go +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Copyright (c) 2016, Psiphon Inc. - * All rights reserved. 
- * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - */ - -package server - -import ( - "encoding/json" - "io/ioutil" - golanglog "log" - "net" - "net/http" - "strconv" - "sync" - "time" - - std_tls "crypto/tls" - - tls "github.com/Psiphon-Labs/psiphon-tls" - "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" - "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" - "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" -) - -const WEB_SERVER_IO_TIMEOUT = 10 * time.Second - -type webServer struct { - support *SupportServices -} - -// RunWebServer runs a web server which supports tunneled and untunneled -// Psiphon API requests. -// -// The HTTP request handlers are light wrappers around the base Psiphon -// API request handlers from the SSH API transport. The SSH API transport -// is preferred by new clients. The web API transport provides support for -// older clients. -// -// The API is compatible with all tunnel-core clients but not backwards -// compatible with all legacy clients. -// -// Note: new features, including authorizations, are not supported in the -// web API transport. -func RunWebServer( - support *SupportServices, - shutdownBroadcast <-chan struct{}) error { - - webServer := &webServer{ - support: support, - } - - serveMux := http.NewServeMux() - serveMux.HandleFunc("/handshake", webServer.handshakeHandler) - serveMux.HandleFunc("/connected", webServer.connectedHandler) - serveMux.HandleFunc("/status", webServer.statusHandler) - serveMux.HandleFunc("/client_verification", webServer.clientVerificationHandler) - - certificate, err := tls.X509KeyPair( - []byte(support.Config.WebServerCertificate), - []byte(support.Config.WebServerPrivateKey)) - if err != nil { - return errors.Trace(err) - } - - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{certificate}, - } - - // TODO: inherits global log config? 
- logWriter := NewLogWriter() - defer logWriter.Close() - - // Note: WriteTimeout includes time awaiting request, as per: - // https://blog.cloudflare.com/the-complete-guide-to-golang-net-http-timeouts - - server := &HTTPSServer{ - &http.Server{ - MaxHeaderBytes: MAX_API_PARAMS_SIZE, - Handler: serveMux, - ReadTimeout: WEB_SERVER_IO_TIMEOUT, - WriteTimeout: WEB_SERVER_IO_TIMEOUT, - ErrorLog: golanglog.New(logWriter, "", 0), - - // Disable auto HTTP/2 (https://golang.org/doc/go1.6) - TLSNextProto: make(map[string]func(*http.Server, *std_tls.Conn, http.Handler)), - }, - } - - localAddress := net.JoinHostPort( - support.Config.ServerIPAddress, - strconv.Itoa(support.Config.WebServerPort)) - - listener, err := net.Listen("tcp", localAddress) - if err != nil { - return errors.Trace(err) - } - - log.WithTraceFields( - LogFields{"localAddress": localAddress}).Info("starting") - - err = nil - errorChannel := make(chan error) - waitGroup := new(sync.WaitGroup) - - waitGroup.Add(1) - go func() { - defer waitGroup.Done() - - // Note: will be interrupted by listener.Close() - err := server.ServeTLS(listener, tlsConfig) - - // Can't check for the exact error that Close() will cause in Accept(), - // (see: https://code.google.com/p/go/issues/detail?id=4373). So using an - // explicit stop signal to stop gracefully. - select { - case <-shutdownBroadcast: - default: - if err != nil { - select { - case errorChannel <- errors.Trace(err): - default: - } - } - } - - log.WithTraceFields( - LogFields{"localAddress": localAddress}).Info("stopped") - }() - - select { - case <-shutdownBroadcast: - case err = <-errorChannel: - } - - listener.Close() - - waitGroup.Wait() - - log.WithTraceFields( - LogFields{"localAddress": localAddress}).Info("exiting") - - return err -} - -// convertHTTPRequestToAPIRequest converts the HTTP request query -// parameters and request body to the JSON object import format -// expected by the API request handlers. -func convertHTTPRequestToAPIRequest( - w http.ResponseWriter, - r *http.Request, - requestBodyName string) (common.APIParameters, error) { - - params := make(common.APIParameters) - - for name, values := range r.URL.Query() { - - // Limitations: - // - This is intended only to support params sent by legacy - // clients; non-base array-type params are not converted. - // - Only the first values per name is used. - - if len(values) > 0 { - value := values[0] - - // TODO: faster lookup? 
- isArray := false - for _, paramSpec := range baseSessionAndDialParams { - if paramSpec.name == name { - isArray = (paramSpec.flags&requestParamArray != 0) - break - } - } - - if isArray { - // Special case: a JSON encoded array - var arrayValue []interface{} - err := json.Unmarshal([]byte(value), &arrayValue) - if err != nil { - return nil, errors.Trace(err) - } - params[name] = arrayValue - } else { - // All other query parameters are simple strings - params[name] = value - } - } - } - - if requestBodyName != "" { - r.Body = http.MaxBytesReader(w, r.Body, MAX_API_PARAMS_SIZE) - body, err := ioutil.ReadAll(r.Body) - if err != nil { - return nil, errors.Trace(err) - } - var bodyParams map[string]interface{} - - if len(body) != 0 { - err = json.Unmarshal(body, &bodyParams) - if err != nil { - return nil, errors.Trace(err) - } - params[requestBodyName] = bodyParams - } - } - - return params, nil -} - -func (webServer *webServer) lookupGeoIPData(params common.APIParameters) GeoIPData { - - clientSessionID, err := getStringRequestParam(params, "client_session_id") - if err != nil { - // Not all clients send this parameter - return NewGeoIPData() - } - - return webServer.support.GeoIPService.GetSessionCache(clientSessionID) -} - -func (webServer *webServer) handshakeHandler(w http.ResponseWriter, r *http.Request) { - - params, err := convertHTTPRequestToAPIRequest(w, r, "") - - var responsePayload []byte - if err == nil { - responsePayload, err = dispatchAPIRequestHandler( - webServer.support, - protocol.PSIPHON_WEB_API_PROTOCOL, - r.RemoteAddr, - webServer.lookupGeoIPData(params), - nil, - protocol.PSIPHON_API_HANDSHAKE_REQUEST_NAME, - params) - } - - if err != nil { - log.WithTraceFields(LogFields{"error": err}).Warning("failed") - w.WriteHeader(http.StatusNotFound) - return - } - - // The legacy response format is newline separated, name prefixed values. - // Within that legacy format, the modern JSON response (containing all the - // legacy response values and more) is single value with a "Config:" prefix. - // This response uses the legacy format but omits all but the JSON value. - responseBody := append([]byte("Config: "), responsePayload...) 
- - w.WriteHeader(http.StatusOK) - w.Write(responseBody) -} - -func (webServer *webServer) connectedHandler(w http.ResponseWriter, r *http.Request) { - - params, err := convertHTTPRequestToAPIRequest(w, r, "") - - var responsePayload []byte - if err == nil { - responsePayload, err = dispatchAPIRequestHandler( - webServer.support, - protocol.PSIPHON_WEB_API_PROTOCOL, - r.RemoteAddr, - webServer.lookupGeoIPData(params), - nil, // authorizedAccessTypes not logged in web API transport - protocol.PSIPHON_API_CONNECTED_REQUEST_NAME, - params) - } - - if err != nil { - log.WithTraceFields(LogFields{"error": err}).Warning("failed") - w.WriteHeader(http.StatusNotFound) - return - } - - w.WriteHeader(http.StatusOK) - w.Write(responsePayload) -} - -func (webServer *webServer) statusHandler(w http.ResponseWriter, r *http.Request) { - - params, err := convertHTTPRequestToAPIRequest(w, r, "statusData") - - var responsePayload []byte - if err == nil { - responsePayload, err = dispatchAPIRequestHandler( - webServer.support, - protocol.PSIPHON_WEB_API_PROTOCOL, - r.RemoteAddr, - webServer.lookupGeoIPData(params), - nil, // authorizedAccessTypes not logged in web API transport - protocol.PSIPHON_API_STATUS_REQUEST_NAME, - params) - } - - if err != nil { - log.WithTraceFields(LogFields{"error": err}).Warning("failed") - w.WriteHeader(http.StatusNotFound) - return - } - - w.WriteHeader(http.StatusOK) - w.Write(responsePayload) -} - -// clientVerificationHandler is kept for compliance with older Android clients -func (webServer *webServer) clientVerificationHandler(w http.ResponseWriter, r *http.Request) { - - params, err := convertHTTPRequestToAPIRequest(w, r, "verificationData") - - var responsePayload []byte - if err == nil { - responsePayload, err = dispatchAPIRequestHandler( - webServer.support, - protocol.PSIPHON_WEB_API_PROTOCOL, - r.RemoteAddr, - webServer.lookupGeoIPData(params), - nil, // authorizedAccessTypes not logged in web API transport - protocol.PSIPHON_API_CLIENT_VERIFICATION_REQUEST_NAME, - params) - } - - if err != nil { - log.WithTraceFields(LogFields{"error": err}).Warning("failed") - w.WriteHeader(http.StatusNotFound) - return - } - - w.WriteHeader(http.StatusOK) - w.Write(responsePayload) -} diff --git a/psiphon/serverApi.go b/psiphon/serverApi.go index b8c1b59cc..2654c5bf7 100644 --- a/psiphon/serverApi.go +++ b/psiphon/serverApi.go @@ -37,8 +37,10 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/buildinfo" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/crypto/ssh" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/fragmentor" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" @@ -84,7 +86,7 @@ func NewServerContext(tunnel *Tunnel) (*ServerContext, error) { // accessing the Psiphon API via the web service. 
var psiphonHttpsClient *http.Client if !tunnel.dialParams.ServerEntry.SupportsSSHAPIRequests() || - tunnel.config.TargetApiProtocol == protocol.PSIPHON_WEB_API_PROTOCOL { + tunnel.config.TargetAPIProtocol == protocol.PSIPHON_API_PROTOCOL_WEB { var err error psiphonHttpsClient, err = makePsiphonHttpsClient(tunnel) @@ -113,10 +115,9 @@ func NewServerContext(tunnel *Tunnel) (*ServerContext, error) { // doHandshakeRequest performs the "handshake" API request. The handshake // returns upgrade info, newly discovered server entries -- which are // stored -- and sponsor info (home pages, stat regexes). -func (serverContext *ServerContext) doHandshakeRequest( - ignoreStatsRegexps bool) error { +func (serverContext *ServerContext) doHandshakeRequest(ignoreStatsRegexps bool) error { - params := serverContext.getBaseAPIParameters(baseParametersAll) + params := serverContext.getBaseAPIParameters(baseParametersAll, false) // The server will return a signed copy of its own server entry when the // client specifies this 'missing_server_entry_signature' parameter. @@ -196,6 +197,20 @@ func (serverContext *ServerContext) doHandshakeRequest( params["split_tunnel_regions"] = serverContext.tunnel.config.SplitTunnelRegions } + // Add the in-proxy broker/server relay packet, which contains either the + // immediate broker report payload, for established sessions, or a new + // session handshake packet. The broker report securely relays the + // original client IP and the relaying proxy ID to the Psiphon server. + // inproxy_relay_packet is a required field for in-proxy tunnel protocols. + if protocol.TunnelProtocolUsesInproxy(serverContext.tunnel.dialParams.TunnelProtocol) { + inproxyConn := serverContext.tunnel.dialParams.inproxyConn.Load() + if inproxyConn != nil { + packet := base64.RawStdEncoding.EncodeToString( + inproxyConn.(*inproxy.ClientConn).InitialRelayPacket()) + params["inproxy_relay_packet"] = packet + } + } + var response []byte if serverContext.psiphonHttpsClient == nil { @@ -251,7 +266,11 @@ func (serverContext *ServerContext) doHandshakeRequest( return errors.Trace(err) } - if serverContext.tunnel.config.EmitClientAddress { + // Limitation: ClientAddress is not supported for in-proxy tunnel + // protocols; see comment in server.handshakeAPIRequestHandler. + if serverContext.tunnel.config.EmitClientAddress && + !protocol.TunnelProtocolUsesInproxy(serverContext.tunnel.dialParams.TunnelProtocol) { + NoticeClientAddress(handshakeResponse.ClientAddress) } @@ -472,7 +491,7 @@ func (serverContext *ServerContext) DoConnectedRequest() error { defer serverContext.tunnel.SetInFlightConnectedRequest(nil) params := serverContext.getBaseAPIParameters( - baseParametersOnlyUpstreamFragmentorDialParameters) + baseParametersOnlyUpstreamFragmentorDialParameters, false) lastConnected, err := getLastConnected() if err != nil { @@ -544,7 +563,8 @@ func (serverContext *ServerContext) StatsRegexps() *transferstats.Regexps { // DoStatusRequest makes a "status" API request to the server, sending session stats. func (serverContext *ServerContext) DoStatusRequest(tunnel *Tunnel) error { - params := serverContext.getBaseAPIParameters(baseParametersNoDialParameters) + params := serverContext.getBaseAPIParameters( + baseParametersNoDialParameters, false) // Note: ensure putBackStatusRequestPayload is called, to replace // payload for future attempt, in all failure cases. 
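The handshake, connected, and status requests above all pass false for the new includeSessionID argument; a minimal sketch of the assumed call-site convention follows, with the true case appearing below in RecordFailedTunnelStat. The wrapper function is hypothetical, but both calls mirror the signatures introduced in this change.

// exampleBaseAPIParameterCalls is illustrative only.
func exampleBaseAPIParameterCalls(
	serverContext *ServerContext, config *Config, dialParams *DialParameters) {

	// SSH API requests made over the tunnel: the server already holds the
	// client's session ID from the SSH login, so session_id is not resent.
	tunneledParams := serverContext.getBaseAPIParameters(baseParametersAll, false)

	// failed_tunnel parameters (RecordFailedTunnelStat, below) pass true so
	// that session_id is embedded in the recorded parameters.
	failedTunnelParams := getBaseAPIParameters(baseParametersAll, true, config, dialParams)

	_, _ = tunneledParams, failedTunnelParams
}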
@@ -624,6 +644,12 @@ func makeStatusRequestPayload( config *Config, serverId string) ([]byte, *statusRequestPayloadInfo, error) { + // The status request payload is always JSON encoded. As it is sent after + // the initial handshake and is multiplexed with other tunnel traffic, + // its size is less of a fingerprinting concern. + // + // TODO: pack and CBOR encode the status request payload. + transferStats := transferstats.TakeOutStatsForServer(serverId) hostBytes := transferStats.GetStatsForStatusRequest() @@ -822,7 +848,7 @@ func RecordFailedTunnelStat( return errors.Trace(err) } - params := getBaseAPIParameters(baseParametersAll, config, dialParams) + params := getBaseAPIParameters(baseParametersAll, true, config, dialParams) delete(params, "server_secret") params["server_entry_tag"] = dialParams.ServerEntry.Tag @@ -918,14 +944,30 @@ func (serverContext *ServerContext) doPostRequest( return responseBody, nil } -// makeSSHAPIRequestPayload makes a JSON payload for an SSH API request. +// makeSSHAPIRequestPayload makes an encoded payload for an SSH API request. func (serverContext *ServerContext) makeSSHAPIRequestPayload( params common.APIParameters) ([]byte, error) { - jsonPayload, err := json.Marshal(params) + + // CBOR encoding is the default and is preferred as its smaller size gives + // more space for variable padding to mitigate potential fingerprinting + // based on API message sizes. + + if !serverContext.tunnel.dialParams.ServerEntry.SupportsSSHAPIRequests() || + serverContext.tunnel.config.TargetAPIEncoding == protocol.PSIPHON_API_ENCODING_JSON { + + jsonPayload, err := json.Marshal(params) + if err != nil { + return nil, errors.Trace(err) + } + return jsonPayload, nil + } + + payload, err := protocol.MakePackedAPIParametersRequestPayload(params) if err != nil { return nil, errors.Trace(err) } - return jsonPayload, nil + + return payload, nil } type baseParametersFilter int @@ -937,10 +979,12 @@ const ( ) func (serverContext *ServerContext) getBaseAPIParameters( - filter baseParametersFilter) common.APIParameters { + filter baseParametersFilter, + includeSessionID bool) common.APIParameters { params := getBaseAPIParameters( filter, + includeSessionID, serverContext.tunnel.config, serverContext.tunnel.dialParams) @@ -970,22 +1014,60 @@ func (serverContext *ServerContext) getBaseAPIParameters( // getBaseAPIParameters returns all the common API parameters that are // included with each Psiphon API request. These common parameters are used // for metrics. +// +// The input dialPatrams may be nil when the filter has +// baseParametersNoDialParameters. func getBaseAPIParameters( filter baseParametersFilter, + includeSessionID bool, config *Config, dialParams *DialParameters) common.APIParameters { params := make(common.APIParameters) + // Temporary measure: unconditionally include legacy session_id and + // client_session_id fields for compatibility with existing servers used + // in CI. + // + // TODO: remove once necessary servers are upgraded params["session_id"] = config.SessionID params["client_session_id"] = config.SessionID - params["server_secret"] = dialParams.ServerEntry.WebServerSecret + + if includeSessionID { + // The session ID is included in non-SSH API requests only. For SSH + // API requests, the Psiphon server already has the client's session ID. 
+ params["session_id"] = config.SessionID + } params["propagation_channel_id"] = config.PropagationChannelId params["sponsor_id"] = config.GetSponsorID() params["client_version"] = config.ClientVersion params["client_platform"] = config.ClientPlatform params["client_features"] = config.clientFeatures params["client_build_rev"] = buildinfo.GetBuildInfo().BuildRev + if dialParams != nil { + // Prefer the dialParams network ID snapshot if available. + params["network_type"] = dialParams.GetNetworkType() + } else { + params["network_type"] = GetNetworkType(config.GetNetworkID()) + } + // TODO: snapshot tactics tag used when dialParams initialized. + params[tactics.APPLIED_TACTICS_TAG_PARAMETER_NAME] = + config.GetParameters().Get().Tag() + + // The server secret is deprecated and included only in legacy JSON + // encoded API messages for backwards compatibility. SSH login proves + // client possession of the server entry; the server secret was for the + // legacy web API with no SSH login. Note that we can't check + // dialParams.ServerEntry in the baseParametersNoDialParameters case, but + // that case is used by in-proxy dials, which implies support. + + if dialParams != nil { + if !dialParams.ServerEntry.SupportsSSHAPIRequests() || + config.TargetAPIEncoding == protocol.PSIPHON_API_ENCODING_JSON { + + params["server_secret"] = dialParams.ServerEntry.WebServerSecret + } + } // Blank parameters must be omitted. @@ -998,8 +1080,15 @@ func getBaseAPIParameters( if filter == baseParametersAll { + if protocol.TunnelProtocolUsesInproxy(dialParams.TunnelProtocol) { + inproxyConn := dialParams.inproxyConn.Load() + if inproxyConn != nil { + params["inproxy_connection_id"] = + inproxyConn.(*inproxy.ClientConn).GetConnectionID() + } + } + params["relay_protocol"] = dialParams.TunnelProtocol - params["network_type"] = dialParams.GetNetworkType() if dialParams.BPFProgramName != "" { params["client_bpf"] = dialParams.BPFProgramName @@ -1090,9 +1179,6 @@ func getBaseAPIParameters( params["server_entry_timestamp"] = localServerEntryTimestamp } - params[tactics.APPLIED_TACTICS_TAG_PARAMETER_NAME] = - config.GetParameters().Get().Tag() - if dialParams.DialPortNumber != "" { params["dial_port_number"] = dialParams.DialPortNumber } @@ -1234,6 +1320,13 @@ func getBaseAPIParameters( } } + if protocol.TunnelProtocolUsesInproxy(dialParams.TunnelProtocol) { + metrics := dialParams.GetInproxyMetrics() + for name, value := range metrics { + params[name] = fmt.Sprintf("%v", value) + } + } + } else if filter == baseParametersOnlyUpstreamFragmentorDialParameters { if dialParams.DialConnMetrics != nil { @@ -1343,23 +1436,36 @@ func makePsiphonHttpsClient(tunnel *Tunnel) (httpsClient *http.Client, err error } func HandleServerRequest( - tunnelOwner TunnelOwner, tunnel *Tunnel, name string, payload []byte) error { + tunnelOwner TunnelOwner, tunnel *Tunnel, request *ssh.Request) { + + var err error - switch name { + switch request.Type { case protocol.PSIPHON_API_OSL_REQUEST_NAME: - return HandleOSLRequest(tunnelOwner, tunnel, payload) + err = HandleOSLRequest(tunnelOwner, tunnel, request) case protocol.PSIPHON_API_ALERT_REQUEST_NAME: - return HandleAlertRequest(tunnelOwner, tunnel, payload) + err = HandleAlertRequest(tunnelOwner, tunnel, request) + default: + err = errors.Tracef("invalid request name") } - return errors.Tracef("invalid request name: %s", name) + if err != nil { + NoticeWarning( + "HandleServerRequest for %s failed: %s", request.Type, errors.Trace(err)) + } } func HandleOSLRequest( - tunnelOwner TunnelOwner, 
tunnel *Tunnel, payload []byte) error { + tunnelOwner TunnelOwner, tunnel *Tunnel, request *ssh.Request) (retErr error) { + + defer func() { + if retErr != nil { + request.Reply(false, nil) + } + }() var oslRequest protocol.OSLRequest - err := json.Unmarshal(payload, &oslRequest) + err := json.Unmarshal(request.Payload, &oslRequest) if err != nil { return errors.Trace(err) } @@ -1388,14 +1494,22 @@ func HandleOSLRequest( tunnelOwner.SignalSeededNewSLOK() } + request.Reply(true, nil) + return nil } func HandleAlertRequest( - tunnelOwner TunnelOwner, tunnel *Tunnel, payload []byte) error { + tunnelOwner TunnelOwner, tunnel *Tunnel, request *ssh.Request) (retErr error) { + + defer func() { + if retErr != nil { + request.Reply(false, nil) + } + }() var alertRequest protocol.AlertRequest - err := json.Unmarshal(payload, &alertRequest) + err := json.Unmarshal(request.Payload, &alertRequest) if err != nil { return errors.Trace(err) } @@ -1404,5 +1518,7 @@ func HandleAlertRequest( NoticeServerAlert(alertRequest) } + request.Reply(true, nil) + return nil } diff --git a/psiphon/sessionTicket_test.go b/psiphon/sessionTicket_test.go old mode 100755 new mode 100644 diff --git a/psiphon/socksProxy.go b/psiphon/socksProxy.go index 5ebe2f252..566074fbe 100644 --- a/psiphon/socksProxy.go +++ b/psiphon/socksProxy.go @@ -38,7 +38,7 @@ type SocksProxy struct { tunneler Tunneler listener *socks.SocksListener serveWaitGroup *sync.WaitGroup - openConns *common.Conns + openConns *common.Conns[net.Conn] stopListeningBroadcast chan struct{} } @@ -65,7 +65,7 @@ func NewSocksProxy( tunneler: tunneler, listener: socks.NewSocksListener(listener), serveWaitGroup: new(sync.WaitGroup), - openConns: common.NewConns(), + openConns: common.NewConns[net.Conn](), stopListeningBroadcast: make(chan struct{}), } proxy.serveWaitGroup.Add(1) diff --git a/psiphon/tactics.go b/psiphon/tactics.go index 95bafd6c0..c7913287a 100755 --- a/psiphon/tactics.go +++ b/psiphon/tactics.go @@ -221,6 +221,8 @@ func fetchTactics( canReplay, selectProtocol, serverEntry, + nil, + nil, true, 0, 0) @@ -269,7 +271,7 @@ func fetchTactics( defer meekConn.Close() apiParams := getBaseAPIParameters( - baseParametersAll, config, dialParams) + baseParametersAll, true, config, dialParams) tacticsRecord, err := tactics.FetchTactics( ctx, diff --git a/psiphon/tactics_test.go b/psiphon/tactics_test.go index 7e3e2d37d..0c8c14474 100644 --- a/psiphon/tactics_test.go +++ b/psiphon/tactics_test.go @@ -47,7 +47,10 @@ func TestStandAloneGetTactics(t *testing.T) { } var modifyConfig map[string]interface{} - json.Unmarshal(configJSON, &modifyConfig) + err = json.Unmarshal(configJSON, &modifyConfig) + if err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } modifyConfig["DataRootDirectory"] = testDataDirName diff --git a/psiphon/tlsDialer.go b/psiphon/tlsDialer.go index 7b99db602..8333c360a 100644 --- a/psiphon/tlsDialer.go +++ b/psiphon/tlsDialer.go @@ -250,18 +250,18 @@ func CustomTLSDial( dialAddr = config.DialAddr } - rawConn, err := config.Dial(ctx, network, dialAddr) + underlyingConn, err := config.Dial(ctx, network, dialAddr) if err != nil { return nil, errors.Trace(err) } if config.FragmentClientHello { - rawConn = NewTLSFragmentorConn(rawConn) + underlyingConn = NewTLSFragmentorConn(underlyingConn) } hostname, _, err := net.SplitHostPort(dialAddr) if err != nil { - rawConn.Close() + underlyingConn.Close() return nil, errors.Trace(err) } @@ -425,7 +425,7 @@ func CustomTLSDial( tlsConfig.DynamicRecordSizingDisabled = true } - conn := utls.UClient(rawConn, 
tlsConfig, utlsClientHelloID) + conn := utls.UClient(underlyingConn, tlsConfig, utlsClientHelloID) if utlsClientHelloSpec != nil { err := conn.ApplyPreset(utlsClientHelloSpec) @@ -629,16 +629,35 @@ func CustomTLSDial( case <-ctx.Done(): err = ctx.Err() // Interrupt the goroutine - rawConn.Close() + underlyingConn.Close() <-resultChannel } if err != nil { - rawConn.Close() + underlyingConn.Close() return nil, errors.Trace(err) } - return conn, nil + return &tlsConn{ + Conn: conn, + underlyingConn: underlyingConn}, nil +} + +type tlsConn struct { + net.Conn + underlyingConn net.Conn +} + +func (conn *tlsConn) GetMetrics() common.LogFields { + logFields := make(common.LogFields) + + // Include metrics, such as inproxy and fragmentor metrics, from the + // underlying dial conn. + underlyingMetrics, ok := conn.underlyingConn.(common.MetricsSource) + if ok { + logFields.Add(underlyingMetrics.GetMetrics()) + } + return logFields } func verifyLegacyCertificate(rawCerts [][]byte, expectedCertificate *x509.Certificate) error { @@ -704,10 +723,12 @@ func verifyCertificatePins(pins []string, verifiedChains [][]*x509.Certificate) } func IsTLSConnUsingHTTP2(conn net.Conn) bool { - if c, ok := conn.(*utls.UConn); ok { - state := c.ConnectionState() - return state.NegotiatedProtocolIsMutual && - state.NegotiatedProtocol == "h2" + if t, ok := conn.(*tlsConn); ok { + if u, ok := t.Conn.(*utls.UConn); ok { + state := u.ConnectionState() + return state.NegotiatedProtocolIsMutual && + state.NegotiatedProtocol == "h2" + } } return false } @@ -1040,6 +1061,18 @@ func (c *TLSFragmentorConn) Read(b []byte) (n int, err error) { return c.Conn.Read(b) } +func (c *TLSFragmentorConn) GetMetrics() common.LogFields { + logFields := make(common.LogFields) + + // Include metrics, such as inproxy and fragmentor metrics, from the + // underlying dial conn. + underlyingMetrics, ok := c.Conn.(common.MetricsSource) + if ok { + logFields.Add(underlyingMetrics.GetMetrics()) + } + return logFields +} + // Write transparently splits the first TLS record containing ClientHello into // two fragments and writes them separately to the underlying conn. // The second fragment contains the data portion of the SNI extension (i.e. the server name). 
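The GetMetrics additions in this file (tlsConn, TLSFragmentorConn) follow one delegation pattern: a wrapper conn checks whether the conn beneath it is a common.MetricsSource and, if so, merges that conn's log fields into its own, so fragmentor and in-proxy metrics recorded at the bottom of the dial stack surface at the top. A minimal sketch of the pattern, reusing only the common.MetricsSource and common.LogFields types from this diff; wrapperConn itself is hypothetical:

```go
package example

import (
	"net"

	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
)

// wrapperConn is a hypothetical conn layer, standing in for tlsConn or
// TLSFragmentorConn above.
type wrapperConn struct {
	net.Conn
}

// GetMetrics merges in the wrapped conn's metrics, if it exposes any, so a
// caller higher in the stack sees the whole chain's log fields.
func (c *wrapperConn) GetMetrics() common.LogFields {
	logFields := make(common.LogFields)
	if underlying, ok := c.Conn.(common.MetricsSource); ok {
		logFields.Add(underlying.GetMetrics())
	}
	return logFields
}
```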
diff --git a/psiphon/tlsDialer_test.go b/psiphon/tlsDialer_test.go index 982b294c4..0d6662757 100644 --- a/psiphon/tlsDialer_test.go +++ b/psiphon/tlsDialer_test.go @@ -493,7 +493,7 @@ func testTLSDialerCompatibility(t *testing.T, address string, fragmentClientHell // Same tls config as psiphon/server/meek.go - certificate, privateKey, err := common.GenerateWebServerCertificate(values.GetHostName()) + certificate, privateKey, _, err := common.GenerateWebServerCertificate(values.GetHostName()) if err != nil { t.Fatalf("common.GenerateWebServerCertificate failed: %v", err) } @@ -581,7 +581,7 @@ func testTLSDialerCompatibility(t *testing.T, address string, fragmentClientHell } else { tlsVersion := "" - version := conn.(*utls.UConn).ConnectionState().Version + version := conn.(*tlsConn).Conn.(*utls.UConn).ConnectionState().Version if version == utls.VersionTLS12 { tlsVersion = "TLS 1.2" } else if version == utls.VersionTLS13 { @@ -935,7 +935,7 @@ func makeCustomTLSProfilesParameters( applyParameters[parameters.DisableFrontingProviderTLSProfiles] = disabledTLSProfiles } - _, err = params.Set("", false, applyParameters) + _, err = params.Set("", 0, applyParameters) if err != nil { t.Fatalf("Set failed: %v", err) } diff --git a/psiphon/tlsTunnelConn.go b/psiphon/tlsTunnelConn.go index de4939dd8..b7cdcc018 100644 --- a/psiphon/tlsTunnelConn.go +++ b/psiphon/tlsTunnelConn.go @@ -162,10 +162,8 @@ func (conn *TLSTunnelConn) GetMetrics() common.LogFields { logFields["tls_padding"] = conn.tlsPadding - // Include metrics, such as fragmentor metrics, from the underlying dial - // conn. Properties of subsequent underlying dial conns are not reflected - // in these metrics; we assume that the first dial conn, which most likely - // transits the various protocol handshakes, is most significant. + // Include metrics, such as inproxy and fragmentor metrics, from the + // underlying dial conn. underlyingMetrics, ok := conn.Conn.(common.MetricsSource) if ok { logFields.Add(underlyingMetrics.GetMetrics()) diff --git a/psiphon/tunnel.go b/psiphon/tunnel.go index bec851e72..1b404a87a 100644 --- a/psiphon/tunnel.go +++ b/psiphon/tunnel.go @@ -31,6 +31,7 @@ import ( "io/ioutil" "net" "net/http" + "strconv" "sync" "sync/atomic" "time" @@ -38,6 +39,9 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/crypto/ssh" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/fragmentor" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" + inproxy_dtls "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy/dtls" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/obfuscator" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" @@ -46,6 +50,7 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/refraction" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tactics" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/transferstats" + "github.com/fxamacker/cbor/v2" ) // Tunneler specifies the interface required by components that use tunnels. @@ -215,13 +220,24 @@ func (tunnel *Tunnel) Activate( // request. At this point, there is no operateTunnel monitor that will detect // this condition with SSH keep alives. 
- timeout := tunnel.getCustomParameters().Duration( - parameters.PsiphonAPIRequestTimeout) + doInproxy := protocol.TunnelProtocolUsesInproxy(tunnel.dialParams.TunnelProtocol) + var timeoutParameter string + if doInproxy { + // Optionally allow more time in case the broker/server relay + // requires additional round trips to establish a new session. + timeoutParameter = parameters.InproxyPsiphonAPIRequestTimeout + } else { + timeoutParameter = parameters.PsiphonAPIRequestTimeout + } + timeout := tunnel.getCustomParameters().Duration(timeoutParameter) + + var handshakeCtx context.Context + var cancelFunc context.CancelFunc if timeout > 0 { - var cancelFunc context.CancelFunc - ctx, cancelFunc = context.WithTimeout(ctx, timeout) - defer cancelFunc() + handshakeCtx, cancelFunc = context.WithTimeout(ctx, timeout) + } else { + handshakeCtx, cancelFunc = context.WithCancel(ctx) } type newServerContextResult struct { @@ -231,7 +247,48 @@ func (tunnel *Tunnel) Activate( resultChannel := make(chan newServerContextResult) + wg := new(sync.WaitGroup) + + if doInproxy { + + // Launch a handler for broker/server relay SSH requests, + // which will occur when the broker needs to establish a new + // session with the server. + wg.Add(1) + go func() { + defer wg.Done() + notice := true + select { + case serverRequest := <-tunnel.sshServerRequests: + if serverRequest != nil { + if serverRequest.Type == protocol.PSIPHON_API_INPROXY_RELAY_REQUEST_NAME { + + if notice { + NoticeInfo( + "relaying inproxy broker packets for %s", + tunnel.dialParams.ServerEntry.GetDiagnosticID()) + notice = false + } + tunnel.relayInproxyPacketRoundTrip(handshakeCtx, serverRequest) + + } else { + + // There's a potential race condition in which + // post-handshake SSH requests, such as OSL or + // alert requests, arrive to this handler instead + // of operateTunnel, so invoke HandleServerRequest here. + HandleServerRequest(tunnelOwner, tunnel, serverRequest) + } + } + case <-handshakeCtx.Done(): + return + } + }() + } + + wg.Add(1) go func() { + defer wg.Done() serverContext, err := NewServerContext(tunnel) resultChannel <- newServerContextResult{ serverContext: serverContext, @@ -243,13 +300,16 @@ func (tunnel *Tunnel) Activate( select { case result = <-resultChannel: - case <-ctx.Done(): - result.err = ctx.Err() + case <-handshakeCtx.Done(): + result.err = handshakeCtx.Err() // Interrupt the goroutine tunnel.Close(true) <-resultChannel } + cancelFunc() + wg.Wait() + if result.err != nil { return errors.Trace(result.err) } @@ -298,6 +358,55 @@ func (tunnel *Tunnel) Activate( return nil } +func (tunnel *Tunnel) relayInproxyPacketRoundTrip( + ctx context.Context, request *ssh.Request) (retErr error) { + + defer func() { + if retErr != nil { + request.Reply(false, nil) + } + }() + + // Continue the broker/server relay started in the handshake round trip. + + // server -> broker + + var relayRequest protocol.InproxyRelayRequest + err := cbor.Unmarshal(request.Payload, &relayRequest) + if err != nil { + return errors.Trace(err) + } + + inproxyConn := tunnel.dialParams.inproxyConn.Load().(*inproxy.ClientConn) + if inproxyConn == nil { + return errors.TraceNew("missing inproxyConn") + } + + responsePacket, err := inproxyConn.RelayPacket(ctx, relayRequest.Packet) + if err != nil { + return errors.Trace(err) + } + + // RelayPacket may return a nil packet when the relay is complete.
+ if responsePacket == nil { + return nil + } + + // broker -> server + + relayResponse := &protocol.InproxyRelayResponse{ + Packet: responsePacket, + } + responsePayload, err := protocol.CBOREncoding.Marshal(relayResponse) + if err != nil { + return errors.Trace(err) + } + + err = request.Reply(true, responsePayload) + if err != nil { + return errors.Trace(err) + } + + return nil +} + // Close stops operating the tunnel and closes the underlying connection. // Supports multiple and/or concurrent calls to Close(). // When isDiscarded is set, operateTunnel will not attempt to send final @@ -752,20 +861,7 @@ func dialTunnel( var dialConn net.Conn - if protocol.TunnelProtocolUsesTLSOSSH(dialParams.TunnelProtocol) { - - dialConn, err = DialTLSTunnel( - ctx, - dialParams.GetTLSOSSHConfig(config), - dialParams.GetDialConfig(), - tlsOSSHApplyTrafficShaping, - tlsOSSHMinTLSPadding, - tlsOSSHMaxTLSPadding) - if err != nil { - return nil, errors.Trace(err) - } - - } else if protocol.TunnelProtocolUsesMeek(dialParams.TunnelProtocol) { + if protocol.TunnelProtocolUsesMeek(dialParams.TunnelProtocol) { dialConn, err = DialMeek( ctx, @@ -777,10 +873,54 @@ } else if protocol.TunnelProtocolUsesQUIC(dialParams.TunnelProtocol) { - packetConn, remoteAddr, err := NewUDPConn( - ctx, "udp", false, "", dialParams.DirectDialAddress, dialParams.GetDialConfig()) - if err != nil { - return nil, errors.Trace(err) + var packetConn net.PacketConn + var remoteAddr *net.UDPAddr + + // Special case: explicit in-proxy dial. TCP dials wire up in-proxy + // dials via DialConfig and its CustomDialer using + // makeInproxyTCPDialer. common/quic doesn't have an equivalent to + // CustomDialer. + + if protocol.TunnelProtocolUsesInproxy(dialParams.TunnelProtocol) { + + packetConn, err = dialInproxy(ctx, config, dialParams) + if err != nil { + return nil, errors.Trace(err) + } + + // Use the actual 2nd hop destination address as the remote + // address for correct behavior in + // common/quic.getMaxPreDiscoveryPacketSize, which differs for + // IPv4 vs. IPv6 destination addresses; and + // ObfuscatedPacketConn.RemoteAddr. The 2nd hop destination + // address is not actually dialed. + // + // Limitation: for domain destinations, the in-proxy proxy + // resolves the domain, so just assume IPv6, which has lower max + // padding (see quic.getMaxPreDiscoveryPacketSize), and use a stub + // address. + + host, portStr, err := net.SplitHostPort(dialParams.DirectDialAddress) + if err != nil { + return nil, errors.Trace(err) + } + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, errors.Trace(err) + } + IP := net.ParseIP(host) + if IP == nil { + IP = net.ParseIP("fd00::") + } + remoteAddr = &net.UDPAddr{IP: IP, Port: port} + + } else { + + packetConn, remoteAddr, err = NewUDPConn( + ctx, "udp", false, "", dialParams.DirectDialAddress, dialParams.GetDialConfig()) + if err != nil { + return nil, errors.Trace(err) + } } dialConn, err = quic.Dial( @@ -812,150 +952,36 @@ } else if protocol.TunnelProtocolUsesConjure(dialParams.TunnelProtocol) { - // Specify a cache key with a scope that ensures that: - // - // (a) cached registrations aren't used across different networks, as a - // registration requires the client's public IP to match the value at time - // of registration; - // - // (b) cached registrations are associated with specific Psiphon server - // candidates, to ensure that replay will use the same phantom IP(s).
- // - // This scheme allows for reuse of cached registrations on network A when a - // client roams from network A to network B and back to network A. - // - // Using the network ID as a proxy for client public IP address is a - // heurisitic: it's possible that a clients public IP address changes - // without the network ID changing, and it's not guaranteed that the client - // will be assigned the original public IP on network A; so there's some - // chance the registration cannot be reused. - - diagnosticID := dialParams.ServerEntry.GetDiagnosticID() - - cacheKey := dialParams.NetworkID + "-" + diagnosticID - - conjureConfig := &refraction.ConjureConfig{ - RegistrationCacheTTL: dialParams.ConjureCachedRegistrationTTL, - RegistrationCacheKey: cacheKey, - EnableIPv6Dials: conjureEnableIPv6Dials, - EnablePortRandomization: conjureEnablePortRandomization, - EnableRegistrationOverrides: conjureEnableRegistrationOverrides, - Transport: dialParams.ConjureTransport, - STUNServerAddress: dialParams.ConjureSTUNServerAddress, - DTLSEmptyInitialPacket: dialParams.ConjureDTLSEmptyInitialPacket, - DiagnosticID: diagnosticID, - Logger: NoticeCommonLogger(), - } - - // Set extraFailureAction, which is invoked whenever the tunnel fails (i.e., - // where RecordFailedTunnelStat is invoked). The action will remove any - // cached registration. When refraction.DialConjure succeeds, the underlying - // registration is cached. After refraction.DialConjure returns, it no - // longer modifies the cached state of that registration, assuming that it - // remains valid and effective. However adversarial impact on a given - // phantom IP may not become evident until after the initial TCP connection - // establishment and handshake performed by refraction.DialConjure. For - // example, it may be that the phantom dial is targeted for severe - // throttling which begins or is only evident later in the flow. Scheduling - // a call to DeleteCachedConjureRegistration allows us to invalidate the - // cached registration for a tunnel that fails later in its lifecycle. - // - // Note that extraFailureAction will retain a reference to conjureConfig for - // the lifetime of the tunnel. - extraFailureAction = func() { - refraction.DeleteCachedConjureRegistration(conjureConfig) + dialConn, extraFailureAction, err = dialConjure( + ctx, + config, + dialParams, + conjureEnableIPv6Dials, + conjureEnablePortRandomization, + conjureEnableRegistrationOverrides) + if err != nil { + return nil, errors.Trace(err) } - if dialParams.ConjureAPIRegistration { - - // Use MeekConn to domain front Conjure API registration. - // - // ConjureAPIRegistrarFrontingSpecs are applied via - // dialParams.GetMeekConfig, and will be subject to replay. - // - // Since DialMeek will create a TLS connection immediately, and a cached - // registration may be used, we will delay initializing the MeekConn-based - // RoundTripper until we know it's needed. This is implemented by passing - // in a RoundTripper that establishes a MeekConn when RoundTrip is called. - // - // In refraction.dial we configure 0 retries for API registration requests, - // assuming it's better to let another Psiphon candidate retry, with new - // domaing fronting parameters. As such, we expect only one round trip call - // per NewHTTPRoundTripper, so, in practise, there's no performance penalty - // from establishing a new MeekConn per round trip. 
- // - // Performing the full DialMeek/RoundTrip operation here allows us to call - // MeekConn.Close and ensure all resources are immediately cleaned up. - roundTrip := func(request *http.Request) (*http.Response, error) { - - conn, err := DialMeek( - ctx, dialParams.GetMeekConfig(), dialParams.GetDialConfig()) - if err != nil { - return nil, errors.Trace(err) - } - defer conn.Close() - - response, err := conn.RoundTrip(request) - if err != nil { - return nil, errors.Trace(err) - } + } else if protocol.TunnelProtocolUsesTLSOSSH(dialParams.TunnelProtocol) { - // Read the response into a buffer and close the response - // body, ensuring that MeekConn.Close closes all idle connections. - // - // Alternatively, we could Clone the request to set - // http.Request.Close and avoid keeping any idle connection - // open after the response body is read by gotapdance. Since - // the response body is small and since gotapdance does not - // stream the response body, we're taking this approach which - // ensures cleanup. - - body, err := ioutil.ReadAll(response.Body) - _ = response.Body.Close() - if err != nil { - return nil, errors.Trace(err) - } - response.Body = io.NopCloser(bytes.NewReader(body)) - - return response, nil - } - - conjureConfig.APIRegistrarHTTPClient = &http.Client{ - Transport: common.NewHTTPRoundTripper(roundTrip), - } - - conjureConfig.APIRegistrarBidirectionalURL = - dialParams.ConjureAPIRegistrarBidirectionalURL - conjureConfig.APIRegistrarDelay = dialParams.ConjureAPIRegistrarDelay - - } else if dialParams.ConjureDecoyRegistration { - - // The Conjure "phantom" connection is compatible with fragmentation, but - // the decoy registrar connection, like Tapdance, is not, so force it off. - // Any tunnel fragmentation metrics will refer to the "phantom" connection - // only. - conjureConfig.DoDecoyRegistration = true - conjureConfig.DecoyRegistrarWidth = dialParams.ConjureDecoyRegistrarWidth - conjureConfig.DecoyRegistrarDelay = dialParams.ConjureDecoyRegistrarDelay - } - - dialConn, err = refraction.DialConjure( + dialConn, err = DialTLSTunnel( ctx, - config.EmitRefractionNetworkingLogs, - config.GetPsiphonDataDirectory(), - NewRefractionNetworkingDialer(dialParams.GetDialConfig()).DialContext, - dialParams.DirectDialAddress, - conjureConfig) + dialParams.GetTLSOSSHConfig(config), + dialParams.GetDialConfig(), + tlsOSSHApplyTrafficShaping, + tlsOSSHMinTLSPadding, + tlsOSSHMaxTLSPadding) if err != nil { return nil, errors.Trace(err) } } else { - dialConn, err = DialTCP( - ctx, - dialParams.DirectDialAddress, - dialParams.GetDialConfig()) + // Use NewTCPDialer and don't use DialTCP directly, to ensure that + // dialParams.GetDialConfig().CustomDialer is applied. + tcpDialer := NewTCPDialer(dialParams.GetDialConfig()) + dialConn, err = tcpDialer(ctx, "tcp", dialParams.DirectDialAddress) + if err != nil { + return nil, errors.Trace(err) + } @@ -1031,6 +1057,13 @@ func dialTunnel( return false }, HostKeyFallback: func(addr string, remote net.Addr, publicKey ssh.PublicKey) error { + + // The remote address input isn't checked. In the case of fronted + // protocols, the immediate remote peer won't be the Psiphon + // server. In direct cases, the client has just dialed the IP + // address, and both the address and the expected public key are + // taken from the same trusted, signed server entry.
+ if !bytes.Equal(expectedPublicKey, publicKey.Marshal()) { return errors.TraceNew("unexpected host public key") } @@ -1078,7 +1111,7 @@ func dialTunnel( } else { // For TUNNEL_PROTOCOL_SSH only, the server is expected to randomize // its KEX; setting PeerKEXPRNGSeed will ensure successful negotiation - // betweem two randomized KEXes. + // between two randomized KEXes. if dialParams.ServerEntry.SshObfuscatedKey != "" { sshClientConfig.PeerKEXPRNGSeed, err = protocol.DeriveSSHServerKEXPRNGSeed( dialParams.ServerEntry.SshObfuscatedKey) @@ -1243,6 +1276,312 @@ func dialTunnel( nil } +func dialConjure( + ctx context.Context, + config *Config, + dialParams *DialParameters, + enableIPv6Dials bool, + enablePortRandomization bool, + enableRegistrationOverrides bool) (net.Conn, func(), error) { + + // Specify a cache key with a scope that ensures that: + // + // (a) cached registrations aren't used across different networks, as a + // registration requires the client's public IP to match the value at time + // of registration; + // + // (b) cached registrations are associated with specific Psiphon server + // candidates, to ensure that replay will use the same phantom IP(s). + // + // This scheme allows for reuse of cached registrations on network A when a + // client roams from network A to network B and back to network A. + // + // Using the network ID as a proxy for client public IP address is a + // heuristic: it's possible that a client's public IP address changes + // without the network ID changing, and it's not guaranteed that the client + // will be assigned the original public IP on network A; so there's some + // chance the registration cannot be reused. + + diagnosticID := dialParams.ServerEntry.GetDiagnosticID() + + cacheKey := dialParams.NetworkID + "-" + diagnosticID + + conjureConfig := &refraction.ConjureConfig{ + RegistrationCacheTTL: dialParams.ConjureCachedRegistrationTTL, + RegistrationCacheKey: cacheKey, + EnableIPv6Dials: enableIPv6Dials, + EnablePortRandomization: enablePortRandomization, + EnableRegistrationOverrides: enableRegistrationOverrides, + Transport: dialParams.ConjureTransport, + STUNServerAddress: dialParams.ConjureSTUNServerAddress, + DTLSEmptyInitialPacket: dialParams.ConjureDTLSEmptyInitialPacket, + DiagnosticID: diagnosticID, + Logger: NoticeCommonLogger(false), + } + + if dialParams.ConjureAPIRegistration { + + // Use MeekConn to domain front Conjure API registration. + // + // ConjureAPIRegistrarFrontingSpecs are applied via + // dialParams.GetMeekConfig, and will be subject to replay. + // + // Since DialMeek will create a TLS connection immediately, and a cached + // registration may be used, we will delay initializing the MeekConn-based + // RoundTripper until we know it's needed. This is implemented by passing + // in a RoundTripper that establishes a MeekConn when RoundTrip is called. + // + // In refraction.dial we configure 0 retries for API registration requests, + // assuming it's better to let another Psiphon candidate retry, with new + // domain fronting parameters. As such, we expect only one round trip call + // per NewHTTPRoundTripper, so, in practice, there's no performance penalty + // from establishing a new MeekConn per round trip. + // + // Performing the full DialMeek/RoundTrip operation here allows us to call + // MeekConn.Close and ensure all resources are immediately cleaned up.
+ roundTrip := func(request *http.Request) (*http.Response, error) { + + conn, err := DialMeek( + ctx, dialParams.GetMeekConfig(), dialParams.GetDialConfig()) + if err != nil { + return nil, errors.Trace(err) + } + defer conn.Close() + + response, err := conn.RoundTrip(request) + if err != nil { + return nil, errors.Trace(err) + } + + // Read the response into a buffer and close the response + // body, ensuring that MeekConn.Close closes all idle connections. + // + // Alternatively, we could Clone the request to set + // http.Request.Close and avoid keeping any idle connection + // open after the response body is read by gotapdance. Since + // the response body is small and since gotapdance does not + // stream the response body, we're taking this approach which + // ensures cleanup. + + body, err := ioutil.ReadAll(response.Body) + _ = response.Body.Close() + if err != nil { + return nil, errors.Trace(err) + } + response.Body = io.NopCloser(bytes.NewReader(body)) + + return response, nil + } + + conjureConfig.APIRegistrarHTTPClient = &http.Client{ + Transport: common.NewHTTPRoundTripper(roundTrip), + } + + conjureConfig.APIRegistrarBidirectionalURL = + dialParams.ConjureAPIRegistrarBidirectionalURL + conjureConfig.APIRegistrarDelay = dialParams.ConjureAPIRegistrarDelay + + } else if dialParams.ConjureDecoyRegistration { + + // The Conjure "phantom" connection is compatible with fragmentation, but + // the decoy registrar connection, like Tapdance, is not, so force it off. + // Any tunnel fragmentation metrics will refer to the "phantom" connection + // only. + conjureConfig.DoDecoyRegistration = true + conjureConfig.DecoyRegistrarWidth = dialParams.ConjureDecoyRegistrarWidth + conjureConfig.DecoyRegistrarDelay = dialParams.ConjureDecoyRegistrarDelay + } + + // Set extraFailureAction, which is invoked whenever the tunnel fails (i.e., + // where RecordFailedTunnelStat is invoked). The action will remove any + // cached registration. When refraction.DialConjure succeeds, the underlying + // registration is cached. After refraction.DialConjure returns, it no + // longer modifies the cached state of that registration, assuming that it + // remains valid and effective. However adversarial impact on a given + // phantom IP may not become evident until after the initial TCP connection + // establishment and handshake performed by refraction.DialConjure. For + // example, it may be that the phantom dial is targeted for severe + // throttling which begins or is only evident later in the flow. Scheduling + // a call to DeleteCachedConjureRegistration allows us to invalidate the + // cached registration for a tunnel that fails later in its lifecycle. + // + // Note that extraFailureAction will retain a reference to conjureConfig for + // the lifetime of the tunnel. + extraFailureAction := func() { + refraction.DeleteCachedConjureRegistration(conjureConfig) + } + + dialCtx := ctx + if protocol.ConjureTransportUsesDTLS(dialParams.ConjureTransport) { + // Conjure doesn't use the DTLS seed scheme, which supports in-proxy + // DTLS randomization. But every DTLS dial expects to find a seed + // state, so set the no-seed state. 
+ dialCtx = inproxy_dtls.SetNoDTLSSeed(ctx) + } + + dialConn, err := refraction.DialConjure( + dialCtx, + config.EmitRefractionNetworkingLogs, + config.GetPsiphonDataDirectory(), + NewRefractionNetworkingDialer(dialParams.GetDialConfig()).DialContext, + dialParams.DirectDialAddress, + conjureConfig) + if err != nil { + + // When this function fails, invoke extraFailureAction directly; when it + // succeeds, return extraFailureAction to be called later. + extraFailureAction() + + return nil, nil, errors.Trace(err) + } + + return dialConn, extraFailureAction, nil +} + +// makeInproxyTCPDialer returns a dialer which proxies TCP dials via an +// in-proxy proxy, as configured in dialParams. +// +// Limitation: MeekConn may redial TCP for a single tunnel connection, but +// that case is not supported by the in-proxy protocol, as the in-proxy proxy +// closes both its WebRTC DataChannel and the overall client connection when +// the upstream TCP connection closes. Any new connection from the client to +// the proxy must be a new tunnel connection with an accompanying +// broker/server relay. As a future enhancement, consider extending the +// in-proxy protocol to enable the client and proxy to establish additional +// WebRTC DataChannels and new upstream TCP connections within the scope of a +// single proxy/client connection. +func makeInproxyTCPDialer( + config *Config, dialParams *DialParameters) common.Dialer { + + return func(ctx context.Context, _, _ string) (net.Conn, error) { + + if dialParams.inproxyConn.Load() != nil { + return nil, errors.TraceNew("redial not supported") + } + + var conn net.Conn + var err error + + conn, err = dialInproxy(ctx, config, dialParams) + if err != nil { + return nil, errors.Trace(err) + } + + // When the TCP fragmentor is configured for the 2nd hop protocol, + // approximate the behavior by applying the fragmentor to the WebRTC + // DataChannel writes, which will result in delays and DataChannel + // message sizes which will be reflected in the proxy's relay to its + // upstream TCP connection. + // + // This code is copied from DialTCP. + // + // Limitation: TCP BPF settings are not supported and currently + // disabled for all 2nd hop cases in + // protocol.TunnelProtocolMayUseClientBPF. + + if dialParams.dialConfig.FragmentorConfig.MayFragment() { + conn = fragmentor.NewConn( + dialParams.dialConfig.FragmentorConfig, + func(message string) { + NoticeFragmentor(dialParams.dialConfig.DiagnosticID, message) + }, + conn) + } + + return conn, nil + } +} + +// dialInproxy performs the in-proxy dial and returns the resulting conn for +// use as an underlying conn for the 2nd hop protocol. The in-proxy dial +// first connects to the broker (or reuses an existing connection) to match +// with a proxy, and then establishes a connection to the proxy. +func dialInproxy( + ctx context.Context, + config *Config, + dialParams *DialParameters) (*inproxy.ClientConn, error) { + + isProxy := false + webRTCDialInstance, err := NewInproxyWebRTCDialInstance( + config, + dialParams.NetworkID, + isProxy, + dialParams.inproxyNATStateManager, + dialParams.InproxySTUNDialParameters, + dialParams.InproxyWebRTCDialParameters) + if err != nil { + return nil, errors.Trace(err) + } + + // dialAddress indicates to the broker and proxy how to dial the upstream + // Psiphon server, based on the 2nd hop tunnel protocol.
+ + networkProtocol := inproxy.NetworkProtocolUDP + reliableTransport := false + if protocol.TunnelProtocolUsesTCP(dialParams.TunnelProtocol) { + networkProtocol = inproxy.NetworkProtocolTCP + reliableTransport = true + } + + dialAddress := dialParams.DirectDialAddress + if protocol.TunnelProtocolUsesMeek(dialParams.TunnelProtocol) { + dialAddress = dialParams.MeekDialAddress + } + + // Specify the value to be returned by inproxy.ClientConn.RemoteAddr. + // Currently, the one caller of RemoteAddr is utls, which uses the + // RemoteAddr as a TLS session cache key when there is no SNI. + // GetTLSSessionCacheKeyAddress returns a cache key value that is a valid + // address and that is also a more appropriate TLS session cache key than + // the proxy address. + + remoteAddrOverride, err := dialParams.ServerEntry.GetTLSSessionCacheKeyAddress( + dialParams.TunnelProtocol) + if err != nil { + return nil, errors.Trace(err) + } + + // Unlike the proxy broker case, clients already actively fetch tactics + // during tunnel establishment, so tactics.SetTacticsAPIParameters are not + // sent to the broker and no tactics are returned by the broker. + // + // TODO: include broker fronting dial parameters to be logged by the + // broker -- as successful parameters might not otherwise be logged via + // server_tunnel if the subsequent WebRTC dials fail. + params := getBaseAPIParameters( + baseParametersNoDialParameters, true, config, nil) + + // The debugLogging flag is passed to both NoticeCommonLogger and the + // inproxy package; skipping debug logs in the inproxy package, + // before calling into the notice logger, avoids unnecessary allocations + // and formatting when debug logging is off. + debugLogging := config.InproxyEnableWebRTCDebugLogging + + clientConfig := &inproxy.ClientConfig{ + Logger: NoticeCommonLogger(debugLogging), + EnableWebRTCDebugLogging: debugLogging, + BaseAPIParameters: params, + BrokerClient: dialParams.inproxyBrokerClient, + WebRTCDialCoordinator: webRTCDialInstance, + ReliableTransport: reliableTransport, + DialNetworkProtocol: networkProtocol, + DialAddress: dialAddress, + RemoteAddrOverride: remoteAddrOverride, + PackedDestinationServerEntry: dialParams.inproxyPackedSignedServerEntry, + } + + conn, err := inproxy.DialClient(ctx, clientConfig) + if err != nil { + return nil, errors.Trace(err) + } + + // The inproxy.ClientConn is stored in dialParams.inproxyConn in order to + // later fetch its connection ID and to facilitate broker/client replay. + dialParams.inproxyConn.Store(conn) + + return conn, nil +} + // Fields are exported for JSON encoding in NoticeLivenessTest.
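As the dialTunnel comments above note, the two 2nd hop families reach dialInproxy by different routes: QUIC calls it directly for its packet conn, while TCP-based protocols go through DialConfig's CustomDialer via makeInproxyTCPDialer (that wiring happens where DialConfig is built, outside this diff). A simplified sketch of the TCP side, written as if inside the psiphon package; the helper and its caller are hypothetical:

```go
// installInproxyTCPDialer is a hypothetical helper: for in-proxy, TCP-based
// tunnel protocols, route the eventual "tcp" dial over the in-proxy WebRTC
// connection by installing makeInproxyTCPDialer as the custom dialer.
func installInproxyTCPDialer(
	config *Config, dialParams *DialParameters, dialConfig *DialConfig) {

	if protocol.TunnelProtocolUsesInproxy(dialParams.TunnelProtocol) &&
		protocol.TunnelProtocolUsesTCP(dialParams.TunnelProtocol) {

		dialConfig.CustomDialer = makeInproxyTCPDialer(config, dialParams)
	}
}
```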
type livenessTestMetrics struct { Duration string @@ -1602,14 +1941,7 @@ func (tunnel *Tunnel) operateTunnel(tunnelOwner TunnelOwner) { case serverRequest := <-tunnel.sshServerRequests: if serverRequest != nil { - err := HandleServerRequest(tunnelOwner, tunnel, serverRequest.Type, serverRequest.Payload) - if err == nil { - serverRequest.Reply(true, nil) - } else { - NoticeWarning("HandleServerRequest for %s failed: %s", serverRequest.Type, err) - serverRequest.Reply(false, nil) - - } + HandleServerRequest(tunnelOwner, tunnel, serverRequest) } case <-tunnel.operateCtx.Done(): diff --git a/psiphon/upgradeDownload.go b/psiphon/upgradeDownload.go index ac06e201f..cb142efd1 100644 --- a/psiphon/upgradeDownload.go +++ b/psiphon/upgradeDownload.go @@ -87,6 +87,7 @@ func DownloadUpgrade( downloadURL := urls.Select(attempt) + payloadSecure := true httpClient, _, _, err := MakeDownloadHTTPClient( ctx, config, @@ -94,6 +95,7 @@ func DownloadUpgrade( untunneledDialConfig, downloadURL.SkipVerify, config.DisableSystemRootCAs, + payloadSecure, downloadURL.FrontingSpecs, func(frontingProviderID string) { NoticeInfo( diff --git a/psiphon/userAgent_test.go b/psiphon/userAgent_test.go index 00ff407fe..404deaba9 100644 --- a/psiphon/userAgent_test.go +++ b/psiphon/userAgent_test.go @@ -29,7 +29,6 @@ import ( "testing" "time" - "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/values" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/server" "github.com/elazarl/goproxy" @@ -159,23 +158,10 @@ func attemptConnectionsWithUserAgent( // create a server entry - serverIPv4Address, serverIPv6Address, err := common.GetRoutableInterfaceIPAddresses() - if err != nil { - t.Fatalf("error getting server IP address: %s", err) - } - serverIPAddress := "" - if serverIPv4Address != nil { - serverIPAddress = serverIPv4Address.String() - } else { - serverIPAddress = serverIPv6Address.String() - } - _, _, _, _, encodedServerEntry, err := server.GenerateConfig( &server.GenerateConfigParams{ - ServerIPAddress: serverIPAddress, - EnableSSHAPIRequests: true, - WebServerPort: 8000, - TunnelProtocolPorts: map[string]int{tunnelProtocol: 4000}, + ServerIPAddress: "127.0.0.1", + TunnelProtocolPorts: map[string]int{tunnelProtocol: 4000}, }) if err != nil { t.Fatalf("error generating server config: %s", err) @@ -192,8 +178,8 @@ func attemptConnectionsWithUserAgent( { "ClientPlatform" : "Windows", "ClientVersion" : "0", - "SponsorId" : "0", - "PropagationChannelId" : "0", + "SponsorId" : "0000000000000000", + "PropagationChannelId" : "0000000000000000", "ConnectionWorkerPoolSize" : 1, "EstablishTunnelPausePeriodSeconds" : 1, "DisableRemoteServerListFetcher" : true, diff --git a/psiphon/utils.go b/psiphon/utils.go index 2f96f86cd..9a3b2d1bc 100755 --- a/psiphon/utils.go +++ b/psiphon/utils.go @@ -28,12 +28,14 @@ import ( "os" "runtime" "runtime/debug" + "strings" "syscall" "time" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/crypto/ssh" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/quic" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/refraction" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/resolver" @@ -214,6 +216,10 @@ func (c conditionallyEnabledComponents) 
RefractionNetworkingEnabled() bool { return refraction.Enabled() } +func (c conditionallyEnabledComponents) InproxyEnabled() bool { + return inproxy.Enabled() +} + // FileMigration represents the action of moving a file, or directory, to a new // location. type FileMigration struct { @@ -271,3 +277,35 @@ func DoFileMigration(migration FileMigration) error { return nil } + +// GetNetworkType returns a network type name, suitable for metrics, which is +// derived from the network ID. +func GetNetworkType(networkID string) string { + + // Unlike the logic in loggingNetworkIDGetter.GetNetworkID, we don't take the + // arbitrary text before the first "-" since some platforms without network + // detection support stub in random values to enable tactics. Instead we + // check for and use the common network type prefixes currently used in + // NetworkIDGetter implementations. + + if strings.HasPrefix(networkID, "VPN") { + return "VPN" + } + if strings.HasPrefix(networkID, "WIFI") { + return "WIFI" + } + if strings.HasPrefix(networkID, "MOBILE") { + return "MOBILE" + } + return "UNKNOWN" +} + +// IsInproxyCompatibleNetworkType indicates if the network type for the given +// network ID is compatible with in-proxy operation. +func IsInproxyCompatibleNetworkType(networkID string) bool { + + // When the network type is "VPN", the outer client (or MobileLibrary) has + // detected that some other, non-Psiphon VPN is active. In this case, + // most in-proxy operations are expected to fail. + return GetNetworkType(networkID) != "VPN" +} diff --git a/replace/dtls/AUTHORS.txt b/replace/dtls/AUTHORS.txt new file mode 100644 index 000000000..e14fae4c0 --- /dev/null +++ b/replace/dtls/AUTHORS.txt @@ -0,0 +1,57 @@ +# Thank you to everyone that made Pion possible. If you are interested in contributing +# we would love to have you https://github.com/pion/webrtc/wiki/Contributing +# +# This file is auto generated, using git to list all individuals contributors. 
+# see https://github.com/pion/.goassets/blob/master/scripts/generate-authors.sh for the scripting +Aleksandr Razumov +alvarowolfx +Arlo Breault +Atsushi Watanabe +backkem +bjdgyc +boks1971 +Bragadeesh +Carson Hoffman +Cecylia Bocovich +Chris Hiszpanski +cnderrauber +Daniele Sluijters +folbrich +Hayden James +Hugo Arregui +Hugo Arregui +igolaizola <11333576+igolaizola@users.noreply.github.com> +Jeffrey Stoke +Jeroen de Bruijn +Jeroen de Bruijn +Jim Wert +jinleileiking +Jozef Kralik +Julien Salleyron +Juliusz Chroboczek +Kegan Dougal +Kevin Wang +Lander Noterman +Len +Lukas Lihotzki +ManuelBk <26275612+ManuelBk@users.noreply.github.com> +Michael Zabka +Michiel De Backker +Rachel Chen +Robert Eperjesi +Ryan Gordon +Sam Lancia +Sean DuBois +Sean DuBois +Sean DuBois +Shelikhoo +Stefan Tatschner +Steffen Vogel +Vadim +Vadim Filimonov +wmiao +ZHENK +吕海涛 + +# List of contributors not appearing in Git history + diff --git a/replace/dtls/LICENSE b/replace/dtls/LICENSE new file mode 100644 index 000000000..491caf6b0 --- /dev/null +++ b/replace/dtls/LICENSE @@ -0,0 +1,9 @@ +MIT License + +Copyright (c) 2023 The Pion community + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/replace/dtls/LICENSES/CC0-1.0.txt b/replace/dtls/LICENSES/CC0-1.0.txt new file mode 100644 index 000000000..0e259d42c --- /dev/null +++ b/replace/dtls/LICENSES/CC0-1.0.txt @@ -0,0 +1,121 @@ +Creative Commons Legal Code + +CC0 1.0 Universal + + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS + PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED + HEREUNDER. + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator +and subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). 
+ +Certain owners wish to permanently relinquish those rights to a Work for +the purpose of contributing to a commons of creative, cultural and +scientific works ("Commons") that the public can reliably and without fear +of later claims of infringement build upon, modify, incorporate in other +works, reuse and redistribute as freely as possible in any form whatsoever +and for any purposes, including without limitation commercial purposes. +These owners may contribute to the Commons to promote the ideal of a free +culture and the further production of creative, cultural and scientific +works, or to gain reputation or greater distribution for their Work in +part through the use and efforts of others. + +For these and/or other purposes and motivations, and without any +expectation of additional consideration or compensation, the person +associating CC0 with a Work (the "Affirmer"), to the extent that he or she +is an owner of Copyright and Related Rights in the Work, voluntarily +elects to apply CC0 to the Work and publicly distribute the Work under its +terms, with knowledge of his or her Copyright and Related Rights in the +Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not +limited to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, + communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); +iii. publicity and privacy rights pertaining to a person's image or + likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data + in a Work; + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation + thereof, including any amended or successor version of such + directive); and +vii. other similar, equivalent or corresponding rights throughout the + world based on applicable law or treaty, and any national + implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention +of, applicable law, Affirmer hereby overtly, fully, permanently, +irrevocably and unconditionally waives, abandons, and surrenders all of +Affirmer's Copyright and Related Rights and associated claims and causes +of action, whether now known or unknown (including existing as well as +future claims and causes of action), in the Work (i) in all territories +worldwide, (ii) for the maximum duration provided by applicable law or +treaty (including future time extensions), (iii) in any current or future +medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional +purposes (the "Waiver"). 
Affirmer makes the Waiver for the benefit of each +member of the public at large and to the detriment of Affirmer's heirs and +successors, fully intending that such Waiver shall not be subject to +revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason +be judged legally invalid or ineffective under applicable law, then the +Waiver shall be preserved to the maximum extent permitted taking into +account Affirmer's express Statement of Purpose. In addition, to the +extent the Waiver is so judged Affirmer hereby grants to each affected +person a royalty-free, non transferable, non sublicensable, non exclusive, +irrevocable and unconditional license to exercise Affirmer's Copyright and +Related Rights in the Work (i) in all territories worldwide, (ii) for the +maximum duration provided by applicable law or treaty (including future +time extensions), (iii) in any current or future medium and for any number +of copies, and (iv) for any purpose whatsoever, including without +limitation commercial, advertising or promotional purposes (the +"License"). The License shall be deemed effective as of the date CC0 was +applied by Affirmer to the Work. Should any part of the License for any +reason be judged legally invalid or ineffective under applicable law, such +partial invalidity or ineffectiveness shall not invalidate the remainder +of the License, and in such case Affirmer hereby affirms that he or she +will not (i) exercise any of his or her remaining Copyright and Related +Rights in the Work or (ii) assert any associated claims and causes of +action with respect to the Work, in either case contrary to Affirmer's +express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + b. Affirmer offers the Work as-is and makes no representations or + warranties of any kind concerning the Work, express, implied, + statutory or otherwise, including without limitation warranties of + title, merchantability, fitness for a particular purpose, non + infringement, or the absence of latent or other defects, accuracy, or + the present or absence of errors, whether or not discoverable, all to + the greatest extent permissible under applicable law. + c. Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without + limitation any person's Copyright and Related Rights in the Work. + Further, Affirmer disclaims responsibility for obtaining any necessary + consents, permissions or other rights required for any use of the + Work. + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to + this CC0 or use of the Work. 
diff --git a/replace/dtls/LICENSES/MIT.txt b/replace/dtls/LICENSES/MIT.txt new file mode 100644 index 000000000..2071b23b0 --- /dev/null +++ b/replace/dtls/LICENSES/MIT.txt @@ -0,0 +1,9 @@ +MIT License + +Copyright (c) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/replace/dtls/README.md b/replace/dtls/README.md new file mode 100644 index 000000000..0c0659593 --- /dev/null +++ b/replace/dtls/README.md @@ -0,0 +1,151 @@ +

+Pion DTLS
+
+A Go implementation of DTLS
+
+[README header badges: GitHub Workflow Status, Go Reference, Coverage Status, Go Report Card, License: MIT]
+
+ +Native [DTLS 1.2][rfc6347] implementation in the Go programming language. + +A long term goal is a professional security review, and maybe an inclusion in stdlib. + +### RFCs +#### Implemented +- **RFC 6347**: [Datagram Transport Layer Security Version 1.2][rfc6347] +- **RFC 5705**: [Keying Material Exporters for Transport Layer Security (TLS)][rfc5705] +- **RFC 7627**: [Transport Layer Security (TLS) - Session Hash and Extended Master Secret Extension][rfc7627] +- **RFC 7301**: [Transport Layer Security (TLS) - Application-Layer Protocol Negotiation Extension][rfc7301] + +[rfc5289]: https://tools.ietf.org/html/rfc5289 +[rfc5487]: https://tools.ietf.org/html/rfc5487 +[rfc5489]: https://tools.ietf.org/html/rfc5489 +[rfc5705]: https://tools.ietf.org/html/rfc5705 +[rfc6347]: https://tools.ietf.org/html/rfc6347 +[rfc6655]: https://tools.ietf.org/html/rfc6655 +[rfc7301]: https://tools.ietf.org/html/rfc7301 +[rfc7627]: https://tools.ietf.org/html/rfc7627 +[rfc8422]: https://tools.ietf.org/html/rfc8422 + +### Goals/Progress +This will only be targeting DTLS 1.2, and the most modern/common cipher suites. +We would love contributions that fall under the 'Planned Features' and any bug fixes! + +#### Current features +* DTLS 1.2 Client/Server +* Key Exchange via ECDHE(curve25519, nistp256, nistp384) and PSK +* Packet loss and re-ordering is handled during handshaking +* Key export ([RFC 5705][rfc5705]) +* Serialization and Resumption of sessions +* Extended Master Secret extension ([RFC 7627][rfc7627]) +* ALPN extension ([RFC 7301][rfc7301]) + +#### Supported ciphers + +##### ECDHE + +* TLS_ECDHE_ECDSA_WITH_AES_128_CCM ([RFC 6655][rfc6655]) +* TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 ([RFC 6655][rfc6655]) +* TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 ([RFC 5289][rfc5289]) +* TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ([RFC 5289][rfc5289]) +* TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ([RFC 5289][rfc5289]) +* TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ([RFC 5289][rfc5289]) +* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA ([RFC 8422][rfc8422]) +* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA ([RFC 8422][rfc8422]) + +##### PSK + +* TLS_PSK_WITH_AES_128_CCM ([RFC 6655][rfc6655]) +* TLS_PSK_WITH_AES_128_CCM_8 ([RFC 6655][rfc6655]) +* TLS_PSK_WITH_AES_256_CCM_8 ([RFC 6655][rfc6655]) +* TLS_PSK_WITH_AES_128_GCM_SHA256 ([RFC 5487][rfc5487]) +* TLS_PSK_WITH_AES_128_CBC_SHA256 ([RFC 5487][rfc5487]) + +##### ECDHE & PSK + +* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 ([RFC 5489][rfc5489]) + +#### Planned Features +* Chacha20Poly1305 + +#### Excluded Features +* DTLS 1.0 +* Renegotiation +* Compression + +### Using + +This library needs at least Go 1.13, and you should have [Go modules +enabled](https://github.com/golang/go/wiki/Modules). + +#### Pion DTLS +For a DTLS 1.2 Server that listens on 127.0.0.1:4444 +```sh +go run examples/listen/selfsign/main.go +``` + +For a DTLS 1.2 Client that connects to 127.0.0.1:4444 +```sh +go run examples/dial/selfsign/main.go +``` + +#### OpenSSL +Pion DTLS can connect to itself and OpenSSL. 
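For reference, the Go side of this interoperability test can be sketched in a few lines. The snippet below is a minimal, hypothetical condensation of what `examples/dial/selfsign/main.go` does (the example program in the repository is the authoritative version); it pairs with the `openssl s_server` command shown below. The listening side is covered by the `go run examples/listen/selfsign/main.go` command above.

```go
// Minimal sketch of a DTLS 1.2 client, loosely following examples/dial/selfsign.
// Assumptions: the canonical module path github.com/pion/dtls/v2, and a server
// (pion or openssl s_server) already listening on 127.0.0.1:4444.
package main

import (
	"crypto/tls"
	"fmt"
	"net"

	"github.com/pion/dtls/v2"
	"github.com/pion/dtls/v2/pkg/crypto/selfsign"
)

func main() {
	addr := &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 4444}

	// Throwaway self-signed certificate for the client side of the handshake.
	certificate, err := selfsign.GenerateSelfSigned()
	if err != nil {
		panic(err)
	}

	config := &dtls.Config{
		Certificates:         []tls.Certificate{certificate},
		InsecureSkipVerify:   true, // test setup only: the peer uses a self-signed cert
		ExtendedMasterSecret: dtls.RequireExtendedMasterSecret,
	}

	conn, err := dtls.Dial("udp", addr, config)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	if _, err := conn.Write([]byte("hello from pion/dtls")); err != nil {
		panic(err)
	}
	fmt.Println("sent application data over DTLS 1.2")
}
```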
+``` + // Generate a certificate + openssl ecparam -out key.pem -name prime256v1 -genkey + openssl req -new -sha256 -key key.pem -out server.csr + openssl x509 -req -sha256 -days 365 -in server.csr -signkey key.pem -out cert.pem + + // Use with examples/dial/selfsign/main.go + openssl s_server -dtls1_2 -cert cert.pem -key key.pem -accept 4444 + + // Use with examples/listen/selfsign/main.go + openssl s_client -dtls1_2 -connect 127.0.0.1:4444 -debug -cert cert.pem -key key.pem +``` + +### Using with PSK +Pion DTLS also comes with examples that do key exchange via PSK + +#### Pion DTLS +```sh +go run examples/listen/psk/main.go +``` + +```sh +go run examples/dial/psk/main.go +``` + +#### OpenSSL +``` + // Use with examples/dial/psk/main.go + openssl s_server -dtls1_2 -accept 4444 -nocert -psk abc123 -cipher PSK-AES128-CCM8 + + // Use with examples/listen/psk/main.go + openssl s_client -dtls1_2 -connect 127.0.0.1:4444 -psk abc123 -cipher PSK-AES128-CCM8 +``` + +### Community +Pion has an active community on the [Slack](https://pion.ly/slack). + +Follow the [Pion Twitter](https://twitter.com/_pion) for project updates and important WebRTC news. + +We are always looking to support **your projects**. Please reach out if you have something to build! +If you need commercial support or don't want to use public methods you can contact us at [team@pion.ly](mailto:team@pion.ly) + +### Contributing +Check out the [contributing wiki](https://github.com/pion/webrtc/wiki/Contributing) to join the group of amazing people making this project possible: [AUTHORS.txt](./AUTHORS.txt) + +### License +MIT License - see [LICENSE](LICENSE) for full text diff --git a/replace/dtls/bench_test.go b/replace/dtls/bench_test.go new file mode 100644 index 000000000..abec5a5d7 --- /dev/null +++ b/replace/dtls/bench_test.go @@ -0,0 +1,121 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "context" + "crypto/tls" + "fmt" + "testing" + "time" + + "github.com/pion/dtls/v2/pkg/crypto/selfsign" + "github.com/pion/logging" + "github.com/pion/transport/v2/dpipe" + "github.com/pion/transport/v2/test" +) + +func TestSimpleReadWrite(t *testing.T) { + report := test.CheckRoutines(t) + defer report() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ca, cb := dpipe.Pipe() + certificate, err := selfsign.GenerateSelfSigned() + if err != nil { + t.Fatal(err) + } + gotHello := make(chan struct{}) + + go func() { + server, sErr := testServer(ctx, cb, &Config{ + Certificates: []tls.Certificate{certificate}, + LoggerFactory: logging.NewDefaultLoggerFactory(), + }, false) + if sErr != nil { + t.Error(sErr) + return + } + buf := make([]byte, 1024) + if _, sErr = server.Read(buf); sErr != nil { + t.Error(sErr) + } + gotHello <- struct{}{} + if sErr = server.Close(); sErr != nil { //nolint:contextcheck + t.Error(sErr) + } + }() + + client, err := testClient(ctx, ca, &Config{ + LoggerFactory: logging.NewDefaultLoggerFactory(), + InsecureSkipVerify: true, + }, false) + if err != nil { + t.Fatal(err) + } + if _, err = client.Write([]byte("hello")); err != nil { + t.Error(err) + } + select { + case <-gotHello: + // OK + case <-time.After(time.Second * 5): + t.Error("timeout") + } + + if err = client.Close(); err != nil { + t.Error(err) + } +} + +func benchmarkConn(b *testing.B, n int64) { + b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { + ctx := context.Background() + + ca, cb := dpipe.Pipe() + certificate, err := 
selfsign.GenerateSelfSigned() + server := make(chan *Conn) + go func() { + s, sErr := testServer(ctx, cb, &Config{ + Certificates: []tls.Certificate{certificate}, + }, false) + if err != nil { + b.Error(sErr) + return + } + server <- s + }() + if err != nil { + b.Fatal(err) + } + hw := make([]byte, n) + b.ReportAllocs() + b.SetBytes(int64(len(hw))) + go func() { + client, cErr := testClient(ctx, ca, &Config{InsecureSkipVerify: true}, false) + if cErr != nil { + b.Error(err) + } + for { + if _, cErr = client.Write(hw); cErr != nil { //nolint:contextcheck + b.Error(err) + } + } + }() + s := <-server + buf := make([]byte, 2048) + for i := 0; i < b.N; i++ { + if _, err = s.Read(buf); err != nil { + b.Error(err) + } + } + }) +} + +func BenchmarkConnReadWrite(b *testing.B) { + for _, n := range []int64{16, 128, 512, 1024, 2048} { + benchmarkConn(b, n) + } +} diff --git a/replace/dtls/certificate.go b/replace/dtls/certificate.go new file mode 100644 index 000000000..6f6ad55f5 --- /dev/null +++ b/replace/dtls/certificate.go @@ -0,0 +1,163 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "fmt" + "strings" + + "github.com/pion/dtls/v2/pkg/protocol/handshake" +) + +// ClientHelloInfo contains information from a ClientHello message in order to +// guide application logic in the GetCertificate. +type ClientHelloInfo struct { + // ServerName indicates the name of the server requested by the client + // in order to support virtual hosting. ServerName is only set if the + // client is using SNI (see RFC 4366, Section 3.1). + ServerName string + + // CipherSuites lists the CipherSuites supported by the client (e.g. + // TLS_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256). + CipherSuites []CipherSuiteID + + // [Psiphon] + // Conjure DTLS support, from: https://github.com/mingyech/dtls/commit/a56eccc1 + RandomBytes [handshake.RandomBytesLength]byte +} + +// CertificateRequestInfo contains information from a server's +// CertificateRequest message, which is used to demand a certificate and proof +// of control from a client. +type CertificateRequestInfo struct { + // AcceptableCAs contains zero or more, DER-encoded, X.501 + // Distinguished Names. These are the names of root or intermediate CAs + // that the server wishes the returned certificate to be signed by. An + // empty slice indicates that the server has no preference. + AcceptableCAs [][]byte +} + +// SupportsCertificate returns nil if the provided certificate is supported by +// the server that sent the CertificateRequest. Otherwise, it returns an error +// describing the reason for the incompatibility. +// NOTE: original src: https://github.com/golang/go/blob/29b9a328d268d53833d2cc063d1d8b4bf6852675/src/crypto/tls/common.go#L1273 +func (cri *CertificateRequestInfo) SupportsCertificate(c *tls.Certificate) error { + if len(cri.AcceptableCAs) == 0 { + return nil + } + + for j, cert := range c.Certificate { + x509Cert := c.Leaf + // Parse the certificate if this isn't the leaf node, or if + // chain.Leaf was nil. 
+ if j != 0 || x509Cert == nil { + var err error + if x509Cert, err = x509.ParseCertificate(cert); err != nil { + return fmt.Errorf("failed to parse certificate #%d in the chain: %w", j, err) + } + } + + for _, ca := range cri.AcceptableCAs { + if bytes.Equal(x509Cert.RawIssuer, ca) { + return nil + } + } + } + return errNotAcceptableCertificateChain +} + +func (c *handshakeConfig) setNameToCertificateLocked() { + nameToCertificate := make(map[string]*tls.Certificate) + for i := range c.localCertificates { + cert := &c.localCertificates[i] + x509Cert := cert.Leaf + if x509Cert == nil { + var parseErr error + x509Cert, parseErr = x509.ParseCertificate(cert.Certificate[0]) + if parseErr != nil { + continue + } + } + if len(x509Cert.Subject.CommonName) > 0 { + nameToCertificate[strings.ToLower(x509Cert.Subject.CommonName)] = cert + } + for _, san := range x509Cert.DNSNames { + nameToCertificate[strings.ToLower(san)] = cert + } + } + c.nameToCertificate = nameToCertificate +} + +func (c *handshakeConfig) getCertificate(clientHelloInfo *ClientHelloInfo) (*tls.Certificate, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if c.localGetCertificate != nil && + (len(c.localCertificates) == 0 || len(clientHelloInfo.ServerName) > 0) { + cert, err := c.localGetCertificate(clientHelloInfo) + if cert != nil || err != nil { + return cert, err + } + } + + if c.nameToCertificate == nil { + c.setNameToCertificateLocked() + } + + if len(c.localCertificates) == 0 { + return nil, errNoCertificates + } + + if len(c.localCertificates) == 1 { + // There's only one choice, so no point doing any work. + return &c.localCertificates[0], nil + } + + if len(clientHelloInfo.ServerName) == 0 { + return &c.localCertificates[0], nil + } + + name := strings.TrimRight(strings.ToLower(clientHelloInfo.ServerName), ".") + + if cert, ok := c.nameToCertificate[name]; ok { + return cert, nil + } + + // try replacing labels in the name with wildcards until we get a + // match. + labels := strings.Split(name, ".") + for i := range labels { + labels[i] = "*" + candidate := strings.Join(labels, ".") + if cert, ok := c.nameToCertificate[candidate]; ok { + return cert, nil + } + } + + // If nothing matches, return the first certificate. + return &c.localCertificates[0], nil +} + +// NOTE: original src: https://github.com/golang/go/blob/29b9a328d268d53833d2cc063d1d8b4bf6852675/src/crypto/tls/handshake_client.go#L974 +func (c *handshakeConfig) getClientCertificate(cri *CertificateRequestInfo) (*tls.Certificate, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.localGetClientCertificate != nil { + return c.localGetClientCertificate(cri) + } + + for i := range c.localCertificates { + chain := c.localCertificates[i] + if err := cri.SupportsCertificate(&chain); err != nil { + continue + } + return &chain, nil + } + + // No acceptable certificate found. Don't send a certificate. 
+ return new(tls.Certificate), nil +} diff --git a/replace/dtls/certificate_test.go b/replace/dtls/certificate_test.go new file mode 100644 index 000000000..5f2e87bb4 --- /dev/null +++ b/replace/dtls/certificate_test.go @@ -0,0 +1,104 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "crypto/tls" + "reflect" + "testing" + + "github.com/pion/dtls/v2/pkg/crypto/selfsign" +) + +func TestGetCertificate(t *testing.T) { + certificateWildcard, err := selfsign.GenerateSelfSignedWithDNS("*.test.test") + if err != nil { + t.Fatal(err) + } + + certificateTest, err := selfsign.GenerateSelfSignedWithDNS("test.test", "www.test.test", "pop.test.test") + if err != nil { + t.Fatal(err) + } + + certificateRandom, err := selfsign.GenerateSelfSigned() + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + localCertificates []tls.Certificate + desc string + serverName string + expectedCertificate tls.Certificate + getCertificate func(info *ClientHelloInfo) (*tls.Certificate, error) + }{ + { + desc: "Simple match in CN", + localCertificates: []tls.Certificate{ + certificateRandom, + certificateTest, + certificateWildcard, + }, + serverName: "test.test", + expectedCertificate: certificateTest, + }, + { + desc: "Simple match in SANs", + localCertificates: []tls.Certificate{ + certificateRandom, + certificateTest, + certificateWildcard, + }, + serverName: "www.test.test", + expectedCertificate: certificateTest, + }, + + { + desc: "Wildcard match", + localCertificates: []tls.Certificate{ + certificateRandom, + certificateTest, + certificateWildcard, + }, + serverName: "foo.test.test", + expectedCertificate: certificateWildcard, + }, + { + desc: "No match return first", + localCertificates: []tls.Certificate{ + certificateRandom, + certificateTest, + certificateWildcard, + }, + serverName: "foo.bar", + expectedCertificate: certificateRandom, + }, + { + desc: "Get certificate from callback", + getCertificate: func(info *ClientHelloInfo) (*tls.Certificate, error) { + return &certificateTest, nil + }, + expectedCertificate: certificateTest, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + cfg := &handshakeConfig{ + localCertificates: test.localCertificates, + localGetCertificate: test.getCertificate, + } + cert, err := cfg.getCertificate(&ClientHelloInfo{ServerName: test.serverName}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(cert.Leaf, test.expectedCertificate.Leaf) { + t.Fatalf("Certificate does not match: expected(%v) actual(%v)", test.expectedCertificate.Leaf, cert.Leaf) + } + }) + } +} diff --git a/replace/dtls/cipher_suite.go b/replace/dtls/cipher_suite.go new file mode 100644 index 000000000..7a5bb4a58 --- /dev/null +++ b/replace/dtls/cipher_suite.go @@ -0,0 +1,276 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/tls" + "fmt" + "hash" + + "github.com/pion/dtls/v2/internal/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +// CipherSuiteID is an ID for our supported CipherSuites +type CipherSuiteID = ciphersuite.ID + +// Supported Cipher Suites +const ( + // AES-128-CCM + TLS_ECDHE_ECDSA_WITH_AES_128_CCM CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_128_CCM //nolint:revive,stylecheck + TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 CipherSuiteID = 
ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 //nolint:revive,stylecheck + + // AES-128-GCM-SHA256 + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 //nolint:revive,stylecheck + TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 CipherSuiteID = ciphersuite.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 //nolint:revive,stylecheck + + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 //nolint:revive,stylecheck + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 CipherSuiteID = ciphersuite.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 //nolint:revive,stylecheck + + // AES-256-CBC-SHA + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA //nolint:revive,stylecheck + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA CipherSuiteID = ciphersuite.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA //nolint:revive,stylecheck + + TLS_PSK_WITH_AES_128_CCM CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_128_CCM //nolint:revive,stylecheck + TLS_PSK_WITH_AES_128_CCM_8 CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_128_CCM_8 //nolint:revive,stylecheck + TLS_PSK_WITH_AES_256_CCM_8 CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_256_CCM_8 //nolint:revive,stylecheck + TLS_PSK_WITH_AES_128_GCM_SHA256 CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_128_GCM_SHA256 //nolint:revive,stylecheck + TLS_PSK_WITH_AES_128_CBC_SHA256 CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_128_CBC_SHA256 //nolint:revive,stylecheck + + TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 CipherSuiteID = ciphersuite.TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 //nolint:revive,stylecheck +) + +// CipherSuiteAuthenticationType controls what authentication method is using during the handshake for a CipherSuite +type CipherSuiteAuthenticationType = ciphersuite.AuthenticationType + +// AuthenticationType Enums +const ( + CipherSuiteAuthenticationTypeCertificate CipherSuiteAuthenticationType = ciphersuite.AuthenticationTypeCertificate + CipherSuiteAuthenticationTypePreSharedKey CipherSuiteAuthenticationType = ciphersuite.AuthenticationTypePreSharedKey + CipherSuiteAuthenticationTypeAnonymous CipherSuiteAuthenticationType = ciphersuite.AuthenticationTypeAnonymous +) + +// CipherSuiteKeyExchangeAlgorithm controls what exchange algorithm is using during the handshake for a CipherSuite +type CipherSuiteKeyExchangeAlgorithm = ciphersuite.KeyExchangeAlgorithm + +// CipherSuiteKeyExchangeAlgorithm Bitmask +const ( + CipherSuiteKeyExchangeAlgorithmNone CipherSuiteKeyExchangeAlgorithm = ciphersuite.KeyExchangeAlgorithmNone + CipherSuiteKeyExchangeAlgorithmPsk CipherSuiteKeyExchangeAlgorithm = ciphersuite.KeyExchangeAlgorithmPsk + CipherSuiteKeyExchangeAlgorithmEcdhe CipherSuiteKeyExchangeAlgorithm = ciphersuite.KeyExchangeAlgorithmEcdhe +) + +var _ = allCipherSuites() // Necessary until this function isn't only used by Go 1.14 + +// CipherSuite is an interface that all DTLS CipherSuites must satisfy +type CipherSuite interface { + // String of CipherSuite, only used for logging + String() string + + // ID of CipherSuite. 
+ ID() CipherSuiteID + + // What type of Certificate does this CipherSuite use + CertificateType() clientcertificate.Type + + // What Hash function is used during verification + HashFunc() func() hash.Hash + + // AuthenticationType controls what authentication method is using during the handshake + AuthenticationType() CipherSuiteAuthenticationType + + // KeyExchangeAlgorithm controls what exchange algorithm is using during the handshake + KeyExchangeAlgorithm() CipherSuiteKeyExchangeAlgorithm + + // ECC (Elliptic Curve Cryptography) determines whether ECC extesions will be send during handshake. + // https://datatracker.ietf.org/doc/html/rfc4492#page-10 + ECC() bool + + // Called when keying material has been generated, should initialize the internal cipher + Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error + IsInitialized() bool + Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) + Decrypt(in []byte) ([]byte, error) +} + +// CipherSuiteName provides the same functionality as tls.CipherSuiteName +// that appeared first in Go 1.14. +// +// Our implementation differs slightly in that it takes in a CiperSuiteID, +// like the rest of our library, instead of a uint16 like crypto/tls. +func CipherSuiteName(id CipherSuiteID) string { + suite := cipherSuiteForID(id, nil) + if suite != nil { + return suite.String() + } + return fmt.Sprintf("0x%04X", uint16(id)) +} + +// Taken from https://www.iana.org/assignments/tls-parameters/tls-parameters.xml +// A cipherSuite is a specific combination of key agreement, cipher and MAC +// function. +func cipherSuiteForID(id CipherSuiteID, customCiphers func() []CipherSuite) CipherSuite { + switch id { //nolint:exhaustive + case TLS_ECDHE_ECDSA_WITH_AES_128_CCM: + return ciphersuite.NewTLSEcdheEcdsaWithAes128Ccm() + case TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8: + return ciphersuite.NewTLSEcdheEcdsaWithAes128Ccm8() + case TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + return &ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{} + case TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: + return &ciphersuite.TLSEcdheRsaWithAes128GcmSha256{} + case TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: + return &ciphersuite.TLSEcdheEcdsaWithAes256CbcSha{} + case TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: + return &ciphersuite.TLSEcdheRsaWithAes256CbcSha{} + case TLS_PSK_WITH_AES_128_CCM: + return ciphersuite.NewTLSPskWithAes128Ccm() + case TLS_PSK_WITH_AES_128_CCM_8: + return ciphersuite.NewTLSPskWithAes128Ccm8() + case TLS_PSK_WITH_AES_256_CCM_8: + return ciphersuite.NewTLSPskWithAes256Ccm8() + case TLS_PSK_WITH_AES_128_GCM_SHA256: + return &ciphersuite.TLSPskWithAes128GcmSha256{} + case TLS_PSK_WITH_AES_128_CBC_SHA256: + return &ciphersuite.TLSPskWithAes128CbcSha256{} + case TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: + return &ciphersuite.TLSEcdheEcdsaWithAes256GcmSha384{} + case TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: + return &ciphersuite.TLSEcdheRsaWithAes256GcmSha384{} + case TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256: + return ciphersuite.NewTLSEcdhePskWithAes128CbcSha256() + } + + if customCiphers != nil { + for _, c := range customCiphers() { + if c.ID() == id { + return c + } + } + } + + return nil +} + +// CipherSuites we support in order of preference +func defaultCipherSuites() []CipherSuite { + return []CipherSuite{ + &ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{}, + &ciphersuite.TLSEcdheRsaWithAes128GcmSha256{}, + &ciphersuite.TLSEcdheEcdsaWithAes256CbcSha{}, + &ciphersuite.TLSEcdheRsaWithAes256CbcSha{}, + &ciphersuite.TLSEcdheEcdsaWithAes256GcmSha384{}, + 
&ciphersuite.TLSEcdheRsaWithAes256GcmSha384{}, + } +} + +func allCipherSuites() []CipherSuite { + return []CipherSuite{ + ciphersuite.NewTLSEcdheEcdsaWithAes128Ccm(), + ciphersuite.NewTLSEcdheEcdsaWithAes128Ccm8(), + &ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{}, + &ciphersuite.TLSEcdheRsaWithAes128GcmSha256{}, + &ciphersuite.TLSEcdheEcdsaWithAes256CbcSha{}, + &ciphersuite.TLSEcdheRsaWithAes256CbcSha{}, + ciphersuite.NewTLSPskWithAes128Ccm(), + ciphersuite.NewTLSPskWithAes128Ccm8(), + ciphersuite.NewTLSPskWithAes256Ccm8(), + &ciphersuite.TLSPskWithAes128GcmSha256{}, + &ciphersuite.TLSEcdheEcdsaWithAes256GcmSha384{}, + &ciphersuite.TLSEcdheRsaWithAes256GcmSha384{}, + } +} + +func cipherSuiteIDs(cipherSuites []CipherSuite) []uint16 { + rtrn := []uint16{} + for _, c := range cipherSuites { + rtrn = append(rtrn, uint16(c.ID())) + } + return rtrn +} + +func parseCipherSuites(userSelectedSuites []CipherSuiteID, customCipherSuites func() []CipherSuite, includeCertificateSuites, includePSKSuites bool) ([]CipherSuite, error) { + cipherSuitesForIDs := func(ids []CipherSuiteID) ([]CipherSuite, error) { + cipherSuites := []CipherSuite{} + for _, id := range ids { + c := cipherSuiteForID(id, nil) + if c == nil { + return nil, &invalidCipherSuiteError{id} + } + cipherSuites = append(cipherSuites, c) + } + return cipherSuites, nil + } + + var ( + cipherSuites []CipherSuite + err error + i int + ) + if userSelectedSuites != nil { + cipherSuites, err = cipherSuitesForIDs(userSelectedSuites) + if err != nil { + return nil, err + } + } else { + cipherSuites = defaultCipherSuites() + } + + // Put CustomCipherSuites before ID selected suites + if customCipherSuites != nil { + cipherSuites = append(customCipherSuites(), cipherSuites...) + } + + var foundCertificateSuite, foundPSKSuite, foundAnonymousSuite bool + for _, c := range cipherSuites { + switch { + case includeCertificateSuites && c.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate: + foundCertificateSuite = true + case includePSKSuites && c.AuthenticationType() == CipherSuiteAuthenticationTypePreSharedKey: + foundPSKSuite = true + case c.AuthenticationType() == CipherSuiteAuthenticationTypeAnonymous: + foundAnonymousSuite = true + default: + continue + } + cipherSuites[i] = c + i++ + } + + switch { + case includeCertificateSuites && !foundCertificateSuite && !foundAnonymousSuite: + return nil, errNoAvailableCertificateCipherSuite + case includePSKSuites && !foundPSKSuite: + return nil, errNoAvailablePSKCipherSuite + case i == 0: + return nil, errNoAvailableCipherSuites + } + + return cipherSuites[:i], nil +} + +func filterCipherSuitesForCertificate(cert *tls.Certificate, cipherSuites []CipherSuite) []CipherSuite { + if cert == nil || cert.PrivateKey == nil { + return cipherSuites + } + var certType clientcertificate.Type + switch cert.PrivateKey.(type) { + case ed25519.PrivateKey, *ecdsa.PrivateKey: + certType = clientcertificate.ECDSASign + case *rsa.PrivateKey: + certType = clientcertificate.RSASign + } + + filtered := []CipherSuite{} + for _, c := range cipherSuites { + if c.AuthenticationType() != CipherSuiteAuthenticationTypeCertificate || certType == c.CertificateType() { + filtered = append(filtered, c) + } + } + return filtered +} diff --git a/replace/dtls/cipher_suite_go114.go b/replace/dtls/cipher_suite_go114.go new file mode 100644 index 000000000..fd46d7bd9 --- /dev/null +++ b/replace/dtls/cipher_suite_go114.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + 
+//go:build go1.14 +// +build go1.14 + +package dtls + +import ( + "crypto/tls" +) + +// VersionDTLS12 is the DTLS version in the same style as +// VersionTLSXX from crypto/tls +const VersionDTLS12 = 0xfefd + +// Convert from our cipherSuite interface to a tls.CipherSuite struct +func toTLSCipherSuite(c CipherSuite) *tls.CipherSuite { + return &tls.CipherSuite{ + ID: uint16(c.ID()), + Name: c.String(), + SupportedVersions: []uint16{VersionDTLS12}, + Insecure: false, + } +} + +// CipherSuites returns a list of cipher suites currently implemented by this +// package, excluding those with security issues, which are returned by +// InsecureCipherSuites. +func CipherSuites() []*tls.CipherSuite { + suites := allCipherSuites() + res := make([]*tls.CipherSuite, len(suites)) + for i, c := range suites { + res[i] = toTLSCipherSuite(c) + } + return res +} + +// InsecureCipherSuites returns a list of cipher suites currently implemented by +// this package and which have security issues. +func InsecureCipherSuites() []*tls.CipherSuite { + var res []*tls.CipherSuite + return res +} diff --git a/replace/dtls/cipher_suite_go114_test.go b/replace/dtls/cipher_suite_go114_test.go new file mode 100644 index 000000000..35c4b1ef6 --- /dev/null +++ b/replace/dtls/cipher_suite_go114_test.go @@ -0,0 +1,55 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +//go:build go1.14 +// +build go1.14 + +package dtls + +import ( + "testing" +) + +func TestInsecureCipherSuites(t *testing.T) { + r := InsecureCipherSuites() + + if len(r) != 0 { + t.Fatalf("Expected no insecure ciphersuites, got %d", len(r)) + } +} + +func TestCipherSuites(t *testing.T) { + ours := allCipherSuites() + theirs := CipherSuites() + + if len(ours) != len(theirs) { + t.Fatalf("Expected %d CipherSuites, got %d", len(ours), len(theirs)) + } + + for i, s := range ours { + i := i + s := s + t.Run(s.String(), func(t *testing.T) { + c := theirs[i] + if c.ID != uint16(s.ID()) { + t.Fatalf("Expected ID: 0x%04X, got 0x%04X", s.ID(), c.ID) + } + + if c.Name != s.String() { + t.Fatalf("Expected Name: %s, got %s", s.String(), c.Name) + } + + if len(c.SupportedVersions) != 1 { + t.Fatalf("Expected %d SupportedVersion, got %d", 1, len(c.SupportedVersions)) + } + + if c.SupportedVersions[0] != VersionDTLS12 { + t.Fatalf("Expected SupportedVersions 0x%04X, got 0x%04X", VersionDTLS12, c.SupportedVersions[0]) + } + + if c.Insecure { + t.Fatalf("Expected Insecure %t, got %t", false, c.Insecure) + } + }) + } +} diff --git a/replace/dtls/cipher_suite_test.go b/replace/dtls/cipher_suite_test.go new file mode 100644 index 000000000..655fe6717 --- /dev/null +++ b/replace/dtls/cipher_suite_test.go @@ -0,0 +1,111 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "context" + "testing" + "time" + + "github.com/pion/dtls/v2/internal/ciphersuite" + "github.com/pion/transport/v2/dpipe" + "github.com/pion/transport/v2/test" +) + +func TestCipherSuiteName(t *testing.T) { + testCases := []struct { + suite CipherSuiteID + expected string + }{ + {TLS_ECDHE_ECDSA_WITH_AES_128_CCM, "TLS_ECDHE_ECDSA_WITH_AES_128_CCM"}, + {CipherSuiteID(0x0000), "0x0000"}, + } + + for _, testCase := range testCases { + res := CipherSuiteName(testCase.suite) + if res != testCase.expected { + t.Fatalf("Expected: %s, got %s", testCase.expected, res) + } + } +} + +func TestAllCipherSuites(t *testing.T) { + actual := len(allCipherSuites()) + if actual == 0 { + t.Fatal() + } +} + +// CustomCipher 
that is just used to assert Custom IDs work +type testCustomCipherSuite struct { + ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256 + authenticationType CipherSuiteAuthenticationType +} + +func (t *testCustomCipherSuite) ID() CipherSuiteID { + return 0xFFFF +} + +func (t *testCustomCipherSuite) AuthenticationType() CipherSuiteAuthenticationType { + return t.authenticationType +} + +// Assert that two connections that pass in a CipherSuite with a CustomID works +func TestCustomCipherSuite(t *testing.T) { + type result struct { + c *Conn + err error + } + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + runTest := func(cipherFactory func() []CipherSuite) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ca, cb := dpipe.Pipe() + c := make(chan result) + + go func() { + client, err := testClient(ctx, ca, &Config{ + CipherSuites: []CipherSuiteID{}, + CustomCipherSuites: cipherFactory, + }, true) + c <- result{client, err} + }() + + server, err := testServer(ctx, cb, &Config{ + CipherSuites: []CipherSuiteID{}, + CustomCipherSuites: cipherFactory, + }, true) + + clientResult := <-c + + if err != nil { + t.Error(err) + } else { + _ = server.Close() + } + + if clientResult.err != nil { + t.Error(clientResult.err) + } else { + _ = clientResult.c.Close() + } + } + + t.Run("Custom ID", func(t *testing.T) { + runTest(func() []CipherSuite { + return []CipherSuite{&testCustomCipherSuite{authenticationType: CipherSuiteAuthenticationTypeCertificate}} + }) + }) + + t.Run("Anonymous Cipher", func(t *testing.T) { + runTest(func() []CipherSuite { + return []CipherSuite{&testCustomCipherSuite{authenticationType: CipherSuiteAuthenticationTypeAnonymous}} + }) + }) +} diff --git a/replace/dtls/codecov.yml b/replace/dtls/codecov.yml new file mode 100644 index 000000000..263e4d45c --- /dev/null +++ b/replace/dtls/codecov.yml @@ -0,0 +1,22 @@ +# +# DO NOT EDIT THIS FILE +# +# It is automatically copied from https://github.com/pion/.goassets repository. +# +# SPDX-FileCopyrightText: 2023 The Pion community +# SPDX-License-Identifier: MIT + +coverage: + status: + project: + default: + # Allow decreasing 2% of total coverage to avoid noise. + threshold: 2% + patch: + default: + target: 70% + only_pulls: true + +ignore: + - "examples/*" + - "examples/**/*" diff --git a/replace/dtls/compression_method.go b/replace/dtls/compression_method.go new file mode 100644 index 000000000..7e44de009 --- /dev/null +++ b/replace/dtls/compression_method.go @@ -0,0 +1,12 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import "github.com/pion/dtls/v2/pkg/protocol" + +func defaultCompressionMethods() []*protocol.CompressionMethod { + return []*protocol.CompressionMethod{ + {}, + } +} diff --git a/replace/dtls/config.go b/replace/dtls/config.go new file mode 100644 index 000000000..c70c19973 --- /dev/null +++ b/replace/dtls/config.go @@ -0,0 +1,259 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "context" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "io" + "time" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/logging" +) + +const keyLogLabelTLS12 = "CLIENT_RANDOM" + +// Config is used to configure a DTLS client or server. +// After a Config is passed to a DTLS function it must not be modified. 
+type Config struct { + // Certificates contains certificate chain to present to the other side of the connection. + // Server MUST set this if PSK is non-nil + // client SHOULD sets this so CertificateRequests can be handled if PSK is non-nil + Certificates []tls.Certificate + + // CipherSuites is a list of supported cipher suites. + // If CipherSuites is nil, a default list is used + CipherSuites []CipherSuiteID + + // CustomCipherSuites is a list of CipherSuites that can be + // provided by the user. This allow users to user Ciphers that are reserved + // for private usage. + CustomCipherSuites func() []CipherSuite + + // SignatureSchemes contains the signature and hash schemes that the peer requests to verify. + SignatureSchemes []tls.SignatureScheme + + // SRTPProtectionProfiles are the supported protection profiles + // Clients will send this via use_srtp and assert that the server properly responds + // Servers will assert that clients send one of these profiles and will respond as needed + SRTPProtectionProfiles []SRTPProtectionProfile + + // ClientAuth determines the server's policy for + // TLS Client Authentication. The default is NoClientCert. + ClientAuth ClientAuthType + + // RequireExtendedMasterSecret determines if the "Extended Master Secret" extension + // should be disabled, requested, or required (default requested). + ExtendedMasterSecret ExtendedMasterSecretType + + // FlightInterval controls how often we send outbound handshake messages + // defaults to time.Second + FlightInterval time.Duration + + // PSK sets the pre-shared key used by this DTLS connection + // If PSK is non-nil only PSK CipherSuites will be used + PSK PSKCallback + PSKIdentityHint []byte + + // InsecureSkipVerify controls whether a client verifies the + // server's certificate chain and host name. + // If InsecureSkipVerify is true, TLS accepts any certificate + // presented by the server and any host name in that certificate. + // In this mode, TLS is susceptible to man-in-the-middle attacks. + // This should be used only for testing. + InsecureSkipVerify bool + + // InsecureHashes allows the use of hashing algorithms that are known + // to be vulnerable. + InsecureHashes bool + + // VerifyPeerCertificate, if not nil, is called after normal + // certificate verification by either a client or server. It + // receives the certificate provided by the peer and also a flag + // that tells if normal verification has succeedded. If it returns a + // non-nil error, the handshake is aborted and that error results. + // + // If normal verification fails then the handshake will abort before + // considering this callback. If normal verification is disabled by + // setting InsecureSkipVerify, or (for a server) when ClientAuth is + // RequestClientCert or RequireAnyClientCert, then this callback will + // be considered but the verifiedChains will always be nil. + VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error + + // VerifyConnection, if not nil, is called after normal certificate + // verification/PSK and after VerifyPeerCertificate by either a TLS client + // or server. If it returns a non-nil error, the handshake is aborted + // and that error results. + // + // If normal verification fails then the handshake will abort before + // considering this callback. This callback will run for all connections + // regardless of InsecureSkipVerify or ClientAuth settings. 
+ VerifyConnection func(*State) error + + // RootCAs defines the set of root certificate authorities + // that one peer uses when verifying the other peer's certificates. + // If RootCAs is nil, TLS uses the host's root CA set. + RootCAs *x509.CertPool + + // ClientCAs defines the set of root certificate authorities + // that servers use if required to verify a client certificate + // by the policy in ClientAuth. + ClientCAs *x509.CertPool + + // ServerName is used to verify the hostname on the returned + // certificates unless InsecureSkipVerify is given. + ServerName string + + LoggerFactory logging.LoggerFactory + + // ConnectContextMaker is a function to make a context used in Dial(), + // Client(), Server(), and Accept(). If nil, the default ConnectContextMaker + // is used. It can be implemented as following. + // + // func ConnectContextMaker() (context.Context, func()) { + // return context.WithTimeout(context.Background(), 30*time.Second) + // } + ConnectContextMaker func() (context.Context, func()) + + // MTU is the length at which handshake messages will be fragmented to + // fit within the maximum transmission unit (default is 1200 bytes) + MTU int + + // ReplayProtectionWindow is the size of the replay attack protection window. + // Duplication of the sequence number is checked in this window size. + // Packet with sequence number older than this value compared to the latest + // accepted packet will be discarded. (default is 64) + ReplayProtectionWindow int + + // KeyLogWriter optionally specifies a destination for TLS master secrets + // in NSS key log format that can be used to allow external programs + // such as Wireshark to decrypt TLS connections. + // See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format. + // Use of KeyLogWriter compromises security and should only be + // used for debugging. + KeyLogWriter io.Writer + + // SessionStore is the container to store session for resumption. + SessionStore SessionStore + + // List of application protocols the peer supports, for ALPN + SupportedProtocols []string + + // List of Elliptic Curves to use + // + // If an ECC ciphersuite is configured and EllipticCurves is empty + // it will default to X25519, P-256, P-384 in this specific order. + EllipticCurves []elliptic.Curve + + // GetCertificate returns a Certificate based on the given + // ClientHelloInfo. It will only be called if the client supplies SNI + // information or if Certificates is empty. + // + // If GetCertificate is nil or returns nil, then the certificate is + // retrieved from NameToCertificate. If NameToCertificate is nil, the + // best element of Certificates will be used. + GetCertificate func(*ClientHelloInfo) (*tls.Certificate, error) + + // GetClientCertificate, if not nil, is called when a server requests a + // certificate from a client. If set, the contents of Certificates will + // be ignored. + // + // If GetClientCertificate returns an error, the handshake will be + // aborted and that error will be returned. Otherwise + // GetClientCertificate must return a non-nil Certificate. If + // Certificate.Certificate is empty then no certificate will be sent to + // the server. If this is unacceptable to the server then it may abort + // the handshake. + GetClientCertificate func(*CertificateRequestInfo) (*tls.Certificate, error) + + // InsecureSkipVerifyHello, if true and when acting as server, allow client to + // skip hello verify phase and receive ServerHello after initial ClientHello. 
+ // This have implication on DoS attack resistance. + InsecureSkipVerifyHello bool + + // [Psiphon] + // Conjure DTLS support, from: https://github.com/mingyech/dtls/commit/a56eccc1 + // CustomClientHelloRandom optionaly allows the use of custom random bytes in the ClientHello message + CustomClientHelloRandom func() [handshake.RandomBytesLength]byte +} + +func defaultConnectContextMaker() (context.Context, func()) { + return context.WithTimeout(context.Background(), 30*time.Second) +} + +func (c *Config) connectContextMaker() (context.Context, func()) { + if c.ConnectContextMaker == nil { + return defaultConnectContextMaker() + } + return c.ConnectContextMaker() +} + +func (c *Config) includeCertificateSuites() bool { + return c.PSK == nil || len(c.Certificates) > 0 || c.GetCertificate != nil || c.GetClientCertificate != nil +} + +const defaultMTU = 1200 // bytes + +var defaultCurves = []elliptic.Curve{elliptic.X25519, elliptic.P256, elliptic.P384} //nolint:gochecknoglobals + +// PSKCallback is called once we have the remote's PSKIdentityHint. +// If the remote provided none it will be nil +type PSKCallback func([]byte) ([]byte, error) + +// ClientAuthType declares the policy the server will follow for +// TLS Client Authentication. +type ClientAuthType int + +// ClientAuthType enums +const ( + NoClientCert ClientAuthType = iota + RequestClientCert + RequireAnyClientCert + VerifyClientCertIfGiven + RequireAndVerifyClientCert +) + +// ExtendedMasterSecretType declares the policy the client and server +// will follow for the Extended Master Secret extension +type ExtendedMasterSecretType int + +// ExtendedMasterSecretType enums +const ( + RequestExtendedMasterSecret ExtendedMasterSecretType = iota + RequireExtendedMasterSecret + DisableExtendedMasterSecret +) + +func validateConfig(config *Config) error { + switch { + case config == nil: + return errNoConfigProvided + case config.PSKIdentityHint != nil && config.PSK == nil: + return errIdentityNoPSK + } + + for _, cert := range config.Certificates { + if cert.Certificate == nil { + return errInvalidCertificate + } + if cert.PrivateKey != nil { + switch cert.PrivateKey.(type) { + case ed25519.PrivateKey: + case *ecdsa.PrivateKey: + case *rsa.PrivateKey: + default: + return errInvalidPrivateKey + } + } + } + + _, err := parseCipherSuites(config.CipherSuites, config.CustomCipherSuites, config.includeCertificateSuites(), config.PSK != nil) + return err +} diff --git a/replace/dtls/config_test.go b/replace/dtls/config_test.go new file mode 100644 index 000000000..811427a0c --- /dev/null +++ b/replace/dtls/config_test.go @@ -0,0 +1,140 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "crypto/dsa" //nolint:staticcheck + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "errors" + "testing" + + "github.com/pion/dtls/v2/pkg/crypto/selfsign" +) + +func TestValidateConfig(t *testing.T) { + cert, err := selfsign.GenerateSelfSigned() + if err != nil { + t.Fatalf("TestValidateConfig: Config validation error(%v), self signed certificate not generated", err) + return + } + dsaPrivateKey := &dsa.PrivateKey{} + err = dsa.GenerateParameters(&dsaPrivateKey.Parameters, rand.Reader, dsa.L1024N160) + if err != nil { + t.Fatalf("TestValidateConfig: Config validation error(%v), DSA parameters not generated", err) + return + } + err = dsa.GenerateKey(dsaPrivateKey, rand.Reader) + if err != nil { + t.Fatalf("TestValidateConfig: Config validation error(%v), DSA private key not generated", err) + 
return + } + rsaPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatalf("TestValidateConfig: Config validation error(%v), RSA private key not generated", err) + return + } + cases := map[string]struct { + config *Config + wantAnyErr bool + expErr error + }{ + "Empty config": { + expErr: errNoConfigProvided, + }, + "PSK and Certificate, valid cipher suites": { + config: &Config{ + CipherSuites: []CipherSuiteID{TLS_PSK_WITH_AES_128_CCM_8, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + PSK: func(hint []byte) ([]byte, error) { + return nil, nil + }, + Certificates: []tls.Certificate{cert}, + }, + }, + "PSK and Certificate, no PSK cipher suite": { + config: &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + PSK: func(hint []byte) ([]byte, error) { + return nil, nil + }, + Certificates: []tls.Certificate{cert}, + }, + expErr: errNoAvailablePSKCipherSuite, + }, + "PSK and Certificate, no non-PSK cipher suite": { + config: &Config{ + CipherSuites: []CipherSuiteID{TLS_PSK_WITH_AES_128_CCM_8}, + PSK: func(hint []byte) ([]byte, error) { + return nil, nil + }, + Certificates: []tls.Certificate{cert}, + }, + expErr: errNoAvailableCertificateCipherSuite, + }, + "PSK identity hint with not PSK": { + config: &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + PSK: nil, + PSKIdentityHint: []byte{}, + }, + expErr: errIdentityNoPSK, + }, + "Invalid private key": { + config: &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + Certificates: []tls.Certificate{{Certificate: cert.Certificate, PrivateKey: dsaPrivateKey}}, + }, + expErr: errInvalidPrivateKey, + }, + "PrivateKey without Certificate": { + config: &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + Certificates: []tls.Certificate{{PrivateKey: cert.PrivateKey}}, + }, + expErr: errInvalidCertificate, + }, + "Invalid cipher suites": { + config: &Config{CipherSuites: []CipherSuiteID{0x0000}}, + wantAnyErr: true, + }, + "Valid config": { + config: &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + Certificates: []tls.Certificate{cert, {Certificate: cert.Certificate, PrivateKey: rsaPrivateKey}}, + }, + }, + "Valid config with get certificate": { + config: &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + GetCertificate: func(chi *ClientHelloInfo) (*tls.Certificate, error) { + return &tls.Certificate{Certificate: cert.Certificate, PrivateKey: rsaPrivateKey}, nil + }, + }, + }, + "Valid config with get client certificate": { + config: &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + GetClientCertificate: func(cri *CertificateRequestInfo) (*tls.Certificate, error) { + return &tls.Certificate{Certificate: cert.Certificate, PrivateKey: rsaPrivateKey}, nil + }, + }, + }, + } + + for name, testCase := range cases { + testCase := testCase + t.Run(name, func(t *testing.T) { + err := validateConfig(testCase.config) + if testCase.expErr != nil || testCase.wantAnyErr { + if testCase.expErr != nil && !errors.Is(err, testCase.expErr) { + t.Fatalf("TestValidateConfig: Config validation error exp(%v) failed(%v)", testCase.expErr, err) + } + if err == nil { + t.Fatalf("TestValidateConfig: Config validation expected an error") + } + } + }) + } +} diff --git a/replace/dtls/conn.go b/replace/dtls/conn.go new file mode 100644 index 000000000..90c2fa1c8 --- /dev/null +++ b/replace/dtls/conn.go @@ -0,0 +1,1040 @@ +// 
SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/pion/dtls/v2/internal/closer" + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/signaturehash" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" + "github.com/pion/logging" + "github.com/pion/transport/v2/connctx" + "github.com/pion/transport/v2/deadline" + "github.com/pion/transport/v2/replaydetector" +) + +const ( + initialTickerInterval = time.Second + cookieLength = 20 + sessionLength = 32 + defaultNamedCurve = elliptic.X25519 + inboundBufferSize = 8192 + // Default replay protection window is specified by RFC 6347 Section 4.1.2.6 + defaultReplayProtectionWindow = 64 +) + +func invalidKeyingLabels() map[string]bool { + return map[string]bool{ + "client finished": true, + "server finished": true, + "master secret": true, + "key expansion": true, + } +} + +// Conn represents a DTLS connection +type Conn struct { + lock sync.RWMutex // Internal lock (must not be public) + nextConn connctx.ConnCtx // Embedded Conn, typically a udpconn we read/write from + fragmentBuffer *fragmentBuffer // out-of-order and missing fragment handling + handshakeCache *handshakeCache // caching of handshake messages for verifyData generation + decrypted chan interface{} // Decrypted Application Data or error, pull by calling `Read` + + state State // Internal state + + maximumTransmissionUnit int + + handshakeCompletedSuccessfully atomic.Value + + encryptedPackets [][]byte + + connectionClosedByUser bool + closeLock sync.Mutex + closed *closer.Closer + handshakeLoopsFinished sync.WaitGroup + + readDeadline *deadline.Deadline + writeDeadline *deadline.Deadline + + log logging.LeveledLogger + + reading chan struct{} + handshakeRecv chan chan struct{} + cancelHandshaker func() + cancelHandshakeReader func() + + fsm *handshakeFSM + + replayProtectionWindow uint +} + +func createConn(ctx context.Context, nextConn net.Conn, config *Config, isClient bool, initialState *State) (*Conn, error) { + err := validateConfig(config) + if err != nil { + return nil, err + } + + if nextConn == nil { + return nil, errNilNextConn + } + + cipherSuites, err := parseCipherSuites(config.CipherSuites, config.CustomCipherSuites, config.includeCertificateSuites(), config.PSK != nil) + if err != nil { + return nil, err + } + + signatureSchemes, err := signaturehash.ParseSignatureSchemes(config.SignatureSchemes, config.InsecureHashes) + if err != nil { + return nil, err + } + + workerInterval := initialTickerInterval + if config.FlightInterval != 0 { + workerInterval = config.FlightInterval + } + + loggerFactory := config.LoggerFactory + if loggerFactory == nil { + loggerFactory = logging.NewDefaultLoggerFactory() + } + + logger := loggerFactory.NewLogger("dtls") + + mtu := config.MTU + if mtu <= 0 { + mtu = defaultMTU + } + + replayProtectionWindow := config.ReplayProtectionWindow + if replayProtectionWindow <= 0 { + replayProtectionWindow = defaultReplayProtectionWindow + } + + c := &Conn{ + nextConn: connctx.New(nextConn), + fragmentBuffer: newFragmentBuffer(), + handshakeCache: newHandshakeCache(), + maximumTransmissionUnit: mtu, + + decrypted: make(chan interface{}, 1), + log: logger, + + readDeadline: deadline.New(), + 
writeDeadline: deadline.New(), + + reading: make(chan struct{}, 1), + handshakeRecv: make(chan chan struct{}), + closed: closer.NewCloser(), + cancelHandshaker: func() {}, + + replayProtectionWindow: uint(replayProtectionWindow), + + state: State{ + isClient: isClient, + }, + } + + c.setRemoteEpoch(0) + c.setLocalEpoch(0) + + serverName := config.ServerName + // Do not allow the use of an IP address literal as an SNI value. + // See RFC 6066, Section 3. + if net.ParseIP(serverName) != nil { + serverName = "" + } + + curves := config.EllipticCurves + if len(curves) == 0 { + curves = defaultCurves + } + + hsCfg := &handshakeConfig{ + localPSKCallback: config.PSK, + localPSKIdentityHint: config.PSKIdentityHint, + localCipherSuites: cipherSuites, + localSignatureSchemes: signatureSchemes, + extendedMasterSecret: config.ExtendedMasterSecret, + localSRTPProtectionProfiles: config.SRTPProtectionProfiles, + serverName: serverName, + supportedProtocols: config.SupportedProtocols, + clientAuth: config.ClientAuth, + localCertificates: config.Certificates, + insecureSkipVerify: config.InsecureSkipVerify, + verifyPeerCertificate: config.VerifyPeerCertificate, + verifyConnection: config.VerifyConnection, + rootCAs: config.RootCAs, + clientCAs: config.ClientCAs, + customCipherSuites: config.CustomCipherSuites, + retransmitInterval: workerInterval, + log: logger, + initialEpoch: 0, + keyLogWriter: config.KeyLogWriter, + sessionStore: config.SessionStore, + ellipticCurves: curves, + localGetCertificate: config.GetCertificate, + localGetClientCertificate: config.GetClientCertificate, + insecureSkipHelloVerify: config.InsecureSkipVerifyHello, + + // [Psiphon] + // Conjure DTLS support, from: https://github.com/mingyech/dtls/commit/a56eccc1 + customClientHelloRandom: config.CustomClientHelloRandom, + } + + // rfc5246#section-7.4.3 + // In addition, the hash and signature algorithms MUST be compatible + // with the key in the server's end-entity certificate. + if !isClient { + cert, err := hsCfg.getCertificate(&ClientHelloInfo{}) + if err != nil && !errors.Is(err, errNoCertificates) { + return nil, err + } + hsCfg.localCipherSuites = filterCipherSuitesForCertificate(cert, cipherSuites) + } + + var initialFlight flightVal + var initialFSMState handshakeState + + if initialState != nil { + if c.state.isClient { + initialFlight = flight5 + } else { + initialFlight = flight6 + } + initialFSMState = handshakeFinished + + c.state = *initialState + } else { + if c.state.isClient { + initialFlight = flight1 + } else { + initialFlight = flight0 + } + initialFSMState = handshakePreparing + } + // Do handshake + if err := c.handshake(ctx, hsCfg, initialFlight, initialFSMState); err != nil { + return nil, err + } + + c.log.Trace("Handshake Completed") + + return c, nil +} + +// Dial connects to the given network address and establishes a DTLS connection on top. +// Connection handshake will timeout using ConnectContextMaker in the Config. +// If you want to specify the timeout duration, use DialWithContext() instead. +func Dial(network string, raddr *net.UDPAddr, config *Config) (*Conn, error) { + ctx, cancel := config.connectContextMaker() + defer cancel() + + return DialWithContext(ctx, network, raddr, config) +} + +// Client establishes a DTLS connection over an existing connection. +// Connection handshake will timeout using ConnectContextMaker in the Config. +// If you want to specify the timeout duration, use ClientWithContext() instead. 
+func Client(conn net.Conn, config *Config) (*Conn, error) { + ctx, cancel := config.connectContextMaker() + defer cancel() + + return ClientWithContext(ctx, conn, config) +} + +// Server listens for incoming DTLS connections. +// Connection handshake will timeout using ConnectContextMaker in the Config. +// If you want to specify the timeout duration, use ServerWithContext() instead. +func Server(conn net.Conn, config *Config) (*Conn, error) { + ctx, cancel := config.connectContextMaker() + defer cancel() + + return ServerWithContext(ctx, conn, config) +} + +// DialWithContext connects to the given network address and establishes a DTLS connection on top. +func DialWithContext(ctx context.Context, network string, raddr *net.UDPAddr, config *Config) (*Conn, error) { + pConn, err := net.DialUDP(network, nil, raddr) + if err != nil { + return nil, err + } + return ClientWithContext(ctx, pConn, config) +} + +// ClientWithContext establishes a DTLS connection over an existing connection. +func ClientWithContext(ctx context.Context, conn net.Conn, config *Config) (*Conn, error) { + switch { + case config == nil: + return nil, errNoConfigProvided + case config.PSK != nil && config.PSKIdentityHint == nil: + return nil, errPSKAndIdentityMustBeSetForClient + } + + return createConn(ctx, conn, config, true, nil) +} + +// ServerWithContext listens for incoming DTLS connections. +func ServerWithContext(ctx context.Context, conn net.Conn, config *Config) (*Conn, error) { + if config == nil { + return nil, errNoConfigProvided + } + + return createConn(ctx, conn, config, false, nil) +} + +// Read reads data from the connection. +func (c *Conn) Read(p []byte) (n int, err error) { + if !c.isHandshakeCompletedSuccessfully() { + return 0, errHandshakeInProgress + } + + select { + case <-c.readDeadline.Done(): + return 0, errDeadlineExceeded + default: + } + + for { + select { + case <-c.readDeadline.Done(): + return 0, errDeadlineExceeded + case out, ok := <-c.decrypted: + if !ok { + return 0, io.EOF + } + switch val := out.(type) { + case ([]byte): + if len(p) < len(val) { + return 0, errBufferTooSmall + } + copy(p, val) + return len(val), nil + case (error): + return 0, val + } + } + } +} + +// Write writes len(p) bytes from p to the DTLS connection +func (c *Conn) Write(p []byte) (int, error) { + if c.isConnectionClosed() { + return 0, ErrConnClosed + } + + select { + case <-c.writeDeadline.Done(): + return 0, errDeadlineExceeded + default: + } + + if !c.isHandshakeCompletedSuccessfully() { + return 0, errHandshakeInProgress + } + + return len(p), c.writePackets(c.writeDeadline, []*packet{ + { + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Epoch: c.state.getLocalEpoch(), + Version: protocol.Version1_2, + }, + Content: &protocol.ApplicationData{ + Data: p, + }, + }, + shouldEncrypt: true, + }, + }) +} + +// Close closes the connection. +func (c *Conn) Close() error { + err := c.close(true) //nolint:contextcheck + c.handshakeLoopsFinished.Wait() + return err +} + +// ConnectionState returns basic DTLS details about the connection. +// Note that this replaced the `Export` function of v1. 
+func (c *Conn) ConnectionState() State { + c.lock.RLock() + defer c.lock.RUnlock() + return *c.state.clone() +} + +// SelectedSRTPProtectionProfile returns the selected SRTPProtectionProfile +func (c *Conn) SelectedSRTPProtectionProfile() (SRTPProtectionProfile, bool) { + profile := c.state.getSRTPProtectionProfile() + if profile == 0 { + return 0, false + } + + return profile, true +} + +func (c *Conn) writePackets(ctx context.Context, pkts []*packet) error { + c.lock.Lock() + defer c.lock.Unlock() + + var rawPackets [][]byte + + for _, p := range pkts { + if h, ok := p.record.Content.(*handshake.Handshake); ok { + handshakeRaw, err := p.record.Marshal() + if err != nil { + return err + } + + c.log.Tracef("[handshake:%v] -> %s (epoch: %d, seq: %d)", + srvCliStr(c.state.isClient), h.Header.Type.String(), + p.record.Header.Epoch, h.Header.MessageSequence) + c.handshakeCache.push(handshakeRaw[recordlayer.HeaderSize:], p.record.Header.Epoch, h.Header.MessageSequence, h.Header.Type, c.state.isClient) + + rawHandshakePackets, err := c.processHandshakePacket(p, h) + if err != nil { + return err + } + rawPackets = append(rawPackets, rawHandshakePackets...) + } else { + rawPacket, err := c.processPacket(p) + if err != nil { + return err + } + rawPackets = append(rawPackets, rawPacket) + } + } + if len(rawPackets) == 0 { + return nil + } + compactedRawPackets := c.compactRawPackets(rawPackets) + + for _, compactedRawPackets := range compactedRawPackets { + if _, err := c.nextConn.WriteContext(ctx, compactedRawPackets); err != nil { + return netError(err) + } + } + + return nil +} + +func (c *Conn) compactRawPackets(rawPackets [][]byte) [][]byte { + // avoid a useless copy in the common case + if len(rawPackets) == 1 { + return rawPackets + } + + combinedRawPackets := make([][]byte, 0) + currentCombinedRawPacket := make([]byte, 0) + + for _, rawPacket := range rawPackets { + if len(currentCombinedRawPacket) > 0 && len(currentCombinedRawPacket)+len(rawPacket) >= c.maximumTransmissionUnit { + combinedRawPackets = append(combinedRawPackets, currentCombinedRawPacket) + currentCombinedRawPacket = []byte{} + } + currentCombinedRawPacket = append(currentCombinedRawPacket, rawPacket...) + } + + combinedRawPackets = append(combinedRawPackets, currentCombinedRawPacket) + + return combinedRawPackets +} + +func (c *Conn) processPacket(p *packet) ([]byte, error) { + epoch := p.record.Header.Epoch + for len(c.state.localSequenceNumber) <= int(epoch) { + c.state.localSequenceNumber = append(c.state.localSequenceNumber, uint64(0)) + } + seq := atomic.AddUint64(&c.state.localSequenceNumber[epoch], 1) - 1 + if seq > recordlayer.MaxSequenceNumber { + // RFC 6347 Section 4.1.0 + // The implementation must either abandon an association or rehandshake + // prior to allowing the sequence number to wrap. 
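+			// (Editor's note: the DTLS record sequence number is a 48-bit field,
+			// so overflow is reached past 2^48-1; rather than renegotiating, this
+			// code aborts the write with errSequenceNumberOverflow.)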
+ return nil, errSequenceNumberOverflow + } + p.record.Header.SequenceNumber = seq + + rawPacket, err := p.record.Marshal() + if err != nil { + return nil, err + } + + if p.shouldEncrypt { + var err error + rawPacket, err = c.state.cipherSuite.Encrypt(p.record, rawPacket) + if err != nil { + return nil, err + } + } + + return rawPacket, nil +} + +func (c *Conn) processHandshakePacket(p *packet, h *handshake.Handshake) ([][]byte, error) { + rawPackets := make([][]byte, 0) + + handshakeFragments, err := c.fragmentHandshake(h) + if err != nil { + return nil, err + } + epoch := p.record.Header.Epoch + for len(c.state.localSequenceNumber) <= int(epoch) { + c.state.localSequenceNumber = append(c.state.localSequenceNumber, uint64(0)) + } + + for _, handshakeFragment := range handshakeFragments { + seq := atomic.AddUint64(&c.state.localSequenceNumber[epoch], 1) - 1 + if seq > recordlayer.MaxSequenceNumber { + return nil, errSequenceNumberOverflow + } + + recordlayerHeader := &recordlayer.Header{ + Version: p.record.Header.Version, + ContentType: p.record.Header.ContentType, + ContentLen: uint16(len(handshakeFragment)), + Epoch: p.record.Header.Epoch, + SequenceNumber: seq, + } + + rawPacket, err := recordlayerHeader.Marshal() + if err != nil { + return nil, err + } + + p.record.Header = *recordlayerHeader + + rawPacket = append(rawPacket, handshakeFragment...) + if p.shouldEncrypt { + var err error + rawPacket, err = c.state.cipherSuite.Encrypt(p.record, rawPacket) + if err != nil { + return nil, err + } + } + + rawPackets = append(rawPackets, rawPacket) + } + + return rawPackets, nil +} + +func (c *Conn) fragmentHandshake(h *handshake.Handshake) ([][]byte, error) { + content, err := h.Message.Marshal() + if err != nil { + return nil, err + } + + fragmentedHandshakes := make([][]byte, 0) + + contentFragments := splitBytes(content, c.maximumTransmissionUnit) + if len(contentFragments) == 0 { + contentFragments = [][]byte{ + {}, + } + } + + offset := 0 + for _, contentFragment := range contentFragments { + contentFragmentLen := len(contentFragment) + + headerFragment := &handshake.Header{ + Type: h.Header.Type, + Length: h.Header.Length, + MessageSequence: h.Header.MessageSequence, + FragmentOffset: uint32(offset), + FragmentLength: uint32(contentFragmentLen), + } + + offset += contentFragmentLen + + fragmentedHandshake, err := headerFragment.Marshal() + if err != nil { + return nil, err + } + + fragmentedHandshake = append(fragmentedHandshake, contentFragment...) 
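+		// (Editor's note: each fragment carries a full handshake header whose
+		// FragmentOffset/FragmentLength describe its slice of the message, so
+		// the peer's fragmentBuffer can reassemble the original handshake.)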
+ fragmentedHandshakes = append(fragmentedHandshakes, fragmentedHandshake) + } + + return fragmentedHandshakes, nil +} + +var poolReadBuffer = sync.Pool{ //nolint:gochecknoglobals + New: func() interface{} { + b := make([]byte, inboundBufferSize) + return &b + }, +} + +func (c *Conn) readAndBuffer(ctx context.Context) error { + bufptr, ok := poolReadBuffer.Get().(*[]byte) + if !ok { + return errFailedToAccessPoolReadBuffer + } + defer poolReadBuffer.Put(bufptr) + + b := *bufptr + i, err := c.nextConn.ReadContext(ctx, b) + if err != nil { + return netError(err) + } + + pkts, err := recordlayer.UnpackDatagram(b[:i]) + if err != nil { + return err + } + + var hasHandshake bool + for _, p := range pkts { + hs, alert, err := c.handleIncomingPacket(ctx, p, true) + if alert != nil { + if alertErr := c.notify(ctx, alert.Level, alert.Description); alertErr != nil { + if err == nil { + err = alertErr + } + } + } + if hs { + hasHandshake = true + } + + var e *alertError + if errors.As(err, &e) { + if e.IsFatalOrCloseNotify() { + return e + } + } else if err != nil { + return e + } + } + if hasHandshake { + done := make(chan struct{}) + select { + case c.handshakeRecv <- done: + // If the other party may retransmit the flight, + // we should respond even if it not a new message. + <-done + case <-c.fsm.Done(): + } + } + return nil +} + +func (c *Conn) handleQueuedPackets(ctx context.Context) error { + pkts := c.encryptedPackets + c.encryptedPackets = nil + + for _, p := range pkts { + _, alert, err := c.handleIncomingPacket(ctx, p, false) // don't re-enqueue + if alert != nil { + if alertErr := c.notify(ctx, alert.Level, alert.Description); alertErr != nil { + if err == nil { + err = alertErr + } + } + } + var e *alertError + if errors.As(err, &e) { + if e.IsFatalOrCloseNotify() { + return e + } + } else if err != nil { + return e + } + } + return nil +} + +func (c *Conn) handleIncomingPacket(ctx context.Context, buf []byte, enqueue bool) (bool, *alert.Alert, error) { //nolint:gocognit + h := &recordlayer.Header{} + if err := h.Unmarshal(buf); err != nil { + // Decode error must be silently discarded + // [RFC6347 Section-4.1.2.7] + c.log.Debugf("discarded broken packet: %v", err) + return false, nil, nil + } + + // Validate epoch + remoteEpoch := c.state.getRemoteEpoch() + if h.Epoch > remoteEpoch { + if h.Epoch > remoteEpoch+1 { + c.log.Debugf("discarded future packet (epoch: %d, seq: %d)", + h.Epoch, h.SequenceNumber, + ) + return false, nil, nil + } + if enqueue { + c.log.Debug("received packet of next epoch, queuing packet") + c.encryptedPackets = append(c.encryptedPackets, buf) + } + return false, nil, nil + } + + // Anti-replay protection + for len(c.state.replayDetector) <= int(h.Epoch) { + c.state.replayDetector = append(c.state.replayDetector, + replaydetector.New(c.replayProtectionWindow, recordlayer.MaxSequenceNumber), + ) + } + markPacketAsValid, ok := c.state.replayDetector[int(h.Epoch)].Check(h.SequenceNumber) + if !ok { + c.log.Debugf("discarded duplicated packet (epoch: %d, seq: %d)", + h.Epoch, h.SequenceNumber, + ) + return false, nil, nil + } + + // Decrypt + if h.Epoch != 0 { + if c.state.cipherSuite == nil || !c.state.cipherSuite.IsInitialized() { + if enqueue { + c.encryptedPackets = append(c.encryptedPackets, buf) + c.log.Debug("handshake not finished, queuing packet") + } + return false, nil, nil + } + + var err error + buf, err = c.state.cipherSuite.Decrypt(buf) + if err != nil { + c.log.Debugf("%s: decrypt failed: %s", srvCliStr(c.state.isClient), err) + return false, nil, 
nil + } + } + + isHandshake, err := c.fragmentBuffer.push(append([]byte{}, buf...)) + if err != nil { + // Decode error must be silently discarded + // [RFC6347 Section-4.1.2.7] + c.log.Debugf("defragment failed: %s", err) + return false, nil, nil + } else if isHandshake { + markPacketAsValid() + for out, epoch := c.fragmentBuffer.pop(); out != nil; out, epoch = c.fragmentBuffer.pop() { + header := &handshake.Header{} + if err := header.Unmarshal(out); err != nil { + c.log.Debugf("%s: handshake parse failed: %s", srvCliStr(c.state.isClient), err) + continue + } + c.handshakeCache.push(out, epoch, header.MessageSequence, header.Type, !c.state.isClient) + } + + return true, nil, nil + } + + r := &recordlayer.RecordLayer{} + if err := r.Unmarshal(buf); err != nil { + return false, &alert.Alert{Level: alert.Fatal, Description: alert.DecodeError}, err + } + + switch content := r.Content.(type) { + case *alert.Alert: + c.log.Tracef("%s: <- %s", srvCliStr(c.state.isClient), content.String()) + var a *alert.Alert + if content.Description == alert.CloseNotify { + // Respond with a close_notify [RFC5246 Section 7.2.1] + a = &alert.Alert{Level: alert.Warning, Description: alert.CloseNotify} + } + markPacketAsValid() + return false, a, &alertError{content} + case *protocol.ChangeCipherSpec: + if c.state.cipherSuite == nil || !c.state.cipherSuite.IsInitialized() { + if enqueue { + c.encryptedPackets = append(c.encryptedPackets, buf) + c.log.Debugf("CipherSuite not initialized, queuing packet") + } + return false, nil, nil + } + + newRemoteEpoch := h.Epoch + 1 + c.log.Tracef("%s: <- ChangeCipherSpec (epoch: %d)", srvCliStr(c.state.isClient), newRemoteEpoch) + + if c.state.getRemoteEpoch()+1 == newRemoteEpoch { + c.setRemoteEpoch(newRemoteEpoch) + markPacketAsValid() + } + case *protocol.ApplicationData: + if h.Epoch == 0 { + return false, &alert.Alert{Level: alert.Fatal, Description: alert.UnexpectedMessage}, errApplicationDataEpochZero + } + + markPacketAsValid() + + select { + case c.decrypted <- content.Data: + case <-c.closed.Done(): + case <-ctx.Done(): + } + + default: + return false, &alert.Alert{Level: alert.Fatal, Description: alert.UnexpectedMessage}, fmt.Errorf("%w: %d", errUnhandledContextType, content.ContentType()) + } + return false, nil, nil +} + +func (c *Conn) recvHandshake() <-chan chan struct{} { + return c.handshakeRecv +} + +func (c *Conn) notify(ctx context.Context, level alert.Level, desc alert.Description) error { + if level == alert.Fatal && len(c.state.SessionID) > 0 { + // According to the RFC, we need to delete the stored session. 
+ // https://datatracker.ietf.org/doc/html/rfc5246#section-7.2 + if ss := c.fsm.cfg.sessionStore; ss != nil { + c.log.Tracef("clean invalid session: %s", c.state.SessionID) + if err := ss.Del(c.sessionKey()); err != nil { + return err + } + } + } + return c.writePackets(ctx, []*packet{ + { + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Epoch: c.state.getLocalEpoch(), + Version: protocol.Version1_2, + }, + Content: &alert.Alert{ + Level: level, + Description: desc, + }, + }, + shouldEncrypt: c.isHandshakeCompletedSuccessfully(), + }, + }) +} + +func (c *Conn) setHandshakeCompletedSuccessfully() { + c.handshakeCompletedSuccessfully.Store(struct{ bool }{true}) +} + +func (c *Conn) isHandshakeCompletedSuccessfully() bool { + boolean, _ := c.handshakeCompletedSuccessfully.Load().(struct{ bool }) + return boolean.bool +} + +func (c *Conn) handshake(ctx context.Context, cfg *handshakeConfig, initialFlight flightVal, initialState handshakeState) error { //nolint:gocognit + c.fsm = newHandshakeFSM(&c.state, c.handshakeCache, cfg, initialFlight) + + done := make(chan struct{}) + ctxRead, cancelRead := context.WithCancel(context.Background()) + c.cancelHandshakeReader = cancelRead + cfg.onFlightState = func(f flightVal, s handshakeState) { + if s == handshakeFinished && !c.isHandshakeCompletedSuccessfully() { + c.setHandshakeCompletedSuccessfully() + close(done) + } + } + + // [Psiphon] + // Pass dial context into handshake goroutine for GetDTLSSeed. + ctxHs, cancel := context.WithCancel(ctx) + c.cancelHandshaker = cancel + + firstErr := make(chan error, 1) + + c.handshakeLoopsFinished.Add(2) + + // Handshake routine should be live until close. + // The other party may request retransmission of the last flight to cope with packet drop. + go func() { + defer c.handshakeLoopsFinished.Done() + err := c.fsm.Run(ctxHs, c, initialState) + if !errors.Is(err, context.Canceled) { + select { + case firstErr <- err: + default: + } + } + }() + go func() { + defer func() { + // Escaping read loop. + // It's safe to close decrypted channnel now. + close(c.decrypted) + + // Force stop handshaker when the underlying connection is closed. 
+ cancel() + }() + defer c.handshakeLoopsFinished.Done() + for { + if err := c.readAndBuffer(ctxRead); err != nil { + var e *alertError + if errors.As(err, &e) { + if !e.IsFatalOrCloseNotify() { + if c.isHandshakeCompletedSuccessfully() { + // Pass the error to Read() + select { + case c.decrypted <- err: + case <-c.closed.Done(): + case <-ctxRead.Done(): + } + } + continue // non-fatal alert must not stop read loop + } + } else { + switch { + case errors.Is(err, context.DeadlineExceeded), errors.Is(err, context.Canceled), errors.Is(err, io.EOF), errors.Is(err, net.ErrClosed): + case errors.Is(err, recordlayer.ErrInvalidPacketLength): + // Decode error must be silently discarded + // [RFC6347 Section-4.1.2.7] + continue + default: + if c.isHandshakeCompletedSuccessfully() { + // Keep read loop and pass the read error to Read() + select { + case c.decrypted <- err: + case <-c.closed.Done(): + case <-ctxRead.Done(): + } + continue // non-fatal alert must not stop read loop + } + } + } + + select { + case firstErr <- err: + default: + } + + if e != nil { + if e.IsFatalOrCloseNotify() { + _ = c.close(false) //nolint:contextcheck + } + } + if !c.isConnectionClosed() && errors.Is(err, context.Canceled) { + c.log.Trace("handshake timeouts - closing underline connection") + _ = c.close(false) //nolint:contextcheck + } + return + } + } + }() + + select { + case err := <-firstErr: + cancelRead() + cancel() + c.handshakeLoopsFinished.Wait() + return c.translateHandshakeCtxError(err) + case <-ctx.Done(): + cancelRead() + cancel() + c.handshakeLoopsFinished.Wait() + return c.translateHandshakeCtxError(ctx.Err()) + case <-done: + return nil + } +} + +func (c *Conn) translateHandshakeCtxError(err error) error { + if err == nil { + return nil + } + if errors.Is(err, context.Canceled) && c.isHandshakeCompletedSuccessfully() { + return nil + } + return &HandshakeError{Err: err} +} + +func (c *Conn) close(byUser bool) error { + c.cancelHandshaker() + c.cancelHandshakeReader() + + if c.isHandshakeCompletedSuccessfully() && byUser { + // Discard error from notify() to return non-error on the first user call of Close() + // even if the underlying connection is already closed. + _ = c.notify(context.Background(), alert.Warning, alert.CloseNotify) + } + + c.closeLock.Lock() + // Don't return ErrConnClosed at the first time of the call from user. + closedByUser := c.connectionClosedByUser + if byUser { + c.connectionClosedByUser = true + } + isClosed := c.isConnectionClosed() + c.closed.Close() + c.closeLock.Unlock() + + if closedByUser { + return ErrConnClosed + } + + if isClosed { + return nil + } + + return c.nextConn.Close() +} + +func (c *Conn) isConnectionClosed() bool { + select { + case <-c.closed.Done(): + return true + default: + return false + } +} + +func (c *Conn) setLocalEpoch(epoch uint16) { + c.state.localEpoch.Store(epoch) +} + +func (c *Conn) setRemoteEpoch(epoch uint16) { + c.state.remoteEpoch.Store(epoch) +} + +// LocalAddr implements net.Conn.LocalAddr +func (c *Conn) LocalAddr() net.Addr { + return c.nextConn.LocalAddr() +} + +// RemoteAddr implements net.Conn.RemoteAddr +func (c *Conn) RemoteAddr() net.Addr { + return c.nextConn.RemoteAddr() +} + +func (c *Conn) sessionKey() []byte { + if c.state.isClient { + // As ServerName can be like 0.example.com, it's better to add + // delimiter character which is not allowed to be in + // neither address or domain name. 
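+		// (Editor's note: the resulting client-side cache key therefore has the
+		// form "<remote addr>_<server name>", e.g. "192.0.2.1:4444_example.com";
+		// the values shown are illustrative only.)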
+ return []byte(c.nextConn.RemoteAddr().String() + "_" + c.fsm.cfg.serverName) + } + return c.state.SessionID +} + +// SetDeadline implements net.Conn.SetDeadline +func (c *Conn) SetDeadline(t time.Time) error { + c.readDeadline.Set(t) + return c.SetWriteDeadline(t) +} + +// SetReadDeadline implements net.Conn.SetReadDeadline +func (c *Conn) SetReadDeadline(t time.Time) error { + c.readDeadline.Set(t) + // Read deadline is fully managed by this layer. + // Don't set read deadline to underlying connection. + return nil +} + +// SetWriteDeadline implements net.Conn.SetWriteDeadline +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline.Set(t) + // Write deadline is also fully managed by this layer. + return nil +} diff --git a/replace/dtls/conn_go_test.go b/replace/dtls/conn_go_test.go new file mode 100644 index 000000000..99e6f74c4 --- /dev/null +++ b/replace/dtls/conn_go_test.go @@ -0,0 +1,178 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +//go:build !js +// +build !js + +package dtls + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "net" + "testing" + "time" + + "github.com/pion/dtls/v2/pkg/crypto/selfsign" + "github.com/pion/transport/v2/dpipe" + "github.com/pion/transport/v2/test" +) + +func TestContextConfig(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + addrListen, err := net.ResolveUDPAddr("udp", "localhost:0") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + // Dummy listener + listen, err := net.ListenUDP("udp", addrListen) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer func() { + _ = listen.Close() + }() + addr, ok := listen.LocalAddr().(*net.UDPAddr) + if !ok { + t.Fatal("Failed to cast net.UDPAddr") + } + + cert, err := selfsign.GenerateSelfSigned() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + config := &Config{ + ConnectContextMaker: func() (context.Context, func()) { + return context.WithTimeout(context.Background(), 40*time.Millisecond) + }, + Certificates: []tls.Certificate{cert}, + } + + dials := map[string]struct { + f func() (func() (net.Conn, error), func()) + order []byte + }{ + "Dial": { + f: func() (func() (net.Conn, error), func()) { + return func() (net.Conn, error) { + return Dial("udp", addr, config) + }, func() { + } + }, + order: []byte{0, 1, 2}, + }, + "DialWithContext": { + f: func() (func() (net.Conn, error), func()) { + ctx, cancel := context.WithTimeout(context.Background(), 80*time.Millisecond) + return func() (net.Conn, error) { + return DialWithContext(ctx, "udp", addr, config) + }, func() { + cancel() + } + }, + order: []byte{0, 2, 1}, + }, + "Client": { + f: func() (func() (net.Conn, error), func()) { + ca, _ := dpipe.Pipe() + return func() (net.Conn, error) { + return Client(ca, config) + }, func() { + _ = ca.Close() + } + }, + order: []byte{0, 1, 2}, + }, + "ClientWithContext": { + f: func() (func() (net.Conn, error), func()) { + ctx, cancel := context.WithTimeout(context.Background(), 80*time.Millisecond) + ca, _ := dpipe.Pipe() + return func() (net.Conn, error) { + return ClientWithContext(ctx, ca, config) + }, func() { + cancel() + _ = ca.Close() + } + }, + order: []byte{0, 2, 1}, + }, + "Server": { + f: func() (func() (net.Conn, error), func()) { + ca, _ := dpipe.Pipe() + return func() (net.Conn, error) { + return Server(ca, config) + }, func() { + _ = ca.Close() + } + }, + order: 
[]byte{0, 1, 2}, + }, + "ServerWithContext": { + f: func() (func() (net.Conn, error), func()) { + ctx, cancel := context.WithTimeout(context.Background(), 80*time.Millisecond) + ca, _ := dpipe.Pipe() + return func() (net.Conn, error) { + return ServerWithContext(ctx, ca, config) + }, func() { + cancel() + _ = ca.Close() + } + }, + order: []byte{0, 2, 1}, + }, + } + + for name, dial := range dials { + dial := dial + t.Run(name, func(t *testing.T) { + done := make(chan struct{}) + + go func() { + d, cancel := dial.f() + conn, err := d() + defer cancel() + var netError net.Error + if !errors.As(err, &netError) || !netError.Temporary() { //nolint:staticcheck + t.Errorf("Client error exp(Temporary network error) failed(%v)", err) + close(done) + return + } + done <- struct{}{} + if err == nil { + _ = conn.Close() + } + }() + + var order []byte + early := time.After(20 * time.Millisecond) + late := time.After(60 * time.Millisecond) + func() { + for len(order) < 3 { + select { + case <-early: + order = append(order, 0) + case _, ok := <-done: + if !ok { + return + } + order = append(order, 1) + case <-late: + order = append(order, 2) + } + } + }() + if !bytes.Equal(dial.order, order) { + t.Errorf("Invalid cancel timing, expected: %v, got: %v", dial.order, order) + } + }) + } +} diff --git a/replace/dtls/conn_test.go b/replace/dtls/conn_test.go new file mode 100644 index 000000000..ea3c842f7 --- /dev/null +++ b/replace/dtls/conn_test.go @@ -0,0 +1,3052 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + cryptoElliptic "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/hex" + "errors" + "fmt" + "io" + "net" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/pion/dtls/v2/internal/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/hash" + "github.com/pion/dtls/v2/pkg/crypto/selfsign" + "github.com/pion/dtls/v2/pkg/crypto/signature" + "github.com/pion/dtls/v2/pkg/crypto/signaturehash" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/extension" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" + "github.com/pion/logging" + "github.com/pion/transport/v2/dpipe" + "github.com/pion/transport/v2/test" +) + +var ( + errTestPSKInvalidIdentity = errors.New("TestPSK: Server got invalid identity") + errPSKRejected = errors.New("PSK Rejected") + errNotExpectedChain = errors.New("not expected chain") + errExpecedChain = errors.New("expected chain") + errWrongCert = errors.New("wrong cert") +) + +func TestStressDuplex(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + // Run the test + stressDuplex(t) +} + +func stressDuplex(t *testing.T) { + ca, cb, err := pipeMemory() + if err != nil { + t.Fatal(err) + } + + defer func() { + err = ca.Close() + if err != nil { + t.Fatal(err) + } + err = cb.Close() + if err != nil { + t.Fatal(err) + } + }() + + opt := test.Options{ + MsgSize: 2048, + MsgCount: 100, + } + + err = test.StressDuplex(ca, cb, opt) + if err != nil { + t.Fatal(err) + } +} + +func TestRoutineLeakOnClose(t *testing.T) { + // Limit runtime in case of deadlocks + 
lim := test.TimeOut(5 * time.Second) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + ca, cb, err := pipeMemory() + if err != nil { + t.Fatal(err) + } + + if _, err := ca.Write(make([]byte, 100)); err != nil { + t.Fatal(err) + } + if err := cb.Close(); err != nil { + t.Fatal(err) + } + if err := ca.Close(); err != nil { + t.Fatal(err) + } + // Packet is sent, but not read. + // inboundLoop routine should not be leaked. +} + +func TestReadWriteDeadline(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(5 * time.Second) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + var e net.Error + + ca, cb, err := pipeMemory() + if err != nil { + t.Fatal(err) + } + + if err := ca.SetDeadline(time.Unix(0, 1)); err != nil { + t.Fatal(err) + } + _, werr := ca.Write(make([]byte, 100)) + if errors.As(werr, &e) { + if !e.Timeout() { + t.Error("Deadline exceeded Write must return Timeout error") + } + if !e.Temporary() { //nolint:staticcheck + t.Error("Deadline exceeded Write must return Temporary error") + } + } else { + t.Error("Write must return net.Error error") + } + _, rerr := ca.Read(make([]byte, 100)) + if errors.As(rerr, &e) { + if !e.Timeout() { + t.Error("Deadline exceeded Read must return Timeout error") + } + if !e.Temporary() { //nolint:staticcheck + t.Error("Deadline exceeded Read must return Temporary error") + } + } else { + t.Error("Read must return net.Error error") + } + if err := ca.SetDeadline(time.Time{}); err != nil { + t.Error(err) + } + + if err := ca.Close(); err != nil { + t.Error(err) + } + if err := cb.Close(); err != nil { + t.Error(err) + } + + if _, err := ca.Write(make([]byte, 100)); !errors.Is(err, ErrConnClosed) { + t.Errorf("Write must return %v after close, got %v", ErrConnClosed, err) + } + if _, err := ca.Read(make([]byte, 100)); !errors.Is(err, io.EOF) { + t.Errorf("Read must return %v after close, got %v", io.EOF, err) + } +} + +func TestSequenceNumberOverflow(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(5 * time.Second) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + t.Run("ApplicationData", func(t *testing.T) { + ca, cb, err := pipeMemory() + if err != nil { + t.Fatal(err) + } + + atomic.StoreUint64(&ca.state.localSequenceNumber[1], recordlayer.MaxSequenceNumber) + if _, werr := ca.Write(make([]byte, 100)); werr != nil { + t.Errorf("Write must send message with maximum sequence number, but errord: %v", werr) + } + if _, werr := ca.Write(make([]byte, 100)); !errors.Is(werr, errSequenceNumberOverflow) { + t.Errorf("Write must abandonsend message with maximum sequence number, but errord: %v", werr) + } + + if err := ca.Close(); err != nil { + t.Error(err) + } + if err := cb.Close(); err != nil { + t.Error(err) + } + }) + t.Run("Handshake", func(t *testing.T) { + ca, cb, err := pipeMemory() + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + atomic.StoreUint64(&ca.state.localSequenceNumber[0], recordlayer.MaxSequenceNumber+1) + + // Try to send handshake packet. 
+ if werr := ca.writePackets(ctx, []*packet{ + { + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageClientHello{ + Version: protocol.Version1_2, + Cookie: make([]byte, 64), + CipherSuiteIDs: cipherSuiteIDs(defaultCipherSuites()), + CompressionMethods: defaultCompressionMethods(), + }, + }, + }, + }, + }); !errors.Is(werr, errSequenceNumberOverflow) { + t.Errorf("Connection must fail on handshake packet reaches maximum sequence number") + } + + if err := ca.Close(); err != nil { + t.Error(err) + } + if err := cb.Close(); err != nil { + t.Error(err) + } + }) +} + +func pipeMemory() (*Conn, *Conn, error) { + // In memory pipe + ca, cb := dpipe.Pipe() + return pipeConn(ca, cb) +} + +func pipeConn(ca, cb net.Conn) (*Conn, *Conn, error) { + type result struct { + c *Conn + err error + } + + c := make(chan result) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Setup client + go func() { + client, err := testClient(ctx, ca, &Config{SRTPProtectionProfiles: []SRTPProtectionProfile{SRTP_AES128_CM_HMAC_SHA1_80}}, true) + c <- result{client, err} + }() + + // Setup server + server, err := testServer(ctx, cb, &Config{SRTPProtectionProfiles: []SRTPProtectionProfile{SRTP_AES128_CM_HMAC_SHA1_80}}, true) + if err != nil { + return nil, nil, err + } + + // Receive client + res := <-c + if res.err != nil { + _ = server.Close() + return nil, nil, res.err + } + + return res.c, server, nil +} + +func testClient(ctx context.Context, c net.Conn, cfg *Config, generateCertificate bool) (*Conn, error) { + if generateCertificate { + clientCert, err := selfsign.GenerateSelfSigned() + if err != nil { + return nil, err + } + cfg.Certificates = []tls.Certificate{clientCert} + } + cfg.InsecureSkipVerify = true + return ClientWithContext(ctx, c, cfg) +} + +func testServer(ctx context.Context, c net.Conn, cfg *Config, generateCertificate bool) (*Conn, error) { + if generateCertificate { + serverCert, err := selfsign.GenerateSelfSigned() + if err != nil { + return nil, err + } + cfg.Certificates = []tls.Certificate{serverCert} + } + return ServerWithContext(ctx, c, cfg) +} + +func sendClientHello(cookie []byte, ca net.Conn, sequenceNumber uint64, extensions []extension.Extension) error { + packet, err := (&recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + SequenceNumber: sequenceNumber, + }, + Content: &handshake.Handshake{ + Header: handshake.Header{ + MessageSequence: uint16(sequenceNumber), + }, + Message: &handshake.MessageClientHello{ + Version: protocol.Version1_2, + Cookie: cookie, + CipherSuiteIDs: cipherSuiteIDs(defaultCipherSuites()), + CompressionMethods: defaultCompressionMethods(), + Extensions: extensions, + }, + }, + }).Marshal() + if err != nil { + return err + } + + if _, err = ca.Write(packet); err != nil { + return err + } + return nil +} + +func TestHandshakeWithAlert(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + cases := map[string]struct { + configServer, configClient *Config + errServer, errClient error + }{ + "CipherSuiteNoIntersection": { + configServer: &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + }, + 
configClient: &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + }, + errServer: errCipherSuiteNoIntersection, + errClient: &alertError{&alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}}, + }, + "SignatureSchemesNoIntersection": { + configServer: &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + SignatureSchemes: []tls.SignatureScheme{tls.ECDSAWithP256AndSHA256}, + }, + configClient: &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + SignatureSchemes: []tls.SignatureScheme{tls.ECDSAWithP521AndSHA512}, + }, + errServer: &alertError{&alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}}, + errClient: errNoAvailableSignatureSchemes, + }, + } + + for name, testCase := range cases { + testCase := testCase + t.Run(name, func(t *testing.T) { + clientErr := make(chan error, 1) + + ca, cb := dpipe.Pipe() + go func() { + _, err := testClient(ctx, ca, testCase.configClient, true) + clientErr <- err + }() + + _, errServer := testServer(ctx, cb, testCase.configServer, true) + if !errors.Is(errServer, testCase.errServer) { + t.Fatalf("Server error exp(%v) failed(%v)", testCase.errServer, errServer) + } + + errClient := <-clientErr + if !errors.Is(errClient, testCase.errClient) { + t.Fatalf("Client error exp(%v) failed(%v)", testCase.errClient, errClient) + } + }) + } +} + +func TestHandshakeWithInvalidRecord(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + type result struct { + c *Conn + err error + } + clientErr := make(chan result, 1) + ca, cb := dpipe.Pipe() + caWithInvalidRecord := &connWithCallback{Conn: ca} + + var msgSeq atomic.Int32 + // Send invalid record after first message + caWithInvalidRecord.onWrite = func(b []byte) { + if msgSeq.Add(1) == 2 { + if _, err := ca.Write([]byte{0x01, 0x02}); err != nil { + t.Fatal(err) + } + } + } + go func() { + client, err := testClient(ctx, caWithInvalidRecord, &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + }, true) + clientErr <- result{client, err} + }() + + server, errServer := testServer(ctx, cb, &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + }, true) + + errClient := <-clientErr + + defer func() { + if server != nil { + if err := server.Close(); err != nil { + t.Fatal(err) + } + } + + if errClient.c != nil { + if err := errClient.c.Close(); err != nil { + t.Fatal(err) + } + } + }() + + if errServer != nil { + t.Fatalf("Server failed(%v)", errServer) + } + + if errClient.err != nil { + t.Fatalf("Client failed(%v)", errClient.err) + } +} + +func TestExportKeyingMaterial(t *testing.T) { + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + var rand [28]byte + exportLabel := "EXTRACTOR-dtls_srtp" + + expectedServerKey := []byte{0x61, 0x09, 0x9d, 0x7d, 0xcb, 0x08, 0x52, 0x2c, 0xe7, 0x7b} + expectedClientKey := []byte{0x87, 0xf0, 0x40, 0x02, 0xf6, 0x1c, 0xf1, 0xfe, 0x8c, 0x77} + + c := &Conn{ + state: State{ + localRandom: handshake.Random{GMTUnixTime: time.Unix(500, 0), RandomBytes: rand}, + remoteRandom: handshake.Random{GMTUnixTime: time.Unix(1000, 0), RandomBytes: rand}, + localSequenceNumber: []uint64{0, 0}, + cipherSuite: 
&ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{}, + }, + } + c.setLocalEpoch(0) + c.setRemoteEpoch(0) + + state := c.ConnectionState() + _, err := state.ExportKeyingMaterial(exportLabel, nil, 0) + if !errors.Is(err, errHandshakeInProgress) { + t.Errorf("ExportKeyingMaterial when epoch == 0: expected '%s' actual '%s'", errHandshakeInProgress, err) + } + + c.setLocalEpoch(1) + state = c.ConnectionState() + _, err = state.ExportKeyingMaterial(exportLabel, []byte{0x00}, 0) + if !errors.Is(err, errContextUnsupported) { + t.Errorf("ExportKeyingMaterial with context: expected '%s' actual '%s'", errContextUnsupported, err) + } + + for k := range invalidKeyingLabels() { + state = c.ConnectionState() + _, err = state.ExportKeyingMaterial(k, nil, 0) + if !errors.Is(err, errReservedExportKeyingMaterial) { + t.Errorf("ExportKeyingMaterial reserved label: expected '%s' actual '%s'", errReservedExportKeyingMaterial, err) + } + } + + state = c.ConnectionState() + keyingMaterial, err := state.ExportKeyingMaterial(exportLabel, nil, 10) + if err != nil { + t.Errorf("ExportKeyingMaterial as server: unexpected error '%s'", err) + } else if !bytes.Equal(keyingMaterial, expectedServerKey) { + t.Errorf("ExportKeyingMaterial client export: expected (% 02x) actual (% 02x)", expectedServerKey, keyingMaterial) + } + + c.state.isClient = true + state = c.ConnectionState() + keyingMaterial, err = state.ExportKeyingMaterial(exportLabel, nil, 10) + if err != nil { + t.Errorf("ExportKeyingMaterial as server: unexpected error '%s'", err) + } else if !bytes.Equal(keyingMaterial, expectedClientKey) { + t.Errorf("ExportKeyingMaterial client export: expected (% 02x) actual (% 02x)", expectedClientKey, keyingMaterial) + } +} + +func TestPSK(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + for _, test := range []struct { + Name string + ServerIdentity []byte + CipherSuites []CipherSuiteID + ClientVerifyConnection func(*State) error + ServerVerifyConnection func(*State) error + WantFail bool + ExpectedServerErr string + ExpectedClientErr string + }{ + { + Name: "Server identity specified", + ServerIdentity: []byte("Test Identity"), + CipherSuites: []CipherSuiteID{TLS_PSK_WITH_AES_128_CCM_8}, + }, + { + Name: "Server identity specified - Server verify connection fails", + ServerIdentity: []byte("Test Identity"), + CipherSuites: []CipherSuiteID{TLS_PSK_WITH_AES_128_CCM_8}, + ServerVerifyConnection: func(s *State) error { + return errExample + }, + WantFail: true, + ExpectedServerErr: errExample.Error(), + ExpectedClientErr: alert.BadCertificate.String(), + }, + { + Name: "Server identity specified - Client verify connection fails", + ServerIdentity: []byte("Test Identity"), + CipherSuites: []CipherSuiteID{TLS_PSK_WITH_AES_128_CCM_8}, + ClientVerifyConnection: func(s *State) error { + return errExample + }, + WantFail: true, + ExpectedServerErr: alert.BadCertificate.String(), + ExpectedClientErr: errExample.Error(), + }, + { + Name: "Server identity nil", + ServerIdentity: nil, + CipherSuites: []CipherSuiteID{TLS_PSK_WITH_AES_128_CCM_8}, + }, + { + Name: "TLS_PSK_WITH_AES_128_CBC_SHA256", + ServerIdentity: nil, + CipherSuites: []CipherSuiteID{TLS_PSK_WITH_AES_128_CBC_SHA256}, + }, + { + Name: "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256", + ServerIdentity: nil, + CipherSuites: []CipherSuiteID{TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256}, + }, + } { + test := test + t.Run(test.Name, func(t 
*testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + clientIdentity := []byte("Client Identity") + type result struct { + c *Conn + err error + } + clientRes := make(chan result, 1) + + ca, cb := dpipe.Pipe() + go func() { + conf := &Config{ + PSK: func(hint []byte) ([]byte, error) { + if !bytes.Equal(test.ServerIdentity, hint) { + return nil, fmt.Errorf("TestPSK: Client got invalid identity expected(% 02x) actual(% 02x)", test.ServerIdentity, hint) //nolint:goerr113 + } + + return []byte{0xAB, 0xC1, 0x23}, nil + }, + PSKIdentityHint: clientIdentity, + CipherSuites: test.CipherSuites, + VerifyConnection: test.ClientVerifyConnection, + } + + c, err := testClient(ctx, ca, conf, false) + clientRes <- result{c, err} + }() + + config := &Config{ + PSK: func(hint []byte) ([]byte, error) { + if !bytes.Equal(clientIdentity, hint) { + return nil, fmt.Errorf("%w: expected(% 02x) actual(% 02x)", errTestPSKInvalidIdentity, clientIdentity, hint) + } + return []byte{0xAB, 0xC1, 0x23}, nil + }, + PSKIdentityHint: test.ServerIdentity, + CipherSuites: test.CipherSuites, + VerifyConnection: test.ServerVerifyConnection, + } + + server, err := testServer(ctx, cb, config, false) + if test.WantFail { + res := <-clientRes + if err == nil || !strings.Contains(err.Error(), test.ExpectedServerErr) { + t.Fatalf("TestPSK: Server expected(%v) actual(%v)", test.ExpectedServerErr, err) + } + if res.err == nil || !strings.Contains(res.err.Error(), test.ExpectedClientErr) { + t.Fatalf("TestPSK: Client expected(%v) actual(%v)", test.ExpectedClientErr, res.err) + } + return + } + if err != nil { + t.Fatalf("TestPSK: Server failed(%v)", err) + } + + actualPSKIdentityHint := server.ConnectionState().IdentityHint + if !bytes.Equal(actualPSKIdentityHint, clientIdentity) { + t.Errorf("TestPSK: Server ClientPSKIdentity Mismatch '%s': expected(%v) actual(%v)", test.Name, clientIdentity, actualPSKIdentityHint) + } + + defer func() { + _ = server.Close() + }() + + res := <-clientRes + if res.err != nil { + t.Fatal(res.err) + } + _ = res.c.Close() + }) + } +} + +func TestPSKHintFail(t *testing.T) { + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + serverAlertError := &alertError{&alert.Alert{Level: alert.Fatal, Description: alert.InternalError}} + pskRejected := errPSKRejected + + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + clientErr := make(chan error, 1) + + ca, cb := dpipe.Pipe() + go func() { + conf := &Config{ + PSK: func(hint []byte) ([]byte, error) { + return nil, pskRejected + }, + PSKIdentityHint: []byte{}, + CipherSuites: []CipherSuiteID{TLS_PSK_WITH_AES_128_CCM_8}, + } + + _, err := testClient(ctx, ca, conf, false) + clientErr <- err + }() + + config := &Config{ + PSK: func(hint []byte) ([]byte, error) { + return nil, pskRejected + }, + PSKIdentityHint: []byte{}, + CipherSuites: []CipherSuiteID{TLS_PSK_WITH_AES_128_CCM_8}, + } + + if _, err := testServer(ctx, cb, config, false); !errors.Is(err, serverAlertError) { + t.Fatalf("TestPSK: Server error exp(%v) failed(%v)", serverAlertError, err) + } + + if err := <-clientErr; !errors.Is(err, pskRejected) { + t.Fatalf("TestPSK: Client error exp(%v) failed(%v)", pskRejected, err) + } +} + +func TestClientTimeout(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + // Check for 
leaking routines + report := test.CheckRoutines(t) + defer report() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + clientErr := make(chan error, 1) + + ca, _ := dpipe.Pipe() + go func() { + conf := &Config{} + + c, err := testClient(ctx, ca, conf, true) + if err == nil { + _ = c.Close() //nolint:contextcheck + } + clientErr <- err + }() + + // no server! + err := <-clientErr + var netErr net.Error + if !errors.As(err, &netErr) || !netErr.Timeout() { + t.Fatalf("Client error exp(Temporary network error) failed(%v)", err) + } +} + +func TestSRTPConfiguration(t *testing.T) { + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + for _, test := range []struct { + Name string + ClientSRTP []SRTPProtectionProfile + ServerSRTP []SRTPProtectionProfile + ExpectedProfile SRTPProtectionProfile + WantClientError error + WantServerError error + }{ + { + Name: "No SRTP in use", + ClientSRTP: nil, + ServerSRTP: nil, + ExpectedProfile: 0, + WantClientError: nil, + WantServerError: nil, + }, + { + Name: "SRTP both ends", + ClientSRTP: []SRTPProtectionProfile{SRTP_AES128_CM_HMAC_SHA1_80}, + ServerSRTP: []SRTPProtectionProfile{SRTP_AES128_CM_HMAC_SHA1_80}, + ExpectedProfile: SRTP_AES128_CM_HMAC_SHA1_80, + WantClientError: nil, + WantServerError: nil, + }, + { + Name: "SRTP client only", + ClientSRTP: []SRTPProtectionProfile{SRTP_AES128_CM_HMAC_SHA1_80}, + ServerSRTP: nil, + ExpectedProfile: 0, + WantClientError: &alertError{&alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}}, + WantServerError: errServerNoMatchingSRTPProfile, + }, + { + Name: "SRTP server only", + ClientSRTP: nil, + ServerSRTP: []SRTPProtectionProfile{SRTP_AES128_CM_HMAC_SHA1_80}, + ExpectedProfile: 0, + WantClientError: nil, + WantServerError: nil, + }, + { + Name: "Multiple Suites", + ClientSRTP: []SRTPProtectionProfile{SRTP_AES128_CM_HMAC_SHA1_80, SRTP_AES128_CM_HMAC_SHA1_32}, + ServerSRTP: []SRTPProtectionProfile{SRTP_AES128_CM_HMAC_SHA1_80, SRTP_AES128_CM_HMAC_SHA1_32}, + ExpectedProfile: SRTP_AES128_CM_HMAC_SHA1_80, + WantClientError: nil, + WantServerError: nil, + }, + { + Name: "Multiple Suites, Client Chooses", + ClientSRTP: []SRTPProtectionProfile{SRTP_AES128_CM_HMAC_SHA1_80, SRTP_AES128_CM_HMAC_SHA1_32}, + ServerSRTP: []SRTPProtectionProfile{SRTP_AES128_CM_HMAC_SHA1_32, SRTP_AES128_CM_HMAC_SHA1_80}, + ExpectedProfile: SRTP_AES128_CM_HMAC_SHA1_80, + WantClientError: nil, + WantServerError: nil, + }, + } { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ca, cb := dpipe.Pipe() + type result struct { + c *Conn + err error + } + c := make(chan result) + + go func() { + client, err := testClient(ctx, ca, &Config{SRTPProtectionProfiles: test.ClientSRTP}, true) + c <- result{client, err} + }() + + server, err := testServer(ctx, cb, &Config{SRTPProtectionProfiles: test.ServerSRTP}, true) + if !errors.Is(err, test.WantServerError) { + t.Errorf("TestSRTPConfiguration: Server Error Mismatch '%s': expected(%v) actual(%v)", test.Name, test.WantServerError, err) + } + if err == nil { + defer func() { + _ = server.Close() + }() + } + + res := <-c + if res.err == nil { + defer func() { + _ = res.c.Close() + }() + } + if !errors.Is(res.err, test.WantClientError) { + t.Fatalf("TestSRTPConfiguration: Client Error Mismatch '%s': expected(%v) actual(%v)", test.Name, test.WantClientError, res.err) + } + if res.c == nil { + return + } + + actualClientSRTP, _ := res.c.SelectedSRTPProtectionProfile() + if 
actualClientSRTP != test.ExpectedProfile { + t.Errorf("TestSRTPConfiguration: Client SRTPProtectionProfile Mismatch '%s': expected(%v) actual(%v)", test.Name, test.ExpectedProfile, actualClientSRTP) + } + + actualServerSRTP, _ := server.SelectedSRTPProtectionProfile() + if actualServerSRTP != test.ExpectedProfile { + t.Errorf("TestSRTPConfiguration: Server SRTPProtectionProfile Mismatch '%s': expected(%v) actual(%v)", test.Name, test.ExpectedProfile, actualServerSRTP) + } + } +} + +func TestClientCertificate(t *testing.T) { + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + srvCert, err := selfsign.GenerateSelfSigned() + if err != nil { + t.Fatal(err) + } + srvCAPool := x509.NewCertPool() + srvCertificate, err := x509.ParseCertificate(srvCert.Certificate[0]) + if err != nil { + t.Fatal(err) + } + srvCAPool.AddCert(srvCertificate) + + cert, err := selfsign.GenerateSelfSigned() + if err != nil { + t.Fatal(err) + } + certificate, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + t.Fatal(err) + } + caPool := x509.NewCertPool() + caPool.AddCert(certificate) + + t.Run("parallel", func(t *testing.T) { // sync routines to check routine leak + tests := map[string]struct { + clientCfg *Config + serverCfg *Config + wantErr bool + }{ + "NoClientCert": { + clientCfg: &Config{RootCAs: srvCAPool}, + serverCfg: &Config{ + Certificates: []tls.Certificate{srvCert}, + ClientAuth: NoClientCert, + ClientCAs: caPool, + }, + }, + "NoClientCert_ServerVerifyConnectionFails": { + clientCfg: &Config{RootCAs: srvCAPool}, + serverCfg: &Config{ + Certificates: []tls.Certificate{srvCert}, + ClientAuth: NoClientCert, + ClientCAs: caPool, + VerifyConnection: func(s *State) error { + return errExample + }, + }, + wantErr: true, + }, + "NoClientCert_ClientVerifyConnectionFails": { + clientCfg: &Config{RootCAs: srvCAPool, VerifyConnection: func(s *State) error { + return errExample + }}, + serverCfg: &Config{ + Certificates: []tls.Certificate{srvCert}, + ClientAuth: NoClientCert, + ClientCAs: caPool, + }, + wantErr: true, + }, + "NoClientCert_cert": { + clientCfg: &Config{RootCAs: srvCAPool, Certificates: []tls.Certificate{cert}}, + serverCfg: &Config{ + Certificates: []tls.Certificate{srvCert}, + ClientAuth: RequireAnyClientCert, + }, + }, + "RequestClientCert_cert": { + clientCfg: &Config{RootCAs: srvCAPool, Certificates: []tls.Certificate{cert}}, + serverCfg: &Config{ + Certificates: []tls.Certificate{srvCert}, + ClientAuth: RequestClientCert, + }, + }, + "RequestClientCert_no_cert": { + clientCfg: &Config{RootCAs: srvCAPool}, + serverCfg: &Config{ + Certificates: []tls.Certificate{srvCert}, + ClientAuth: RequestClientCert, + ClientCAs: caPool, + }, + }, + "RequireAnyClientCert": { + clientCfg: &Config{RootCAs: srvCAPool, Certificates: []tls.Certificate{cert}}, + serverCfg: &Config{ + Certificates: []tls.Certificate{srvCert}, + ClientAuth: RequireAnyClientCert, + }, + }, + "RequireAnyClientCert_error": { + clientCfg: &Config{RootCAs: srvCAPool}, + serverCfg: &Config{ + Certificates: []tls.Certificate{srvCert}, + ClientAuth: RequireAnyClientCert, + }, + wantErr: true, + }, + "VerifyClientCertIfGiven_no_cert": { + clientCfg: &Config{RootCAs: srvCAPool}, + serverCfg: &Config{ + Certificates: []tls.Certificate{srvCert}, + ClientAuth: VerifyClientCertIfGiven, + ClientCAs: caPool, + }, + }, + "VerifyClientCertIfGiven_cert": { + clientCfg: &Config{RootCAs: srvCAPool, Certificates: []tls.Certificate{cert}}, + serverCfg: &Config{ + Certificates: []tls.Certificate{srvCert}, + 
ClientAuth: VerifyClientCertIfGiven, + ClientCAs: caPool, + }, + }, + "VerifyClientCertIfGiven_error": { + clientCfg: &Config{RootCAs: srvCAPool, Certificates: []tls.Certificate{cert}}, + serverCfg: &Config{ + Certificates: []tls.Certificate{srvCert}, + ClientAuth: VerifyClientCertIfGiven, + }, + wantErr: true, + }, + "RequireAndVerifyClientCert": { + clientCfg: &Config{RootCAs: srvCAPool, Certificates: []tls.Certificate{cert}, VerifyConnection: func(s *State) error { + if ok := bytes.Equal(s.PeerCertificates[0], srvCertificate.Raw); !ok { + return errExample + } + return nil + }}, + serverCfg: &Config{ + Certificates: []tls.Certificate{srvCert}, + ClientAuth: RequireAndVerifyClientCert, + ClientCAs: caPool, + VerifyConnection: func(s *State) error { + if ok := bytes.Equal(s.PeerCertificates[0], certificate.Raw); !ok { + return errExample + } + return nil + }, + }, + }, + "RequireAndVerifyClientCert_callbacks": { + clientCfg: &Config{ + RootCAs: srvCAPool, + // Certificates: []tls.Certificate{cert}, + GetClientCertificate: func(cri *CertificateRequestInfo) (*tls.Certificate, error) { return &cert, nil }, + }, + serverCfg: &Config{ + GetCertificate: func(chi *ClientHelloInfo) (*tls.Certificate, error) { return &srvCert, nil }, + // Certificates: []tls.Certificate{srvCert}, + ClientAuth: RequireAndVerifyClientCert, + ClientCAs: caPool, + }, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + ca, cb := dpipe.Pipe() + type result struct { + c *Conn + err error + } + c := make(chan result) + + go func() { + client, err := Client(ca, tt.clientCfg) + c <- result{client, err} + }() + + server, err := Server(cb, tt.serverCfg) + res := <-c + defer func() { + if err == nil { + _ = server.Close() + } + if res.err == nil { + _ = res.c.Close() + } + }() + + if tt.wantErr { + if err != nil { + // Error expected, test succeeded + return + } + t.Error("Error expected") + } + if err != nil { + t.Errorf("Server failed(%v)", err) + } + + if res.err != nil { + t.Errorf("Client failed(%v)", res.err) + } + + actualClientCert := server.ConnectionState().PeerCertificates + if tt.serverCfg.ClientAuth == RequireAnyClientCert || tt.serverCfg.ClientAuth == RequireAndVerifyClientCert { + if actualClientCert == nil { + t.Errorf("Client did not provide a certificate") + } + + var cfgCert [][]byte + if len(tt.clientCfg.Certificates) > 0 { + cfgCert = tt.clientCfg.Certificates[0].Certificate + } + if tt.clientCfg.GetClientCertificate != nil { + crt, err := tt.clientCfg.GetClientCertificate(&CertificateRequestInfo{}) + if err != nil { + t.Errorf("Server configuration did not provide a certificate") + } + cfgCert = crt.Certificate + } + if len(cfgCert) == 0 || !bytes.Equal(cfgCert[0], actualClientCert[0]) { + t.Errorf("Client certificate was not communicated correctly") + } + } + if tt.serverCfg.ClientAuth == NoClientCert { + if actualClientCert != nil { + t.Errorf("Client certificate wasn't expected") + } + } + + actualServerCert := res.c.ConnectionState().PeerCertificates + if actualServerCert == nil { + t.Errorf("Server did not provide a certificate") + } + var cfgCert [][]byte + if len(tt.serverCfg.Certificates) > 0 { + cfgCert = tt.serverCfg.Certificates[0].Certificate + } + if tt.serverCfg.GetCertificate != nil { + crt, err := tt.serverCfg.GetCertificate(&ClientHelloInfo{}) + if err != nil { + t.Errorf("Server configuration did not provide a certificate") + } + cfgCert = crt.Certificate + } + if len(cfgCert) == 0 || !bytes.Equal(cfgCert[0], actualServerCert[0]) { + t.Errorf("Server 
certificate was not communicated correctly") + } + }) + } + }) +} + +func TestExtendedMasterSecret(t *testing.T) { + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + tests := map[string]struct { + clientCfg *Config + serverCfg *Config + expectedClientErr error + expectedServerErr error + }{ + "Request_Request_ExtendedMasterSecret": { + clientCfg: &Config{ + ExtendedMasterSecret: RequestExtendedMasterSecret, + }, + serverCfg: &Config{ + ExtendedMasterSecret: RequestExtendedMasterSecret, + }, + expectedClientErr: nil, + expectedServerErr: nil, + }, + "Request_Require_ExtendedMasterSecret": { + clientCfg: &Config{ + ExtendedMasterSecret: RequestExtendedMasterSecret, + }, + serverCfg: &Config{ + ExtendedMasterSecret: RequireExtendedMasterSecret, + }, + expectedClientErr: nil, + expectedServerErr: nil, + }, + "Request_Disable_ExtendedMasterSecret": { + clientCfg: &Config{ + ExtendedMasterSecret: RequestExtendedMasterSecret, + }, + serverCfg: &Config{ + ExtendedMasterSecret: DisableExtendedMasterSecret, + }, + expectedClientErr: nil, + expectedServerErr: nil, + }, + "Require_Request_ExtendedMasterSecret": { + clientCfg: &Config{ + ExtendedMasterSecret: RequireExtendedMasterSecret, + }, + serverCfg: &Config{ + ExtendedMasterSecret: RequestExtendedMasterSecret, + }, + expectedClientErr: nil, + expectedServerErr: nil, + }, + "Require_Require_ExtendedMasterSecret": { + clientCfg: &Config{ + ExtendedMasterSecret: RequireExtendedMasterSecret, + }, + serverCfg: &Config{ + ExtendedMasterSecret: RequireExtendedMasterSecret, + }, + expectedClientErr: nil, + expectedServerErr: nil, + }, + "Require_Disable_ExtendedMasterSecret": { + clientCfg: &Config{ + ExtendedMasterSecret: RequireExtendedMasterSecret, + }, + serverCfg: &Config{ + ExtendedMasterSecret: DisableExtendedMasterSecret, + }, + expectedClientErr: errClientRequiredButNoServerEMS, + expectedServerErr: &alertError{&alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}}, + }, + "Disable_Request_ExtendedMasterSecret": { + clientCfg: &Config{ + ExtendedMasterSecret: DisableExtendedMasterSecret, + }, + serverCfg: &Config{ + ExtendedMasterSecret: RequestExtendedMasterSecret, + }, + expectedClientErr: nil, + expectedServerErr: nil, + }, + "Disable_Require_ExtendedMasterSecret": { + clientCfg: &Config{ + ExtendedMasterSecret: DisableExtendedMasterSecret, + }, + serverCfg: &Config{ + ExtendedMasterSecret: RequireExtendedMasterSecret, + }, + expectedClientErr: &alertError{&alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}}, + expectedServerErr: errServerRequiredButNoClientEMS, + }, + "Disable_Disable_ExtendedMasterSecret": { + clientCfg: &Config{ + ExtendedMasterSecret: DisableExtendedMasterSecret, + }, + serverCfg: &Config{ + ExtendedMasterSecret: DisableExtendedMasterSecret, + }, + expectedClientErr: nil, + expectedServerErr: nil, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ca, cb := dpipe.Pipe() + type result struct { + c *Conn + err error + } + c := make(chan result) + + go func() { + client, err := testClient(ctx, ca, tt.clientCfg, true) + c <- result{client, err} + }() + + server, err := testServer(ctx, cb, tt.serverCfg, true) + res := <-c + defer func() { + if err == nil { + _ = server.Close() + } + if res.err == nil { + _ = res.c.Close() + } + }() + + if !errors.Is(res.err, tt.expectedClientErr) { + t.Errorf("Client error expected: 
\"%v\" but got \"%v\"", tt.expectedClientErr, res.err) + } + + if !errors.Is(err, tt.expectedServerErr) { + t.Errorf("Server error expected: \"%v\" but got \"%v\"", tt.expectedServerErr, err) + } + }) + } +} + +func TestServerCertificate(t *testing.T) { + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + cert, err := selfsign.GenerateSelfSigned() + if err != nil { + t.Fatal(err) + } + certificate, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + t.Fatal(err) + } + caPool := x509.NewCertPool() + caPool.AddCert(certificate) + + t.Run("parallel", func(t *testing.T) { // sync routines to check routine leak + tests := map[string]struct { + clientCfg *Config + serverCfg *Config + wantErr bool + }{ + "no_ca": { + clientCfg: &Config{}, + serverCfg: &Config{Certificates: []tls.Certificate{cert}, ClientAuth: NoClientCert}, + wantErr: true, + }, + "good_ca": { + clientCfg: &Config{RootCAs: caPool}, + serverCfg: &Config{Certificates: []tls.Certificate{cert}, ClientAuth: NoClientCert}, + }, + "no_ca_skip_verify": { + clientCfg: &Config{InsecureSkipVerify: true}, + serverCfg: &Config{Certificates: []tls.Certificate{cert}, ClientAuth: NoClientCert}, + }, + "good_ca_skip_verify_custom_verify_peer": { + clientCfg: &Config{RootCAs: caPool, Certificates: []tls.Certificate{cert}}, + serverCfg: &Config{Certificates: []tls.Certificate{cert}, ClientAuth: RequireAnyClientCert, VerifyPeerCertificate: func(cert [][]byte, chain [][]*x509.Certificate) error { + if len(chain) != 0 { + return errNotExpectedChain + } + return nil + }}, + }, + "good_ca_verify_custom_verify_peer": { + clientCfg: &Config{RootCAs: caPool, Certificates: []tls.Certificate{cert}}, + serverCfg: &Config{ClientCAs: caPool, Certificates: []tls.Certificate{cert}, ClientAuth: RequireAndVerifyClientCert, VerifyPeerCertificate: func(cert [][]byte, chain [][]*x509.Certificate) error { + if len(chain) == 0 { + return errExpecedChain + } + return nil + }}, + }, + "good_ca_custom_verify_peer": { + clientCfg: &Config{ + RootCAs: caPool, + VerifyPeerCertificate: func([][]byte, [][]*x509.Certificate) error { + return errWrongCert + }, + }, + serverCfg: &Config{Certificates: []tls.Certificate{cert}, ClientAuth: NoClientCert}, + wantErr: true, + }, + "server_name": { + clientCfg: &Config{RootCAs: caPool, ServerName: certificate.Subject.CommonName}, + serverCfg: &Config{Certificates: []tls.Certificate{cert}, ClientAuth: NoClientCert}, + }, + "server_name_error": { + clientCfg: &Config{RootCAs: caPool, ServerName: "barfoo"}, + serverCfg: &Config{Certificates: []tls.Certificate{cert}, ClientAuth: NoClientCert}, + wantErr: true, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + ca, cb := dpipe.Pipe() + + type result struct { + c *Conn + err error + } + srvCh := make(chan result) + go func() { + s, err := Server(cb, tt.serverCfg) + srvCh <- result{s, err} + }() + + cli, err := Client(ca, tt.clientCfg) + if err == nil { + _ = cli.Close() + } + if !tt.wantErr && err != nil { + t.Errorf("Client failed(%v)", err) + } + if tt.wantErr && err == nil { + t.Fatal("Error expected") + } + + srv := <-srvCh + if srv.err == nil { + _ = srv.c.Close() + } + }) + } + }) +} + +func TestCipherSuiteConfiguration(t *testing.T) { + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + for _, test := range []struct { + Name string + ClientCipherSuites []CipherSuiteID + ServerCipherSuites []CipherSuiteID + WantClientError error + WantServerError error + 
WantSelectedCipherSuite CipherSuiteID + }{ + { + Name: "No CipherSuites specified", + ClientCipherSuites: nil, + ServerCipherSuites: nil, + WantClientError: nil, + WantServerError: nil, + }, + { + Name: "Invalid CipherSuite", + ClientCipherSuites: []CipherSuiteID{0x00}, + ServerCipherSuites: []CipherSuiteID{0x00}, + WantClientError: &invalidCipherSuiteError{0x00}, + WantServerError: &invalidCipherSuiteError{0x00}, + }, + { + Name: "Valid CipherSuites specified", + ClientCipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + ServerCipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + WantClientError: nil, + WantServerError: nil, + WantSelectedCipherSuite: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + }, + { + Name: "CipherSuites mismatch", + ClientCipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + ServerCipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA}, + WantClientError: &alertError{&alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}}, + WantServerError: errCipherSuiteNoIntersection, + }, + { + Name: "Valid CipherSuites CCM specified", + ClientCipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_CCM}, + ServerCipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_CCM}, + WantClientError: nil, + WantServerError: nil, + WantSelectedCipherSuite: TLS_ECDHE_ECDSA_WITH_AES_128_CCM, + }, + { + Name: "Valid CipherSuites CCM-8 specified", + ClientCipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8}, + ServerCipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8}, + WantClientError: nil, + WantServerError: nil, + WantSelectedCipherSuite: TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8, + }, + { + Name: "Server supports subset of client suites", + ClientCipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA}, + ServerCipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA}, + WantClientError: nil, + WantServerError: nil, + WantSelectedCipherSuite: TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + }, + } { + test := test + t.Run(test.Name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ca, cb := dpipe.Pipe() + type result struct { + c *Conn + err error + } + c := make(chan result) + + go func() { + client, err := testClient(ctx, ca, &Config{CipherSuites: test.ClientCipherSuites}, true) + c <- result{client, err} + }() + + server, err := testServer(ctx, cb, &Config{CipherSuites: test.ServerCipherSuites}, true) + if err == nil { + defer func() { + _ = server.Close() + }() + } + if !errors.Is(err, test.WantServerError) { + t.Errorf("TestCipherSuiteConfiguration: Server Error Mismatch '%s': expected(%v) actual(%v)", test.Name, test.WantServerError, err) + } + + res := <-c + if res.err == nil { + _ = server.Close() + _ = res.c.Close() + } + if !errors.Is(res.err, test.WantClientError) { + t.Errorf("TestSRTPConfiguration: Client Error Mismatch '%s': expected(%v) actual(%v)", test.Name, test.WantClientError, res.err) + } + if test.WantSelectedCipherSuite != 0x00 && res.c.state.cipherSuite.ID() != test.WantSelectedCipherSuite { + t.Errorf("TestCipherSuiteConfiguration: Server Selected Bad Cipher Suite '%s': expected(%v) actual(%v)", test.Name, test.WantSelectedCipherSuite, res.c.state.cipherSuite.ID()) + } + }) + } +} + +func TestCertificateAndPSKServer(t *testing.T) { + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + for _, test := 
range []struct { + Name string + ClientPSK bool + }{ + { + Name: "Client uses PKI", + ClientPSK: false, + }, + { + Name: "Client uses PSK", + ClientPSK: true, + }, + } { + test := test + t.Run(test.Name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ca, cb := dpipe.Pipe() + type result struct { + c *Conn + err error + } + c := make(chan result) + + go func() { + config := &Config{CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}} + if test.ClientPSK { + config.PSK = func([]byte) ([]byte, error) { + return []byte{0x00, 0x01, 0x02}, nil + } + config.PSKIdentityHint = []byte{0x00} + config.CipherSuites = []CipherSuiteID{TLS_PSK_WITH_AES_128_GCM_SHA256} + } + + client, err := testClient(ctx, ca, config, false) + c <- result{client, err} + }() + + config := &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_PSK_WITH_AES_128_GCM_SHA256}, + PSK: func([]byte) ([]byte, error) { + return []byte{0x00, 0x01, 0x02}, nil + }, + } + + server, err := testServer(ctx, cb, config, true) + if err == nil { + defer func() { + _ = server.Close() + }() + } else { + t.Errorf("TestCertificateAndPSKServer: Server Error Mismatch '%s': expected(%v) actual(%v)", test.Name, nil, err) + } + + res := <-c + if res.err == nil { + _ = server.Close() + _ = res.c.Close() + } else { + t.Errorf("TestCertificateAndPSKServer: Client Error Mismatch '%s': expected(%v) actual(%v)", test.Name, nil, res.err) + } + }) + } +} + +func TestPSKConfiguration(t *testing.T) { + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + for _, test := range []struct { + Name string + ClientHasCertificate bool + ServerHasCertificate bool + ClientPSK PSKCallback + ServerPSK PSKCallback + ClientPSKIdentity []byte + ServerPSKIdentity []byte + WantClientError error + WantServerError error + }{ + { + Name: "PSK and no certificate specified", + ClientHasCertificate: false, + ServerHasCertificate: false, + ClientPSK: func([]byte) ([]byte, error) { return []byte{0x00, 0x01, 0x02}, nil }, + ServerPSK: func([]byte) ([]byte, error) { return []byte{0x00, 0x01, 0x02}, nil }, + ClientPSKIdentity: []byte{0x00}, + ServerPSKIdentity: []byte{0x00}, + WantClientError: errNoAvailablePSKCipherSuite, + WantServerError: errNoAvailablePSKCipherSuite, + }, + { + Name: "PSK and certificate specified", + ClientHasCertificate: true, + ServerHasCertificate: true, + ClientPSK: func([]byte) ([]byte, error) { return []byte{0x00, 0x01, 0x02}, nil }, + ServerPSK: func([]byte) ([]byte, error) { return []byte{0x00, 0x01, 0x02}, nil }, + ClientPSKIdentity: []byte{0x00}, + ServerPSKIdentity: []byte{0x00}, + WantClientError: errNoAvailablePSKCipherSuite, + WantServerError: errNoAvailablePSKCipherSuite, + }, + { + Name: "PSK and no identity specified", + ClientHasCertificate: false, + ServerHasCertificate: false, + ClientPSK: func([]byte) ([]byte, error) { return []byte{0x00, 0x01, 0x02}, nil }, + ServerPSK: func([]byte) ([]byte, error) { return []byte{0x00, 0x01, 0x02}, nil }, + ClientPSKIdentity: nil, + ServerPSKIdentity: nil, + WantClientError: errPSKAndIdentityMustBeSetForClient, + WantServerError: errNoAvailablePSKCipherSuite, + }, + { + Name: "No PSK and identity specified", + ClientHasCertificate: false, + ServerHasCertificate: false, + ClientPSK: nil, + ServerPSK: nil, + ClientPSKIdentity: []byte{0x00}, + ServerPSKIdentity: []byte{0x00}, + WantClientError: errIdentityNoPSK, + WantServerError: errIdentityNoPSK, + }, + } { + 
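// Each case attempts a handshake over an in-memory pipe and compares both sides' errors against the expectations above. +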
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ca, cb := dpipe.Pipe() + type result struct { + c *Conn + err error + } + c := make(chan result) + + go func() { + client, err := testClient(ctx, ca, &Config{PSK: test.ClientPSK, PSKIdentityHint: test.ClientPSKIdentity}, test.ClientHasCertificate) + c <- result{client, err} + }() + + _, err := testServer(ctx, cb, &Config{PSK: test.ServerPSK, PSKIdentityHint: test.ServerPSKIdentity}, test.ServerHasCertificate) + if err != nil || test.WantServerError != nil { + if !(err != nil && test.WantServerError != nil && err.Error() == test.WantServerError.Error()) { + t.Fatalf("TestPSKConfiguration: Server Error Mismatch '%s': expected(%v) actual(%v)", test.Name, test.WantServerError, err) + } + } + + res := <-c + if res.err != nil || test.WantClientError != nil { + if !(res.err != nil && test.WantClientError != nil && res.err.Error() == test.WantClientError.Error()) { + t.Fatalf("TestPSKConfiguration: Client Error Mismatch '%s': expected(%v) actual(%v)", test.Name, test.WantClientError, res.err) + } + } + } +} + +func TestServerTimeout(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + cookie := make([]byte, 20) + _, err := rand.Read(cookie) + if err != nil { + t.Fatal(err) + } + + var rand [28]byte + random := handshake.Random{GMTUnixTime: time.Unix(500, 0), RandomBytes: rand} + + cipherSuites := []CipherSuite{ + &ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{}, + &ciphersuite.TLSEcdheRsaWithAes128GcmSha256{}, + } + + extensions := []extension.Extension{ + &extension.SupportedSignatureAlgorithms{ + SignatureHashAlgorithms: []signaturehash.Algorithm{ + {Hash: hash.SHA256, Signature: signature.ECDSA}, + {Hash: hash.SHA384, Signature: signature.ECDSA}, + {Hash: hash.SHA512, Signature: signature.ECDSA}, + {Hash: hash.SHA256, Signature: signature.RSA}, + {Hash: hash.SHA384, Signature: signature.RSA}, + {Hash: hash.SHA512, Signature: signature.RSA}, + }, + }, + &extension.SupportedEllipticCurves{ + EllipticCurves: []elliptic.Curve{elliptic.X25519, elliptic.P256, elliptic.P384}, + }, + &extension.SupportedPointFormats{ + PointFormats: []elliptic.CurvePointFormat{elliptic.CurvePointFormatUncompressed}, + }, + } + + record := &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + SequenceNumber: 0, + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + // sequenceNumber and messageSequence line up, may need to be re-evaluated + Header: handshake.Header{ + MessageSequence: 0, + }, + Message: &handshake.MessageClientHello{ + Version: protocol.Version1_2, + Cookie: cookie, + Random: random, + CipherSuiteIDs: cipherSuiteIDs(cipherSuites), + CompressionMethods: defaultCompressionMethods(), + Extensions: extensions, + }, + }, + } + + packet, err := record.Marshal() + if err != nil { + t.Fatal(err) + } + + ca, cb := dpipe.Pipe() + defer func() { + err := ca.Close() + if err != nil { + t.Fatal(err) + } + }() + + // Client reader + caReadChan := make(chan []byte, 1000) + go func() { + for { + data := make([]byte, 8192) + n, err := ca.Read(data) + if err != nil { + return + } + + caReadChan <- data[:n] + } + }() + + // Start sending ClientHello packets until server responds with first packet + go func() { + for { + select { + case <-time.After(10 * time.Millisecond): + _, err := ca.Write(packet) + if err != nil { + return + } + case <-caReadChan: + // Once 
we receive the first reply from the server, stop + return + } + } + }() + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + config := &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + FlightInterval: 100 * time.Millisecond, + } + + _, serverErr := testServer(ctx, cb, config, true) + var netErr net.Error + if !errors.As(serverErr, &netErr) || !netErr.Timeout() { + t.Fatalf("Client error exp(Temporary network error) failed(%v)", serverErr) + } + + // Wait a little longer to ensure no additional messages have been sent by the server + time.Sleep(300 * time.Millisecond) + select { + case msg := <-caReadChan: + t.Fatalf("Expected no additional messages from server, got: %+v", msg) + default: + } +} + +func TestProtocolVersionValidation(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + cookie := make([]byte, 20) + if _, err := rand.Read(cookie); err != nil { + t.Fatal(err) + } + + var rand [28]byte + random := handshake.Random{GMTUnixTime: time.Unix(500, 0), RandomBytes: rand} + + config := &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + FlightInterval: 100 * time.Millisecond, + } + + t.Run("Server", func(t *testing.T) { + serverCases := map[string]struct { + records []*recordlayer.RecordLayer + }{ + "ClientHelloVersion": { + records: []*recordlayer.RecordLayer{ + { + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageClientHello{ + Version: protocol.Version{Major: 0xfe, Minor: 0xff}, // try to downgrade + Cookie: cookie, + Random: random, + CipherSuiteIDs: []uint16{uint16((&ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{}).ID())}, + CompressionMethods: defaultCompressionMethods(), + }, + }, + }, + }, + }, + "SecondsClientHelloVersion": { + records: []*recordlayer.RecordLayer{ + { + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageClientHello{ + Version: protocol.Version1_2, + Cookie: cookie, + Random: random, + CipherSuiteIDs: []uint16{uint16((&ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{}).ID())}, + CompressionMethods: defaultCompressionMethods(), + }, + }, + }, + { + Header: recordlayer.Header{ + Version: protocol.Version1_2, + SequenceNumber: 1, + }, + Content: &handshake.Handshake{ + Header: handshake.Header{ + MessageSequence: 1, + }, + Message: &handshake.MessageClientHello{ + Version: protocol.Version{Major: 0xfe, Minor: 0xff}, // try to downgrade + Cookie: cookie, + Random: random, + CipherSuiteIDs: []uint16{uint16((&ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{}).ID())}, + CompressionMethods: defaultCompressionMethods(), + }, + }, + }, + }, + }, + } + for name, c := range serverCases { + c := c + t.Run(name, func(t *testing.T) { + ca, cb := dpipe.Pipe() + defer func() { + err := ca.Close() + if err != nil { + t.Error(err) + } + }() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + var wg sync.WaitGroup + wg.Add(1) + defer wg.Wait() + go func() { + defer wg.Done() + if _, err := testServer(ctx, cb, config, true); !errors.Is(err, errUnsupportedProtocolVersion) { + t.Errorf("Client error exp(%v) failed(%v)", errUnsupportedProtocolVersion, err) + } + }() + + time.Sleep(50 * time.Millisecond) + + resp := make([]byte, 1024) + 
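// Replay the crafted ClientHello records and read each server reply; offering an unsupported protocol version must ultimately be answered with an alert record. +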
for _, record := range c.records { + packet, err := record.Marshal() + if err != nil { + t.Fatal(err) + } + if _, werr := ca.Write(packet); werr != nil { + t.Fatal(werr) + } + n, rerr := ca.Read(resp[:cap(resp)]) + if rerr != nil { + t.Fatal(rerr) + } + resp = resp[:n] + } + + h := &recordlayer.Header{} + if err := h.Unmarshal(resp); err != nil { + t.Fatal("Failed to unmarshal response") + } + if h.ContentType != protocol.ContentTypeAlert { + t.Errorf("Peer must return alert to unsupported protocol version") + } + }) + } + }) + + t.Run("Client", func(t *testing.T) { + clientCases := map[string]struct { + records []*recordlayer.RecordLayer + }{ + "ServerHelloVersion": { + records: []*recordlayer.RecordLayer{ + { + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageHelloVerifyRequest{ + Version: protocol.Version1_2, + Cookie: cookie, + }, + }, + }, + { + Header: recordlayer.Header{ + Version: protocol.Version1_2, + SequenceNumber: 1, + }, + Content: &handshake.Handshake{ + Header: handshake.Header{ + MessageSequence: 1, + }, + Message: &handshake.MessageServerHello{ + Version: protocol.Version{Major: 0xfe, Minor: 0xff}, // try to downgrade + Random: random, + CipherSuiteID: func() *uint16 { id := uint16(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256); return &id }(), + CompressionMethod: defaultCompressionMethods()[0], + }, + }, + }, + }, + }, + } + for name, c := range clientCases { + c := c + t.Run(name, func(t *testing.T) { + ca, cb := dpipe.Pipe() + defer func() { + err := ca.Close() + if err != nil { + t.Error(err) + } + }() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + var wg sync.WaitGroup + wg.Add(1) + defer wg.Wait() + go func() { + defer wg.Done() + if _, err := testClient(ctx, cb, config, true); !errors.Is(err, errUnsupportedProtocolVersion) { + t.Errorf("Server error exp(%v) failed(%v)", errUnsupportedProtocolVersion, err) + } + }() + + time.Sleep(50 * time.Millisecond) + + for _, record := range c.records { + if _, err := ca.Read(make([]byte, 1024)); err != nil { + t.Fatal(err) + } + + packet, err := record.Marshal() + if err != nil { + t.Fatal(err) + } + if _, err := ca.Write(packet); err != nil { + t.Fatal(err) + } + } + resp := make([]byte, 1024) + n, err := ca.Read(resp) + if err != nil { + t.Fatal(err) + } + resp = resp[:n] + + h := &recordlayer.Header{} + if err := h.Unmarshal(resp); err != nil { + t.Fatal("Failed to unmarshal response") + } + if h.ContentType != protocol.ContentTypeAlert { + t.Errorf("Peer must return alert to unsupported protocol version") + } + }) + } + }) +} + +func TestMultipleHelloVerifyRequest(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + cookies := [][]byte{ + // first clientHello contains an empty cookie + {}, + } + var packets [][]byte + for i := 0; i < 2; i++ { + cookie := make([]byte, 20) + if _, err := rand.Read(cookie); err != nil { + t.Fatal(err) + } + cookies = append(cookies, cookie) + + record := &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + SequenceNumber: uint64(i), + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Header: handshake.Header{ + MessageSequence: uint16(i), + }, + Message: &handshake.MessageHelloVerifyRequest{ + Version: protocol.Version1_2, + Cookie: cookie, + }, + }, + } + packet, err := record.Marshal() + if err != 
nil { + t.Fatal(err) + } + packets = append(packets, packet) + } + + ca, cb := dpipe.Pipe() + defer func() { + err := ca.Close() + if err != nil { + t.Error(err) + } + }() + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + var wg sync.WaitGroup + wg.Add(1) + defer wg.Wait() + go func() { + defer wg.Done() + _, _ = testClient(ctx, ca, &Config{}, false) + }() + + for i, cookie := range cookies { + // read client hello + resp := make([]byte, 1024) + n, err := cb.Read(resp) + if err != nil { + t.Fatal(err) + } + record := &recordlayer.RecordLayer{} + if err := record.Unmarshal(resp[:n]); err != nil { + t.Fatal(err) + } + clientHello, ok := record.Content.(*handshake.Handshake).Message.(*handshake.MessageClientHello) + if !ok { + t.Fatal("Failed to cast MessageClientHello") + } + + if !bytes.Equal(clientHello.Cookie, cookie) { + t.Fatalf("Wrong cookie, expected: %x, got: %x", clientHello.Cookie, cookie) + } + if len(packets) <= i { + break + } + // write hello verify request + if _, err := cb.Write(packets[i]); err != nil { + t.Fatal(err) + } + } + cancel() +} + +// Assert that a DTLS Server always responds with RenegotiationInfo if +// a ClientHello contained that extension or not +func TestRenegotationInfo(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(10 * time.Second) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + resp := make([]byte, 1024) + + for _, testCase := range []struct { + Name string + SendRenegotiationInfo bool + }{ + { + "Include RenegotiationInfo", + true, + }, + { + "No RenegotiationInfo", + false, + }, + } { + test := testCase + t.Run(test.Name, func(t *testing.T) { + ca, cb := dpipe.Pipe() + defer func() { + if err := ca.Close(); err != nil { + t.Error(err) + } + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + if _, err := testServer(ctx, cb, &Config{}, true); !errors.Is(err, context.Canceled) { + t.Error(err) + } + }() + + time.Sleep(50 * time.Millisecond) + + extensions := []extension.Extension{} + if test.SendRenegotiationInfo { + extensions = append(extensions, &extension.RenegotiationInfo{ + RenegotiatedConnection: 0, + }) + } + err := sendClientHello([]byte{}, ca, 0, extensions) + if err != nil { + t.Fatal(err) + } + n, err := ca.Read(resp) + if err != nil { + t.Fatal(err) + } + r := &recordlayer.RecordLayer{} + if err = r.Unmarshal(resp[:n]); err != nil { + t.Fatal(err) + } + + helloVerifyRequest, ok := r.Content.(*handshake.Handshake).Message.(*handshake.MessageHelloVerifyRequest) + if !ok { + t.Fatal("Failed to cast MessageHelloVerifyRequest") + } + + err = sendClientHello(helloVerifyRequest.Cookie, ca, 1, extensions) + if err != nil { + t.Fatal(err) + } + if n, err = ca.Read(resp); err != nil { + t.Fatal(err) + } + + messages, err := recordlayer.UnpackDatagram(resp[:n]) + if err != nil { + t.Fatal(err) + } + + if err := r.Unmarshal(messages[0]); err != nil { + t.Fatal(err) + } + + serverHello, ok := r.Content.(*handshake.Handshake).Message.(*handshake.MessageServerHello) + if !ok { + t.Fatal("Failed to cast MessageServerHello") + } + + gotNegotationInfo := false + for _, v := range serverHello.Extensions { + if _, ok := v.(*extension.RenegotiationInfo); ok { + gotNegotationInfo = true + } + } + + if !gotNegotationInfo { + t.Fatalf("Received ServerHello without RenegotiationInfo") + } + }) + } +} + +func TestServerNameIndicationExtension(t *testing.T) { + // Limit runtime in case 
of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + for _, test := range []struct { + Name string + ServerName string + Expected []byte + IncludeSNI bool + }{ + { + Name: "Server name is a valid hostname", + ServerName: "example.com", + Expected: []byte("example.com"), + IncludeSNI: true, + }, + { + Name: "Server name is an IP literal", + ServerName: "1.2.3.4", + Expected: []byte(""), + IncludeSNI: false, + }, + { + Name: "Server name is empty", + ServerName: "", + Expected: []byte(""), + IncludeSNI: false, + }, + } { + test := test + t.Run(test.Name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ca, cb := dpipe.Pipe() + go func() { + conf := &Config{ + ServerName: test.ServerName, + } + + _, _ = testClient(ctx, ca, conf, false) + }() + + // Receive ClientHello + resp := make([]byte, 1024) + n, err := cb.Read(resp) + if err != nil { + t.Fatal(err) + } + r := &recordlayer.RecordLayer{} + if err = r.Unmarshal(resp[:n]); err != nil { + t.Fatal(err) + } + + clientHello, ok := r.Content.(*handshake.Handshake).Message.(*handshake.MessageClientHello) + if !ok { + t.Fatal("Failed to cast MessageClientHello") + } + + gotSNI := false + var actualServerName string + for _, v := range clientHello.Extensions { + if _, ok := v.(*extension.ServerName); ok { + gotSNI = true + extensionServerName, ok := v.(*extension.ServerName) + if !ok { + t.Fatal("Failed to cast extension.ServerName") + } + + actualServerName = extensionServerName.ServerName + } + } + + if gotSNI != test.IncludeSNI { + t.Errorf("TestSNI: unexpected SNI inclusion '%s': expected(%v) actual(%v)", test.Name, test.IncludeSNI, gotSNI) + } + + if !bytes.Equal([]byte(actualServerName), test.Expected) { + t.Errorf("TestSNI: server name mismatch '%s': expected(%v) actual(%v)", test.Name, test.Expected, actualServerName) + } + }) + } +} + +func TestALPNExtension(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + for _, test := range []struct { + Name string + ClientProtocolNameList []string + ServerProtocolNameList []string + ExpectedProtocol string + ExpectAlertFromClient bool + ExpectAlertFromServer bool + Alert alert.Description + }{ + { + Name: "Negotiate a protocol", + ClientProtocolNameList: []string{"http/1.1", "spd/1"}, + ServerProtocolNameList: []string{"spd/1"}, + ExpectedProtocol: "spd/1", + ExpectAlertFromClient: false, + ExpectAlertFromServer: false, + Alert: 0, + }, + { + Name: "Server doesn't support any", + ClientProtocolNameList: []string{"http/1.1", "spd/1"}, + ServerProtocolNameList: []string{}, + ExpectedProtocol: "", + ExpectAlertFromClient: false, + ExpectAlertFromServer: false, + Alert: 0, + }, + { + Name: "Negotiate with higher server precedence", + ClientProtocolNameList: []string{"http/1.1", "spd/1", "http/3"}, + ServerProtocolNameList: []string{"ssh/2", "http/3", "spd/1"}, + ExpectedProtocol: "http/3", + ExpectAlertFromClient: false, + ExpectAlertFromServer: false, + Alert: 0, + }, + { + Name: "Empty intersection", + ClientProtocolNameList: []string{"http/1.1", "http/3"}, + ServerProtocolNameList: []string{"ssh/2", "spd/1"}, + ExpectedProtocol: "", + ExpectAlertFromClient: false, + ExpectAlertFromServer: true, + Alert: alert.NoApplicationProtocol, + }, + { + Name: "Multiple protocols in 
ServerHello", + ClientProtocolNameList: []string{"http/1.1"}, + ServerProtocolNameList: []string{"http/1.1"}, + ExpectedProtocol: "http/1.1", + ExpectAlertFromClient: true, + ExpectAlertFromServer: false, + Alert: alert.InternalError, + }, + } { + test := test + t.Run(test.Name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ca, cb := dpipe.Pipe() + go func() { + conf := &Config{ + SupportedProtocols: test.ClientProtocolNameList, + } + _, _ = testClient(ctx, ca, conf, false) + }() + + // Receive ClientHello + resp := make([]byte, 1024) + n, err := cb.Read(resp) + if err != nil { + t.Fatal(err) + } + + ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel2() + + ca2, cb2 := dpipe.Pipe() + go func() { + conf := &Config{ + SupportedProtocols: test.ServerProtocolNameList, + } + if _, err2 := testServer(ctx2, cb2, conf, true); !errors.Is(err2, context.Canceled) { + if test.ExpectAlertFromServer { //nolint + // Assert the error type? + } else { + t.Error(err2) + } + } + }() + + time.Sleep(50 * time.Millisecond) + + // Forward ClientHello + if _, err = ca2.Write(resp[:n]); err != nil { + t.Fatal(err) + } + + // Receive HelloVerify + resp2 := make([]byte, 1024) + n, err = ca2.Read(resp2) + if err != nil { + t.Fatal(err) + } + + // Forward HelloVerify + if _, err = cb.Write(resp2[:n]); err != nil { + t.Fatal(err) + } + + // Receive ClientHello + resp3 := make([]byte, 1024) + n, err = cb.Read(resp3) + if err != nil { + t.Fatal(err) + } + + // Forward ClientHello + if _, err = ca2.Write(resp3[:n]); err != nil { + t.Fatal(err) + } + + // Receive ServerHello + resp4 := make([]byte, 1024) + n, err = ca2.Read(resp4) + if err != nil { + t.Fatal(err) + } + + messages, err := recordlayer.UnpackDatagram(resp4[:n]) + if err != nil { + t.Fatal(err) + } + + r := &recordlayer.RecordLayer{} + if err := r.Unmarshal(messages[0]); err != nil { + t.Fatal(err) + } + + if test.ExpectAlertFromServer { + a, ok := r.Content.(*alert.Alert) + if !ok { + t.Fatal("Failed to cast alert.Alert") + } + + if a.Description != test.Alert { + t.Errorf("ALPN %v: expected(%v) actual(%v)", test.Name, test.Alert, a.Description) + } + } else { + serverHello, ok := r.Content.(*handshake.Handshake).Message.(*handshake.MessageServerHello) + if !ok { + t.Fatal("Failed to cast handshake.MessageServerHello") + } + + var negotiatedProtocol string + for _, v := range serverHello.Extensions { + if _, ok := v.(*extension.ALPN); ok { + e, ok := v.(*extension.ALPN) + if !ok { + t.Fatal("Failed to cast extension.ALPN") + } + + negotiatedProtocol = e.ProtocolNameList[0] + + // Manipulate ServerHello + if test.ExpectAlertFromClient { + e.ProtocolNameList = append(e.ProtocolNameList, "oops") + } + } + } + + if negotiatedProtocol != test.ExpectedProtocol { + t.Errorf("ALPN %v: expected(%v) actual(%v)", test.Name, test.ExpectedProtocol, negotiatedProtocol) + } + + s, err := r.Marshal() + if err != nil { + t.Fatal(err) + } + + // Forward ServerHello + if _, err = cb.Write(s); err != nil { + t.Fatal(err) + } + + if test.ExpectAlertFromClient { + resp5 := make([]byte, 1024) + n, err = cb.Read(resp5) + if err != nil { + t.Fatal(err) + } + + r2 := &recordlayer.RecordLayer{} + if err := r2.Unmarshal(resp5[:n]); err != nil { + t.Fatal(err) + } + + a, ok := r2.Content.(*alert.Alert) + if !ok { + t.Fatal("Failed to cast alert.Alert") + } + + if a.Description != test.Alert { + t.Errorf("ALPN %v: expected(%v) actual(%v)", test.Name, test.Alert, a.Description) + } 
+ } + } + + time.Sleep(50 * time.Millisecond) // Give some time for returned errors + }) + } +} + +// Make sure the supported_groups extension is not included in the ServerHello +func TestSupportedGroupsExtension(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + t.Run("ServerHello Supported Groups", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ca, cb := dpipe.Pipe() + go func() { + if _, err := testServer(ctx, cb, &Config{}, true); !errors.Is(err, context.Canceled) { + t.Error(err) + } + }() + extensions := []extension.Extension{ + &extension.SupportedEllipticCurves{ + EllipticCurves: []elliptic.Curve{elliptic.X25519, elliptic.P256, elliptic.P384}, + }, + &extension.SupportedPointFormats{ + PointFormats: []elliptic.CurvePointFormat{elliptic.CurvePointFormatUncompressed}, + }, + } + + time.Sleep(50 * time.Millisecond) + + resp := make([]byte, 1024) + err := sendClientHello([]byte{}, ca, 0, extensions) + if err != nil { + t.Fatal(err) + } + + // Receive ServerHello + n, err := ca.Read(resp) + if err != nil { + t.Fatal(err) + } + r := &recordlayer.RecordLayer{} + if err = r.Unmarshal(resp[:n]); err != nil { + t.Fatal(err) + } + + helloVerifyRequest, ok := r.Content.(*handshake.Handshake).Message.(*handshake.MessageHelloVerifyRequest) + if !ok { + t.Fatal("Failed to cast MessageHelloVerifyRequest") + } + + err = sendClientHello(helloVerifyRequest.Cookie, ca, 1, extensions) + if err != nil { + t.Fatal(err) + } + if n, err = ca.Read(resp); err != nil { + t.Fatal(err) + } + + messages, err := recordlayer.UnpackDatagram(resp[:n]) + if err != nil { + t.Fatal(err) + } + + if err := r.Unmarshal(messages[0]); err != nil { + t.Fatal(err) + } + + serverHello, ok := r.Content.(*handshake.Handshake).Message.(*handshake.MessageServerHello) + if !ok { + t.Fatal("Failed to cast MessageServerHello") + } + + gotGroups := false + for _, v := range serverHello.Extensions { + if _, ok := v.(*extension.SupportedEllipticCurves); ok { + gotGroups = true + } + } + + if gotGroups { + t.Errorf("TestSupportedGroups: supported_groups extension was sent in ServerHello") + } + }) +} + +func TestSessionResume(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + t.Run("resumed", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + type result struct { + c *Conn + err error + } + clientRes := make(chan result, 1) + + ss := &memSessStore{} + + id, _ := hex.DecodeString("9b9fc92255634d9fb109febed42166717bb8ded8c738ba71bc7f2a0d9dae0306") + secret, _ := hex.DecodeString("2e942a37aca5241deb2295b5fcedac221c7078d2503d2b62aeb48c880d7da73c001238b708559686b9da6e829c05ead7") + + s := Session{ID: id, Secret: secret} + + ca, cb := dpipe.Pipe() + + _ = ss.Set(id, s) + _ = ss.Set([]byte(ca.RemoteAddr().String()+"_example.com"), s) + + go func() { + config := &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + ServerName: "example.com", + SessionStore: ss, + MTU: 100, + } + c, err := testClient(ctx, ca, config, false) + clientRes <- result{c, err} + }() + + config := &Config{ + CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + ServerName: "example.com", 
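+ // The server shares the seeded session store, so this handshake should resume the stored session rather than negotiate a new one.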
+ SessionStore: ss, + MTU: 100, + } + server, err := testServer(ctx, cb, config, true) + if err != nil { + t.Fatalf("TestSessionResume: Server failed(%v)", err) + } + + actualSessionID := server.ConnectionState().SessionID + actualMasterSecret := server.ConnectionState().masterSecret + if !bytes.Equal(actualSessionID, id) { + t.Errorf("TestSessionResumetion: SessionID Mismatch: expected(%v) actual(%v)", id, actualSessionID) + } + if !bytes.Equal(actualMasterSecret, secret) { + t.Errorf("TestSessionResumetion: masterSecret Mismatch: expected(%v) actual(%v)", secret, actualMasterSecret) + } + + defer func() { + _ = server.Close() + }() + + res := <-clientRes + if res.err != nil { + t.Fatal(res.err) + } + _ = res.c.Close() + }) + + t.Run("new session", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + type result struct { + c *Conn + err error + } + clientRes := make(chan result, 1) + + s1 := &memSessStore{} + s2 := &memSessStore{} + + ca, cb := dpipe.Pipe() + go func() { + config := &Config{ + ServerName: "example.com", + SessionStore: s1, + } + c, err := testClient(ctx, ca, config, false) + clientRes <- result{c, err} + }() + + config := &Config{ + SessionStore: s2, + } + server, err := testServer(ctx, cb, config, true) + if err != nil { + t.Fatalf("TestSessionResumetion: Server failed(%v)", err) + } + + actualSessionID := server.ConnectionState().SessionID + actualMasterSecret := server.ConnectionState().masterSecret + ss, _ := s2.Get(actualSessionID) + if !bytes.Equal(actualMasterSecret, ss.Secret) { + t.Errorf("TestSessionResumetion: masterSecret Mismatch: expected(%v) actual(%v)", ss.Secret, actualMasterSecret) + } + + defer func() { + _ = server.Close() + }() + + res := <-clientRes + if res.err != nil { + t.Fatal(res.err) + } + cs, _ := s1.Get([]byte(ca.RemoteAddr().String() + "_example.com")) + if !bytes.Equal(actualMasterSecret, cs.Secret) { + t.Errorf("TestSessionResumetion: masterSecret Mismatch: expected(%v) actual(%v)", ss.Secret, actualMasterSecret) + } + _ = res.c.Close() + }) +} + +type memSessStore struct { + sync.Map +} + +func (ms *memSessStore) Set(key []byte, s Session) error { + k := hex.EncodeToString(key) + ms.Store(k, s) + + return nil +} + +func (ms *memSessStore) Get(key []byte) (Session, error) { + k := hex.EncodeToString(key) + + v, ok := ms.Load(k) + if !ok { + return Session{}, nil + } + + s, ok := v.(Session) + if !ok { + return Session{}, nil + } + + return s, nil +} + +func (ms *memSessStore) Del(key []byte) error { + k := hex.EncodeToString(key) + ms.Delete(k) + + return nil +} + +// Assert that the server only uses CipherSuites with a hash+signature that matches +// the certificate. 
As specified in rfc5246#section-7.4.3 +func TestCipherSuiteMatchesCertificateType(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(time.Second * 20) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + for _, test := range []struct { + Name string + cipherList []CipherSuiteID + expectedCipher CipherSuiteID + generateRSA bool + }{ + { + Name: "ECDSA Certificate with RSA CipherSuite first", + cipherList: []CipherSuiteID{TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + expectedCipher: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + }, + { + Name: "RSA Certificate with ECDSA CipherSuite first", + cipherList: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + expectedCipher: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + generateRSA: true, + }, + } { + test := test + t.Run(test.Name, func(t *testing.T) { + clientErr := make(chan error, 1) + client := make(chan *Conn, 1) + + ca, cb := dpipe.Pipe() + go func() { + c, err := testClient(context.TODO(), ca, &Config{CipherSuites: test.cipherList}, false) + clientErr <- err + client <- c + }() + + var ( + priv crypto.PrivateKey + err error + ) + + if test.generateRSA { + if priv, err = rsa.GenerateKey(rand.Reader, 2048); err != nil { + t.Fatal(err) + } + } else { + if priv, err = ecdsa.GenerateKey(cryptoElliptic.P256(), rand.Reader); err != nil { + t.Fatal(err) + } + } + + serverCert, err := selfsign.SelfSign(priv) + if err != nil { + t.Fatal(err) + } + + if s, err := testServer(context.TODO(), cb, &Config{ + CipherSuites: test.cipherList, + Certificates: []tls.Certificate{serverCert}, + }, false); err != nil { + t.Fatal(err) + } else if err = s.Close(); err != nil { + t.Fatal(err) + } + + if c, err := <-client, <-clientErr; err != nil { + t.Fatal(err) + } else if err := c.Close(); err != nil { + t.Fatal(err) + } else if c.ConnectionState().cipherSuite.ID() != test.expectedCipher { + t.Fatalf("Expected(%s) and Actual(%s) CipherSuite do not match", test.expectedCipher, c.ConnectionState().cipherSuite.ID()) + } + }) + } +} + +// Test that we return the proper certificate if we are serving multiple ServerNames on a single Server +func TestMultipleServerCertificates(t *testing.T) { + fooCert, err := selfsign.GenerateSelfSignedWithDNS("foo") + if err != nil { + t.Fatal(err) + } + + barCert, err := selfsign.GenerateSelfSignedWithDNS("bar") + if err != nil { + t.Fatal(err) + } + + caPool := x509.NewCertPool() + for _, cert := range []tls.Certificate{fooCert, barCert} { + certificate, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + t.Fatal(err) + } + caPool.AddCert(certificate) + } + + for _, test := range []struct { + RequestServerName string + ExpectedDNSName string + }{ + { + "foo", + "foo", + }, + { + "bar", + "bar", + }, + { + "invalid", + "foo", + }, + } { + test := test + t.Run(test.RequestServerName, func(t *testing.T) { + clientErr := make(chan error, 2) + client := make(chan *Conn, 1) + + ca, cb := dpipe.Pipe() + go func() { + c, err := testClient(context.TODO(), ca, &Config{ + RootCAs: caPool, + ServerName: test.RequestServerName, + VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + certificate, err := x509.ParseCertificate(rawCerts[0]) + if err != nil { + return err + } + + if certificate.DNSNames[0] != test.ExpectedDNSName { + return errWrongCert + } + + return nil + }, + }, false) + clientErr <- err + client <- c + }() + + if s, 
err := testServer(context.TODO(), cb, &Config{Certificates: []tls.Certificate{fooCert, barCert}}, false); err != nil { + t.Fatal(err) + } else if err = s.Close(); err != nil { + t.Fatal(err) + } + + if c, err := <-client, <-clientErr; err != nil { + t.Fatal(err) + } else if err := c.Close(); err != nil { + t.Fatal(err) + } + }) + } +} + +func TestEllipticCurveConfiguration(t *testing.T) { + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + for _, test := range []struct { + Name string + ConfigCurves []elliptic.Curve + HadnshakeCurves []elliptic.Curve + }{ + { + Name: "Curve defaulting", + ConfigCurves: nil, + HadnshakeCurves: defaultCurves, + }, + { + Name: "Single curve", + ConfigCurves: []elliptic.Curve{elliptic.X25519}, + HadnshakeCurves: []elliptic.Curve{elliptic.X25519}, + }, + { + Name: "Multiple curves", + ConfigCurves: []elliptic.Curve{elliptic.P384, elliptic.X25519}, + HadnshakeCurves: []elliptic.Curve{elliptic.P384, elliptic.X25519}, + }, + } { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ca, cb := dpipe.Pipe() + type result struct { + c *Conn + err error + } + c := make(chan result) + + go func() { + client, err := testClient(ctx, ca, &Config{CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, EllipticCurves: test.ConfigCurves}, true) + c <- result{client, err} + }() + + server, err := testServer(ctx, cb, &Config{CipherSuites: []CipherSuiteID{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, EllipticCurves: test.ConfigCurves}, true) + if err != nil { + t.Fatalf("Server error: %v", err) + } + + if len(test.ConfigCurves) == 0 && len(test.HadnshakeCurves) != len(server.fsm.cfg.ellipticCurves) { + t.Fatalf("Failed to default Elliptic curves, expected %d, got: %d", len(test.HadnshakeCurves), len(server.fsm.cfg.ellipticCurves)) + } + + if len(test.ConfigCurves) != 0 { + if len(test.HadnshakeCurves) != len(server.fsm.cfg.ellipticCurves) { + t.Fatalf("Failed to configure Elliptic curves, expect %d, got %d", len(test.HadnshakeCurves), len(server.fsm.cfg.ellipticCurves)) + } + for i, c := range test.ConfigCurves { + if c != server.fsm.cfg.ellipticCurves[i] { + t.Fatalf("Failed to maintain Elliptic curve order, expected %s, got %s", c, server.fsm.cfg.ellipticCurves[i]) + } + } + } + + res := <-c + if res.err != nil { + t.Fatalf("Client error; %v", err) + } + + defer func() { + err = server.Close() + if err != nil { + t.Fatal(err) + } + err = res.c.Close() + if err != nil { + t.Fatal(err) + } + }() + } +} + +func TestSkipHelloVerify(t *testing.T) { + report := test.CheckRoutines(t) + defer report() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + ca, cb := dpipe.Pipe() + certificate, err := selfsign.GenerateSelfSigned() + if err != nil { + t.Fatal(err) + } + gotHello := make(chan struct{}) + + go func() { + server, sErr := testServer(ctx, cb, &Config{ + Certificates: []tls.Certificate{certificate}, + LoggerFactory: logging.NewDefaultLoggerFactory(), + InsecureSkipVerifyHello: true, + }, false) + if sErr != nil { + t.Error(sErr) + return + } + buf := make([]byte, 1024) + if _, sErr = server.Read(buf); sErr != nil { + t.Error(sErr) + } + gotHello <- struct{}{} + if sErr = server.Close(); sErr != nil { //nolint:contextcheck + t.Error(sErr) + } + }() + + client, err := testClient(ctx, ca, &Config{ + LoggerFactory: logging.NewDefaultLoggerFactory(), + InsecureSkipVerify: true, + }, false) + if err != nil { + t.Fatal(err) + } + if _, err = 
client.Write([]byte("hello")); err != nil { + t.Error(err) + } + select { + case <-gotHello: + // OK + case <-time.After(time.Second * 5): + t.Error("timeout") + } + + if err = client.Close(); err != nil { + t.Error(err) + } +} + +type connWithCallback struct { + net.Conn + onWrite func([]byte) +} + +func (c *connWithCallback) Write(b []byte) (int, error) { + if c.onWrite != nil { + c.onWrite(b) + } + return c.Conn.Write(b) +} diff --git a/replace/dtls/crypto.go b/replace/dtls/crypto.go new file mode 100644 index 000000000..968910c7e --- /dev/null +++ b/replace/dtls/crypto.go @@ -0,0 +1,228 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/binary" + "math/big" + "time" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/hash" +) + +type ecdsaSignature struct { + R, S *big.Int +} + +func valueKeyMessage(clientRandom, serverRandom, publicKey []byte, namedCurve elliptic.Curve) []byte { + serverECDHParams := make([]byte, 4) + serverECDHParams[0] = 3 // named curve + binary.BigEndian.PutUint16(serverECDHParams[1:], uint16(namedCurve)) + serverECDHParams[3] = byte(len(publicKey)) + + plaintext := []byte{} + plaintext = append(plaintext, clientRandom...) + plaintext = append(plaintext, serverRandom...) + plaintext = append(plaintext, serverECDHParams...) + plaintext = append(plaintext, publicKey...) + + return plaintext +} + +// If the client provided a "signature_algorithms" extension, then all +// certificates provided by the server MUST be signed by a +// hash/signature algorithm pair that appears in that extension +// +// https://tools.ietf.org/html/rfc5246#section-7.4.2 +func generateKeySignature(clientRandom, serverRandom, publicKey []byte, namedCurve elliptic.Curve, privateKey crypto.PrivateKey, hashAlgorithm hash.Algorithm) ([]byte, error) { + msg := valueKeyMessage(clientRandom, serverRandom, publicKey, namedCurve) + switch p := privateKey.(type) { + case ed25519.PrivateKey: + // https://crypto.stackexchange.com/a/55483 + return p.Sign(rand.Reader, msg, crypto.Hash(0)) + case *ecdsa.PrivateKey: + hashed := hashAlgorithm.Digest(msg) + return p.Sign(rand.Reader, hashed, hashAlgorithm.CryptoHash()) + case *rsa.PrivateKey: + hashed := hashAlgorithm.Digest(msg) + return p.Sign(rand.Reader, hashed, hashAlgorithm.CryptoHash()) + } + + return nil, errKeySignatureGenerateUnimplemented +} + +func verifyKeySignature(message, remoteKeySignature []byte, hashAlgorithm hash.Algorithm, rawCertificates [][]byte) error { //nolint:dupl + if len(rawCertificates) == 0 { + return errLengthMismatch + } + certificate, err := x509.ParseCertificate(rawCertificates[0]) + if err != nil { + return err + } + + switch p := certificate.PublicKey.(type) { + case ed25519.PublicKey: + if ok := ed25519.Verify(p, message, remoteKeySignature); !ok { + return errKeySignatureMismatch + } + return nil + case *ecdsa.PublicKey: + ecdsaSig := &ecdsaSignature{} + if _, err := asn1.Unmarshal(remoteKeySignature, ecdsaSig); err != nil { + return err + } + if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { + return errInvalidECDSASignature + } + hashed := hashAlgorithm.Digest(message) + if !ecdsa.Verify(p, hashed, ecdsaSig.R, ecdsaSig.S) { + return errKeySignatureMismatch + } + return nil + case *rsa.PublicKey: + switch certificate.SignatureAlgorithm { + case x509.SHA1WithRSA, 
x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA: + hashed := hashAlgorithm.Digest(message) + return rsa.VerifyPKCS1v15(p, hashAlgorithm.CryptoHash(), hashed, remoteKeySignature) + default: + return errKeySignatureVerifyUnimplemented + } + } + + return errKeySignatureVerifyUnimplemented +} + +// If the server has sent a CertificateRequest message, the client MUST send the Certificate +// message. The ClientKeyExchange message is now sent, and the content +// of that message will depend on the public key algorithm selected +// between the ClientHello and the ServerHello. If the client has sent +// a certificate with signing ability, a digitally-signed +// CertificateVerify message is sent to explicitly verify possession of +// the private key in the certificate. +// https://tools.ietf.org/html/rfc5246#section-7.3 +func generateCertificateVerify(handshakeBodies []byte, privateKey crypto.PrivateKey, hashAlgorithm hash.Algorithm) ([]byte, error) { + if p, ok := privateKey.(ed25519.PrivateKey); ok { + // https://pkg.go.dev/crypto/ed25519#PrivateKey.Sign + // Sign signs the given message with priv. Ed25519 performs two passes over + // messages to be signed and therefore cannot handle pre-hashed messages. + return p.Sign(rand.Reader, handshakeBodies, crypto.Hash(0)) + } + + h := sha256.New() + if _, err := h.Write(handshakeBodies); err != nil { + return nil, err + } + hashed := h.Sum(nil) + + switch p := privateKey.(type) { + case *ecdsa.PrivateKey: + return p.Sign(rand.Reader, hashed, hashAlgorithm.CryptoHash()) + case *rsa.PrivateKey: + return p.Sign(rand.Reader, hashed, hashAlgorithm.CryptoHash()) + } + + return nil, errInvalidSignatureAlgorithm +} + +func verifyCertificateVerify(handshakeBodies []byte, hashAlgorithm hash.Algorithm, remoteKeySignature []byte, rawCertificates [][]byte) error { //nolint:dupl + if len(rawCertificates) == 0 { + return errLengthMismatch + } + certificate, err := x509.ParseCertificate(rawCertificates[0]) + if err != nil { + return err + } + + switch p := certificate.PublicKey.(type) { + case ed25519.PublicKey: + if ok := ed25519.Verify(p, handshakeBodies, remoteKeySignature); !ok { + return errKeySignatureMismatch + } + return nil + case *ecdsa.PublicKey: + ecdsaSig := &ecdsaSignature{} + if _, err := asn1.Unmarshal(remoteKeySignature, ecdsaSig); err != nil { + return err + } + if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { + return errInvalidECDSASignature + } + hash := hashAlgorithm.Digest(handshakeBodies) + if !ecdsa.Verify(p, hash, ecdsaSig.R, ecdsaSig.S) { + return errKeySignatureMismatch + } + return nil + case *rsa.PublicKey: + switch certificate.SignatureAlgorithm { + case x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA: + hash := hashAlgorithm.Digest(handshakeBodies) + return rsa.VerifyPKCS1v15(p, hashAlgorithm.CryptoHash(), hash, remoteKeySignature) + default: + return errKeySignatureVerifyUnimplemented + } + } + + return errKeySignatureVerifyUnimplemented +} + +func loadCerts(rawCertificates [][]byte) ([]*x509.Certificate, error) { + if len(rawCertificates) == 0 { + return nil, errLengthMismatch + } + + certs := make([]*x509.Certificate, 0, len(rawCertificates)) + for _, rawCert := range rawCertificates { + cert, err := x509.ParseCertificate(rawCert) + if err != nil { + return nil, err + } + certs = append(certs, cert) + } + return certs, nil +} + +func verifyClientCert(rawCertificates [][]byte, roots *x509.CertPool) (chains [][]*x509.Certificate, err error) { + certificate, err := loadCerts(rawCertificates) 
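+ // The first parsed certificate is the leaf; any remaining certificates are offered as intermediates when verifying the chain for client authentication.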
+ if err != nil { + return nil, err + } + intermediateCAPool := x509.NewCertPool() + for _, cert := range certificate[1:] { + intermediateCAPool.AddCert(cert) + } + opts := x509.VerifyOptions{ + Roots: roots, + CurrentTime: time.Now(), + Intermediates: intermediateCAPool, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } + return certificate[0].Verify(opts) +} + +func verifyServerCert(rawCertificates [][]byte, roots *x509.CertPool, serverName string) (chains [][]*x509.Certificate, err error) { + certificate, err := loadCerts(rawCertificates) + if err != nil { + return nil, err + } + intermediateCAPool := x509.NewCertPool() + for _, cert := range certificate[1:] { + intermediateCAPool.AddCert(cert) + } + opts := x509.VerifyOptions{ + Roots: roots, + CurrentTime: time.Now(), + DNSName: serverName, + Intermediates: intermediateCAPool, + } + return certificate[0].Verify(opts) +} diff --git a/replace/dtls/crypto_test.go b/replace/dtls/crypto_test.go new file mode 100644 index 000000000..771ea3afa --- /dev/null +++ b/replace/dtls/crypto_test.go @@ -0,0 +1,76 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "bytes" + "crypto/x509" + "encoding/pem" + "testing" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/hash" +) + +const rawPrivateKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAxIA2BrrnR2sIlATsp7aRBD/3krwZ7vt9dNeoDQAee0s6SuYP +6MBx/HPnAkwNvPS90R05a7pwRkoT6Ur4PfPhCVlUe8lV+0Eto3ZSEeHz3HdsqlM3 +bso67L7Dqrc7MdVstlKcgJi8yeAoGOIL9/igOv0XBFCeznm9nznx6mnsR5cugw+1 +ypXelaHmBCLV7r5SeVSh57+KhvZGbQ2fFpUaTPegRpJZXBNS8lSeWvtOv9d6N5UB +ROTAJodMZT5AfX0jB0QB9IT/0I96H6BSENH08NXOeXApMuLKvnAf361rS7cRAfRL +rWZqERMP4u6Cnk0Cnckc3WcW27kGGIbtwbqUIQIDAQABAoIBAGF7OVIdZp8Hejn0 +N3L8HvT8xtUEe9kS6ioM0lGgvX5s035Uo4/T6LhUx0VcdXRH9eLHnLTUyN4V4cra +ZkxVsE3zAvZl60G6E+oDyLMWZOP6Wu4kWlub9597A5atT7BpMIVCdmFVZFLB4SJ3 +AXkC3nplFAYP+Lh1rJxRIrIn2g+pEeBboWbYA++oDNuMQffDZaokTkJ8Bn1JZYh0 +xEXKY8Bi2Egd5NMeZa1UFO6y8tUbZfwgVs6Enq5uOgtfayq79vZwyjj1kd29MBUD +8g8byV053ZKxbUOiOuUts97eb+fN3DIDRTcT2c+lXt/4C54M1FclJAbtYRK/qwsl +pYWKQAECgYEA4ZUbqQnTo1ICvj81ifGrz+H4LKQqe92Hbf/W51D/Umk2kP702W22 +HP4CvrJRtALThJIG9m2TwUjl/WAuZIBrhSAbIvc3Fcoa2HjdRp+sO5U1ueDq7d/S +Z+PxRI8cbLbRpEdIaoR46qr/2uWZ943PHMv9h4VHPYn1w8b94hwD6vkCgYEA3v87 +mFLzyM9ercnEv9zHMRlMZFQhlcUGQZvfb8BuJYl/WogyT6vRrUuM0QXULNEPlrin +mBQTqc1nCYbgkFFsD2VVt1qIyiAJsB9MD1LNV6YuvE7T2KOSadmsA4fa9PUqbr71 +hf3lTTq+LeR09LebO7WgSGYY+5YKVOEGpYMR1GkCgYEAxPVQmk3HKHEhjgRYdaG5 +lp9A9ZE8uruYVJWtiHgzBTxx9TV2iST+fd/We7PsHFTfY3+wbpcMDBXfIVRKDVwH +BMwchXH9+Ztlxx34bYJaegd0SmA0Hw9ugWEHNgoSEmWpM1s9wir5/ELjc7dGsFtz +uzvsl9fpdLSxDYgAAdzeGtkCgYBAzKIgrVox7DBzB8KojhtD5ToRnXD0+H/M6OKQ +srZPKhlb0V/tTtxrIx0UUEFLlKSXA6mPw6XDHfDnD86JoV9pSeUSlrhRI+Ysy6tq +eIE7CwthpPZiaYXORHZ7wCqcK/HcpJjsCs9rFbrV0yE5S3FMdIbTAvgXg44VBB7O +UbwIoQKBgDuY8gSrA5/A747wjjmsdRWK4DMTMEV4eCW1BEP7Tg7Cxd5n3xPJiYhr +nhLGN+mMnVIcv2zEMS0/eNZr1j/0BtEdx+3IC6Eq+ONY0anZ4Irt57/5QeKgKn/L +JPhfPySIPG4UmwE4gW8t79vfOKxnUu2fDD1ZXUYopan6EckACNH/ +-----END RSA PRIVATE KEY----- +` + +func TestGenerateKeySignature(t *testing.T) { + block, _ := pem.Decode([]byte(rawPrivateKey)) + key, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + t.Error(err) + } + + clientRandom := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f} + serverRandom := []byte{0x70, 0x71, 0x72, 0x73, 0x74, 
0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f} + publicKey := []byte{0x20, 0x9f, 0xd7, 0xad, 0x6d, 0xcf, 0xf4, 0x29, 0x8d, 0xd3, 0xf9, 0x6d, 0x5b, 0x1b, 0x2a, 0xf9, 0x10, 0xa0, 0x53, 0x5b, 0x14, 0x88, 0xd7, 0xf8, 0xfa, 0xbb, 0x34, 0x9a, 0x98, 0x28, 0x80, 0xb6, 0x15} + expectedSignature := []byte{ + 0x6f, 0x47, 0x97, 0x85, 0xcc, 0x76, 0x50, 0x93, 0xbd, 0xe2, 0x6a, 0x69, 0x0b, 0xc3, 0x03, 0xd1, 0xb7, 0xe4, 0xab, 0x88, 0x7b, 0xa6, 0x52, 0x80, 0xdf, + 0xaa, 0x25, 0x7a, 0xdb, 0x29, 0x32, 0xe4, 0xd8, 0x28, 0x28, 0xb3, 0xe8, 0x04, 0x3c, 0x38, 0x16, 0xfc, 0x78, 0xe9, 0x15, 0x7b, 0xc5, 0xbd, 0x7d, 0xfc, + 0xcd, 0x83, 0x00, 0x57, 0x4a, 0x3c, 0x23, 0x85, 0x75, 0x6b, 0x37, 0xd5, 0x89, 0x72, 0x73, 0xf0, 0x44, 0x8c, 0x00, 0x70, 0x1f, 0x6e, 0xa2, 0x81, 0xd0, + 0x09, 0xc5, 0x20, 0x36, 0xab, 0x23, 0x09, 0x40, 0x1f, 0x4d, 0x45, 0x96, 0x62, 0xbb, 0x81, 0xb0, 0x30, 0x72, 0xad, 0x3a, 0x0a, 0xac, 0x31, 0x63, 0x40, + 0x52, 0x0a, 0x27, 0xf3, 0x34, 0xde, 0x27, 0x7d, 0xb7, 0x54, 0xff, 0x0f, 0x9f, 0x5a, 0xfe, 0x07, 0x0f, 0x4e, 0x9f, 0x53, 0x04, 0x34, 0x62, 0xf4, 0x30, + 0x74, 0x83, 0x35, 0xfc, 0xe4, 0x7e, 0xbf, 0x5a, 0xc4, 0x52, 0xd0, 0xea, 0xf9, 0x61, 0x4e, 0xf5, 0x1c, 0x0e, 0x58, 0x02, 0x71, 0xfb, 0x1f, 0x34, 0x55, + 0xe8, 0x36, 0x70, 0x3c, 0xc1, 0xcb, 0xc9, 0xb7, 0xbb, 0xb5, 0x1c, 0x44, 0x9a, 0x6d, 0x88, 0x78, 0x98, 0xd4, 0x91, 0x2e, 0xeb, 0x98, 0x81, 0x23, 0x30, + 0x73, 0x39, 0x43, 0xd5, 0xbb, 0x70, 0x39, 0xba, 0x1f, 0xdb, 0x70, 0x9f, 0x91, 0x83, 0x56, 0xc2, 0xde, 0xed, 0x17, 0x6d, 0x2c, 0x3e, 0x21, 0xea, 0x36, + 0xb4, 0x91, 0xd8, 0x31, 0x05, 0x60, 0x90, 0xfd, 0xc6, 0x74, 0xa9, 0x7b, 0x18, 0xfc, 0x1c, 0x6a, 0x1c, 0x6e, 0xec, 0xd3, 0xc1, 0xc0, 0x0d, 0x11, 0x25, + 0x48, 0x37, 0x3d, 0x45, 0x11, 0xa2, 0x31, 0x14, 0x0a, 0x66, 0x9f, 0xd8, 0xac, 0x74, 0xa2, 0xcd, 0xc8, 0x79, 0xb3, 0x9e, 0xc6, 0x66, 0x25, 0xcf, 0x2c, + 0x87, 0x5e, 0x5c, 0x36, 0x75, 0x86, + } + + signature, err := generateKeySignature(clientRandom, serverRandom, publicKey, elliptic.X25519, key, hash.SHA256) + if err != nil { + t.Error(err) + } else if !bytes.Equal(expectedSignature, signature) { + t.Errorf("Signature generation failed \nexp % 02x \nactual % 02x ", expectedSignature, signature) + } +} diff --git a/replace/dtls/dtls.go b/replace/dtls/dtls.go new file mode 100644 index 000000000..b799770d8 --- /dev/null +++ b/replace/dtls/dtls.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +// Package dtls implements Datagram Transport Layer Security (DTLS) 1.2 +package dtls diff --git a/replace/dtls/e2e/Dockerfile b/replace/dtls/e2e/Dockerfile new file mode 100644 index 000000000..68440e526 --- /dev/null +++ b/replace/dtls/e2e/Dockerfile @@ -0,0 +1,9 @@ +# SPDX-FileCopyrightText: 2023 The Pion community +# SPDX-License-Identifier: MIT + +FROM docker.io/library/golang:1.18-bullseye + +COPY . 
/go/src/github.com/pion/dtls +WORKDIR /go/src/github.com/pion/dtls/e2e + +CMD ["go", "test", "-tags=openssl", "-v", "."] diff --git a/replace/dtls/e2e/e2e.go b/replace/dtls/e2e/e2e.go new file mode 100644 index 000000000..eb8007aab --- /dev/null +++ b/replace/dtls/e2e/e2e.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +// Package e2e contains end to end tests for pion/dtls +package e2e diff --git a/replace/dtls/e2e/e2e_lossy_test.go b/replace/dtls/e2e/e2e_lossy_test.go new file mode 100644 index 000000000..2789ec3e9 --- /dev/null +++ b/replace/dtls/e2e/e2e_lossy_test.go @@ -0,0 +1,210 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package e2e + +import ( + "crypto/tls" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/pion/dtls/v2" + "github.com/pion/dtls/v2/pkg/crypto/selfsign" + transportTest "github.com/pion/transport/v2/test" +) + +const ( + flightInterval = time.Millisecond * 100 + lossyTestTimeout = 30 * time.Second +) + +/* +DTLS Client/Server over a lossy transport, just asserts it can handle at increasing increments +*/ +func TestPionE2ELossy(t *testing.T) { + // Check for leaking routines + report := transportTest.CheckRoutines(t) + defer report() + + type runResult struct { + dtlsConn *dtls.Conn + err error + } + + serverCert, err := selfsign.GenerateSelfSigned() + if err != nil { + t.Fatal(err) + } + + clientCert, err := selfsign.GenerateSelfSigned() + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + LossChanceRange int + DoClientAuth bool + CipherSuites []dtls.CipherSuiteID + MTU int + }{ + { + LossChanceRange: 0, + }, + { + LossChanceRange: 10, + }, + { + LossChanceRange: 20, + }, + { + LossChanceRange: 50, + }, + { + LossChanceRange: 0, + DoClientAuth: true, + }, + { + LossChanceRange: 10, + DoClientAuth: true, + }, + { + LossChanceRange: 20, + DoClientAuth: true, + }, + { + LossChanceRange: 50, + DoClientAuth: true, + }, + { + LossChanceRange: 0, + CipherSuites: []dtls.CipherSuiteID{dtls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA}, + }, + { + LossChanceRange: 10, + CipherSuites: []dtls.CipherSuiteID{dtls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA}, + }, + { + LossChanceRange: 20, + CipherSuites: []dtls.CipherSuiteID{dtls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA}, + }, + { + LossChanceRange: 50, + CipherSuites: []dtls.CipherSuiteID{dtls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA}, + }, + { + LossChanceRange: 10, + MTU: 100, + DoClientAuth: true, + }, + { + LossChanceRange: 20, + MTU: 100, + DoClientAuth: true, + }, + { + LossChanceRange: 50, + MTU: 100, + DoClientAuth: true, + }, + } { + name := fmt.Sprintf("Loss%d_MTU%d", test.LossChanceRange, test.MTU) + if test.DoClientAuth { + name += "_WithCliAuth" + } + for _, ciph := range test.CipherSuites { + name += "_With" + ciph.String() + } + test := test + t.Run(name, func(t *testing.T) { + // Limit runtime in case of deadlocks + lim := transportTest.TimeOut(lossyTestTimeout + time.Second) + defer lim.Stop() + + rand.Seed(time.Now().UTC().UnixNano()) + chosenLoss := rand.Intn(9) + test.LossChanceRange //nolint:gosec + serverDone := make(chan runResult) + clientDone := make(chan runResult) + br := transportTest.NewBridge() + + if err = br.SetLossChance(chosenLoss); err != nil { + t.Fatal(err) + } + + go func() { + cfg := &dtls.Config{ + FlightInterval: flightInterval, + CipherSuites: test.CipherSuites, + InsecureSkipVerify: true, + MTU: test.MTU, + } + + if test.DoClientAuth { + cfg.Certificates = 
[]tls.Certificate{clientCert} + } + + client, startupErr := dtls.Client(br.GetConn0(), cfg) + clientDone <- runResult{client, startupErr} + }() + + go func() { + cfg := &dtls.Config{ + Certificates: []tls.Certificate{serverCert}, + FlightInterval: flightInterval, + MTU: test.MTU, + } + + if test.DoClientAuth { + cfg.ClientAuth = dtls.RequireAnyClientCert + } + + server, startupErr := dtls.Server(br.GetConn1(), cfg) + serverDone <- runResult{server, startupErr} + }() + + testTimer := time.NewTimer(lossyTestTimeout) + var serverConn, clientConn *dtls.Conn + defer func() { + if serverConn != nil { + if err = serverConn.Close(); err != nil { + t.Error(err) + } + } + if clientConn != nil { + if err = clientConn.Close(); err != nil { + t.Error(err) + } + } + }() + + for { + if serverConn != nil && clientConn != nil { + break + } + + br.Tick() + select { + case serverResult := <-serverDone: + if serverResult.err != nil { + t.Errorf("Fail, serverError: clientComplete(%t) serverComplete(%t) LossChance(%d) error(%v)", clientConn != nil, serverConn != nil, chosenLoss, serverResult.err) + return + } + + serverConn = serverResult.dtlsConn + case clientResult := <-clientDone: + if clientResult.err != nil { + t.Errorf("Fail, clientError: clientComplete(%t) serverComplete(%t) LossChance(%d) error(%v)", clientConn != nil, serverConn != nil, chosenLoss, clientResult.err) + return + } + + clientConn = clientResult.dtlsConn + case <-testTimer.C: + t.Errorf("Test expired: clientComplete(%t) serverComplete(%t) LossChance(%d)", clientConn != nil, serverConn != nil, chosenLoss) + return + case <-time.After(10 * time.Millisecond): + } + } + }) + } +} diff --git a/replace/dtls/e2e/e2e_openssl_test.go b/replace/dtls/e2e/e2e_openssl_test.go new file mode 100644 index 000000000..25bffb35a --- /dev/null +++ b/replace/dtls/e2e/e2e_openssl_test.go @@ -0,0 +1,330 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +//go:build openssl && !js +// +build openssl,!js + +package e2e + +import ( + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "regexp" + "strings" + "testing" + "time" + + "github.com/pion/dtls/v2" +) + +func serverOpenSSL(c *comm) { + go func() { + c.serverMutex.Lock() + defer c.serverMutex.Unlock() + + cfg := c.serverConfig + + // create openssl arguments + args := []string{ + "s_server", + "-dtls1_2", + "-quiet", + "-verify_quiet", + "-verify_return_error", + fmt.Sprintf("-accept=%d", c.serverPort), + } + ciphers := ciphersOpenSSL(cfg) + if ciphers != "" { + args = append(args, fmt.Sprintf("-cipher=%s", ciphers)) + } + + // psk arguments + if cfg.PSK != nil { + psk, err := cfg.PSK(nil) + if err != nil { + c.errChan <- err + return + } + args = append(args, fmt.Sprintf("-psk=%X", psk)) + if len(cfg.PSKIdentityHint) > 0 { + args = append(args, fmt.Sprintf("-psk_hint=%s", cfg.PSKIdentityHint)) + } + } + + // certs arguments + if len(cfg.Certificates) > 0 { + // create temporary cert files + certPEM, keyPEM, err := writeTempPEM(cfg) + if err != nil { + c.errChan <- err + return + } + args = append(args, + fmt.Sprintf("-cert=%s", certPEM), + fmt.Sprintf("-key=%s", keyPEM)) + defer func() { + _ = os.Remove(certPEM) + _ = os.Remove(keyPEM) + }() + } else { + args = append(args, "-nocert") + } + + // launch command + // #nosec G204 + cmd := exec.CommandContext(c.ctx, "openssl", args...) 
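For readers unfamiliar with this interop pattern: the harness runs openssl as an external DTLS peer and splices its stdio onto one end of an in-memory net.Pipe, so the other end can be handed around as an ordinary net.Conn. Below is a minimal standalone sketch of that splice, using a plain "cat" subprocess as a stand-in echo peer; it is illustrative only and not part of this patch, and it assumes a POSIX cat binary on PATH.

package main

import (
	"fmt"
	"net"
	"os/exec"
)

// pipeToCommand starts cmd with its stdin and stdout attached to one end of
// an in-memory net.Pipe and returns the other end as a net.Conn, the same
// idea the harness uses to expose "openssl s_server"/"s_client" to pion.
func pipeToCommand(cmd *exec.Cmd) (net.Conn, error) {
	inner, outer := net.Pipe()
	cmd.Stdin = inner
	cmd.Stdout = inner
	if err := cmd.Start(); err != nil {
		inner.Close()
		return nil, err
	}
	return outer, nil
}

func main() {
	// "cat" echoes its stdin, so a write followed by a read round-trips
	// through the subprocess.
	conn, err := pipeToCommand(exec.Command("cat"))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	if _, err := conn.Write([]byte("hello\n")); err != nil {
		panic(err)
	}
	buf := make([]byte, 6)
	if _, err := conn.Read(buf); err != nil {
		panic(err)
	}
	fmt.Printf("echoed: %s", buf)
}

The patch's version additionally ties the subprocess to the test context via exec.CommandContext, so the external peer is torn down when the test finishes.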
+ var inner net.Conn + inner, c.serverConn = net.Pipe() + cmd.Stdin = inner + cmd.Stdout = inner + cmd.Stderr = os.Stderr + if err := cmd.Start(); err != nil { + c.errChan <- err + _ = inner.Close() + return + } + + // Ensure that server has started + time.Sleep(500 * time.Millisecond) + + c.serverReady <- struct{}{} + simpleReadWrite(c.errChan, c.serverChan, c.serverConn, c.messageRecvCount) + }() +} + +func clientOpenSSL(c *comm) { + select { + case <-c.serverReady: + // OK + case <-time.After(time.Second): + c.errChan <- errors.New("waiting on serverReady err: timeout") + } + + c.clientMutex.Lock() + defer c.clientMutex.Unlock() + + cfg := c.clientConfig + + // create openssl arguments + args := []string{ + "s_client", + "-dtls1_2", + "-quiet", + "-verify_quiet", + "-servername=localhost", + fmt.Sprintf("-connect=127.0.0.1:%d", c.serverPort), + } + ciphers := ciphersOpenSSL(cfg) + if ciphers != "" { + args = append(args, fmt.Sprintf("-cipher=%s", ciphers)) + } + + // psk arguments + if cfg.PSK != nil { + psk, err := cfg.PSK(nil) + if err != nil { + c.errChan <- err + return + } + args = append(args, fmt.Sprintf("-psk=%X", psk)) + } + + // certificate arguments + if len(cfg.Certificates) > 0 { + // create temporary cert files + certPEM, keyPEM, err := writeTempPEM(cfg) + if err != nil { + c.errChan <- err + return + } + args = append(args, fmt.Sprintf("-CAfile=%s", certPEM), fmt.Sprintf("-cert=%s", certPEM), fmt.Sprintf("-key=%s", keyPEM)) + defer func() { + _ = os.Remove(certPEM) + _ = os.Remove(keyPEM) + }() + } + if !cfg.InsecureSkipVerify { + args = append(args, "-verify_return_error") + } + + // launch command + // #nosec G204 + cmd := exec.CommandContext(c.ctx, "openssl", args...) + var inner net.Conn + inner, c.clientConn = net.Pipe() + cmd.Stdin = inner + cmd.Stdout = inner + cmd.Stderr = os.Stderr + if err := cmd.Start(); err != nil { + c.errChan <- err + _ = inner.Close() + return + } + + simpleReadWrite(c.errChan, c.clientChan, c.clientConn, c.messageRecvCount) +} + +func ciphersOpenSSL(cfg *dtls.Config) string { + // See https://tls.mbed.org/supported-ssl-ciphersuites + translate := map[dtls.CipherSuiteID]string{ + dtls.TLS_ECDHE_ECDSA_WITH_AES_128_CCM: "ECDHE-ECDSA-AES128-CCM", + dtls.TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8: "ECDHE-ECDSA-AES128-CCM8", + + dtls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "ECDHE-ECDSA-AES128-GCM-SHA256", + dtls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "ECDHE-ECDSA-AES256-GCM-SHA384", + + dtls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "ECDHE-RSA-AES128-GCM-SHA256", + dtls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "ECDHE-RSA-AES256-GCM-SHA384", + + dtls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "ECDHE-ECDSA-AES256-SHA", + dtls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "ECDHE-RSA-AES256-SHA", + + dtls.TLS_PSK_WITH_AES_128_CCM: "PSK-AES128-CCM", + dtls.TLS_PSK_WITH_AES_128_CCM_8: "PSK-AES128-CCM8", + dtls.TLS_PSK_WITH_AES_256_CCM_8: "PSK-AES256-CCM8", + + dtls.TLS_PSK_WITH_AES_128_GCM_SHA256: "PSK-AES128-GCM-SHA256", + + dtls.TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256: "ECDHE-PSK-AES128-CBC-SHA256", + } + + var ciphers []string + for _, c := range cfg.CipherSuites { + if text, ok := translate[c]; ok { + ciphers = append(ciphers, text) + } + } + return strings.Join(ciphers, ";") +} + +func writeTempPEM(cfg *dtls.Config) (string, string, error) { + certOut, err := ioutil.TempFile("", "cert.pem") + if err != nil { + return "", "", fmt.Errorf("failed to create temporary file: %w", err) + } + keyOut, err := ioutil.TempFile("", "key.pem") + if err != nil { + return "", "", 
fmt.Errorf("failed to create temporary file: %w", err) + } + + cert := cfg.Certificates[0] + derBytes := cert.Certificate[0] + if err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { + return "", "", fmt.Errorf("failed to write data to cert.pem: %w", err) + } + if err = certOut.Close(); err != nil { + return "", "", fmt.Errorf("error closing cert.pem: %w", err) + } + + priv := cert.PrivateKey + var privBytes []byte + privBytes, err = x509.MarshalPKCS8PrivateKey(priv) + if err != nil { + return "", "", fmt.Errorf("unable to marshal private key: %w", err) + } + if err = pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil { + return "", "", fmt.Errorf("failed to write data to key.pem: %w", err) + } + if err = keyOut.Close(); err != nil { + return "", "", fmt.Errorf("error closing key.pem: %w", err) + } + return certOut.Name(), keyOut.Name(), nil +} + +func minimumOpenSSLVersion(t *testing.T) bool { + t.Helper() + + cmd := exec.Command("openssl", "version") + allOut, err := cmd.CombinedOutput() + if err != nil { + t.Log("Cannot determine OpenSSL version: ", err) + return false + } + verMatch := regexp.MustCompile(`(?i)^OpenSSL\s(?P(\d+\.)?(\d+\.)?(\*|\d+)(\w)?).+$`) + match := verMatch.FindStringSubmatch(strings.TrimSpace(string(allOut))) + params := map[string]string{} + for i, name := range verMatch.SubexpNames() { + if i > 0 && i <= len(match) { + params[name] = match[i] + } + } + var ver string + if val, ok := params["version"]; !ok { + t.Log("Could not extract OpenSSL version") + return false + } else { + ver = val + } + + cmp := strings.Compare(ver, "3.0.0") + if cmp == -1 { + return false + } + return true +} + +func TestPionOpenSSLE2ESimple(t *testing.T) { + t.Run("OpenSSLServer", func(t *testing.T) { + testPionE2ESimple(t, serverOpenSSL, clientPion) + }) + t.Run("OpenSSLClient", func(t *testing.T) { + testPionE2ESimple(t, serverPion, clientOpenSSL) + }) +} + +func TestPionOpenSSLE2ESimplePSK(t *testing.T) { + t.Run("OpenSSLServer", func(t *testing.T) { + testPionE2ESimplePSK(t, serverOpenSSL, clientPion) + }) + t.Run("OpenSSLClient", func(t *testing.T) { + testPionE2ESimplePSK(t, serverPion, clientOpenSSL) + }) +} + +func TestPionOpenSSLE2EMTUs(t *testing.T) { + t.Run("OpenSSLServer", func(t *testing.T) { + testPionE2EMTUs(t, serverOpenSSL, clientPion) + }) + t.Run("OpenSSLClient", func(t *testing.T) { + testPionE2EMTUs(t, serverPion, clientOpenSSL) + }) +} + +func TestPionOpenSSLE2ESimpleED25519(t *testing.T) { + t.Run("OpenSSLServer", func(t *testing.T) { + if !minimumOpenSSLVersion(t) { + t.Skip("Cannot use OpenSSL < 3.0 as a DTLS server with ED25519 keys") + } + testPionE2ESimpleED25519(t, serverOpenSSL, clientPion) + }) + t.Run("OpenSSLClient", func(t *testing.T) { + testPionE2ESimpleED25519(t, serverPion, clientOpenSSL) + }) +} + +func TestPionOpenSSLE2ESimpleED25519ClientCert(t *testing.T) { + t.Run("OpenSSLServer", func(t *testing.T) { + if !minimumOpenSSLVersion(t) { + t.Skip("Cannot use OpenSSL < 3.0 as a DTLS server with ED25519 keys") + } + testPionE2ESimpleED25519ClientCert(t, serverOpenSSL, clientPion) + }) + t.Run("OpenSSLClient", func(t *testing.T) { + testPionE2ESimpleED25519ClientCert(t, serverPion, clientOpenSSL) + }) +} + +func TestPionOpenSSLE2ESimpleECDSAClientCert(t *testing.T) { + t.Run("OpenSSLServer", func(t *testing.T) { + testPionE2ESimpleECDSAClientCert(t, serverOpenSSL, clientPion) + }) + t.Run("OpenSSLClient", func(t *testing.T) { + testPionE2ESimpleECDSAClientCert(t, serverPion, 
clientOpenSSL) + }) +} diff --git a/replace/dtls/e2e/e2e_test.go b/replace/dtls/e2e/e2e_test.go new file mode 100644 index 000000000..25514eff8 --- /dev/null +++ b/replace/dtls/e2e/e2e_test.go @@ -0,0 +1,525 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +//go:build !js +// +build !js + +package e2e + +import ( + "context" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/pion/dtls/v2" + "github.com/pion/dtls/v2/pkg/crypto/selfsign" + "github.com/pion/transport/v2/test" +) + +const ( + testMessage = "Hello World" + testTimeLimit = 5 * time.Second + messageRetry = 200 * time.Millisecond +) + +var errServerTimeout = errors.New("waiting on serverReady err: timeout") + +func randomPort(t testing.TB) int { + t.Helper() + conn, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatalf("failed to pickPort: %v", err) + } + defer func() { + _ = conn.Close() + }() + switch addr := conn.LocalAddr().(type) { + case *net.UDPAddr: + return addr.Port + default: + t.Fatalf("unknown addr type %T", addr) + return 0 + } +} + +func simpleReadWrite(errChan chan error, outChan chan string, conn io.ReadWriter, messageRecvCount *uint64) { + go func() { + buffer := make([]byte, 8192) + n, err := conn.Read(buffer) + if err != nil { + errChan <- err + return + } + + outChan <- string(buffer[:n]) + atomic.AddUint64(messageRecvCount, 1) + }() + + for { + if atomic.LoadUint64(messageRecvCount) == 2 { + break + } else if _, err := conn.Write([]byte(testMessage)); err != nil { + errChan <- err + break + } + + time.Sleep(messageRetry) + } +} + +type comm struct { + ctx context.Context + clientConfig, serverConfig *dtls.Config + serverPort int + messageRecvCount *uint64 // Counter to make sure both sides got a message + clientMutex *sync.Mutex + clientConn net.Conn + serverMutex *sync.Mutex + serverConn net.Conn + serverListener net.Listener + serverReady chan struct{} + errChan chan error + clientChan chan string + serverChan chan string + client func(*comm) + server func(*comm) +} + +func newComm(ctx context.Context, clientConfig, serverConfig *dtls.Config, serverPort int, server, client func(*comm)) *comm { + messageRecvCount := uint64(0) + c := &comm{ + ctx: ctx, + clientConfig: clientConfig, + serverConfig: serverConfig, + serverPort: serverPort, + messageRecvCount: &messageRecvCount, + clientMutex: &sync.Mutex{}, + serverMutex: &sync.Mutex{}, + serverReady: make(chan struct{}), + errChan: make(chan error), + clientChan: make(chan string), + serverChan: make(chan string), + server: server, + client: client, + } + return c +} + +func (c *comm) assert(t *testing.T) { + // DTLS Client + go c.client(c) + + // DTLS Server + go c.server(c) + + defer func() { + if c.clientConn != nil { + if err := c.clientConn.Close(); err != nil { + t.Fatal(err) + } + } + if c.serverConn != nil { + if err := c.serverConn.Close(); err != nil { + t.Fatal(err) + } + } + if c.serverListener != nil { + if err := c.serverListener.Close(); err != nil { + t.Fatal(err) + } + } + }() + + func() { + seenClient, seenServer := false, false + for { + select { + case err := <-c.errChan: + t.Fatal(err) + case <-time.After(testTimeLimit): + t.Fatalf("Test timeout, seenClient %t seenServer %t", seenClient, seenServer) + case clientMsg := <-c.clientChan: + if clientMsg != testMessage { + t.Fatalf("clientMsg does not equal test message: %s %s", clientMsg, 
testMessage) + } + + seenClient = true + if seenClient && seenServer { + return + } + case serverMsg := <-c.serverChan: + if serverMsg != testMessage { + t.Fatalf("serverMsg does not equal test message: %s %s", serverMsg, testMessage) + } + + seenServer = true + if seenClient && seenServer { + return + } + } + } + }() +} + +func clientPion(c *comm) { + select { + case <-c.serverReady: + // OK + case <-time.After(time.Second): + c.errChan <- errServerTimeout + } + + c.clientMutex.Lock() + defer c.clientMutex.Unlock() + + var err error + c.clientConn, err = dtls.DialWithContext(c.ctx, "udp", + &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: c.serverPort}, + c.clientConfig, + ) + if err != nil { + c.errChan <- err + return + } + + simpleReadWrite(c.errChan, c.clientChan, c.clientConn, c.messageRecvCount) +} + +func serverPion(c *comm) { + c.serverMutex.Lock() + defer c.serverMutex.Unlock() + + var err error + c.serverListener, err = dtls.Listen("udp", + &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: c.serverPort}, + c.serverConfig, + ) + if err != nil { + c.errChan <- err + return + } + c.serverReady <- struct{}{} + c.serverConn, err = c.serverListener.Accept() + if err != nil { + c.errChan <- err + return + } + + simpleReadWrite(c.errChan, c.serverChan, c.serverConn, c.messageRecvCount) +} + +/* + Simple DTLS Client/Server can communicate + - Assert that you can send messages both ways + - Assert that Close() on both ends work + - Assert that no Goroutines are leaked +*/ +func testPionE2ESimple(t *testing.T, server, client func(*comm)) { + lim := test.TimeOut(time.Second * 30) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + for _, cipherSuite := range []dtls.CipherSuiteID{ + dtls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + dtls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + dtls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + } { + cipherSuite := cipherSuite + t.Run(cipherSuite.String(), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + cert, err := selfsign.GenerateSelfSignedWithDNS("localhost") + if err != nil { + t.Fatal(err) + } + + cfg := &dtls.Config{ + Certificates: []tls.Certificate{cert}, + CipherSuites: []dtls.CipherSuiteID{cipherSuite}, + InsecureSkipVerify: true, + } + serverPort := randomPort(t) + comm := newComm(ctx, cfg, cfg, serverPort, server, client) + comm.assert(t) + }) + } +} + +func testPionE2ESimplePSK(t *testing.T, server, client func(*comm)) { + lim := test.TimeOut(time.Second * 30) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + for _, cipherSuite := range []dtls.CipherSuiteID{ + dtls.TLS_PSK_WITH_AES_128_CCM, + dtls.TLS_PSK_WITH_AES_128_CCM_8, + dtls.TLS_PSK_WITH_AES_256_CCM_8, + dtls.TLS_PSK_WITH_AES_128_GCM_SHA256, + dtls.TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, + } { + cipherSuite := cipherSuite + t.Run(cipherSuite.String(), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + cfg := &dtls.Config{ + PSK: func(hint []byte) ([]byte, error) { + return []byte{0xAB, 0xC1, 0x23}, nil + }, + PSKIdentityHint: []byte{0x01, 0x02, 0x03, 0x04, 0x05}, + CipherSuites: []dtls.CipherSuiteID{cipherSuite}, + } + serverPort := randomPort(t) + comm := newComm(ctx, cfg, cfg, serverPort, server, client) + comm.assert(t) + }) + } +} + +func testPionE2EMTUs(t *testing.T, server, client func(*comm)) { + lim := test.TimeOut(time.Second * 30) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + 
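As an aside, the comm harness above boils down to a small amount of public pion/dtls API: generate a self-signed certificate, Listen/Accept on the server, DialWithContext on the client, then exchange a message. A trimmed, self-contained sketch of that exchange outside the test framework follows; error handling is reduced to panics and it is not part of this patch.

package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"time"

	"github.com/pion/dtls/v2"
	"github.com/pion/dtls/v2/pkg/crypto/selfsign"
)

func main() {
	cert, err := selfsign.GenerateSelfSigned()
	if err != nil {
		panic(err)
	}
	cfg := &dtls.Config{
		Certificates:       []tls.Certificate{cert},
		CipherSuites:       []dtls.CipherSuiteID{dtls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256},
		InsecureSkipVerify: true, // test-style config: self-signed certificate
	}

	// Server side: Accept blocks until a client completes the handshake.
	listener, err := dtls.Listen("udp", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 0}, cfg)
	if err != nil {
		panic(err)
	}
	defer listener.Close()
	go func() {
		conn, err := listener.Accept()
		if err != nil {
			return
		}
		defer conn.Close()
		buf := make([]byte, 64)
		n, err := conn.Read(buf)
		if err != nil {
			return
		}
		conn.Write(buf[:n]) // echo the message back
	}()

	// Client side: dial with a deadline so a broken handshake cannot hang.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	raddr, err := net.ResolveUDPAddr("udp", listener.Addr().String())
	if err != nil {
		panic(err)
	}
	client, err := dtls.DialWithContext(ctx, "udp", raddr, cfg)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	client.Write([]byte("Hello World"))
	buf := make([]byte, 64)
	n, err := client.Read(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf[:n]))
}

The MTU, PSK, and client-certificate variants exercised by these tests differ only in the fields set on dtls.Config.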
for _, mtu := range []int{ + 10000, + 1000, + 100, + } { + mtu := mtu + t.Run(fmt.Sprintf("MTU%d", mtu), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + cert, err := selfsign.GenerateSelfSignedWithDNS("localhost") + if err != nil { + t.Fatal(err) + } + + cfg := &dtls.Config{ + Certificates: []tls.Certificate{cert}, + CipherSuites: []dtls.CipherSuiteID{dtls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + InsecureSkipVerify: true, + MTU: mtu, + } + serverPort := randomPort(t) + comm := newComm(ctx, cfg, cfg, serverPort, server, client) + comm.assert(t) + }) + } +} + +func testPionE2ESimpleED25519(t *testing.T, server, client func(*comm)) { + lim := test.TimeOut(time.Second * 30) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + for _, cipherSuite := range []dtls.CipherSuiteID{ + dtls.TLS_ECDHE_ECDSA_WITH_AES_128_CCM, + dtls.TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8, + dtls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + dtls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + dtls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + } { + cipherSuite := cipherSuite + t.Run(cipherSuite.String(), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + _, key, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + t.Fatal(err) + } + cert, err := selfsign.SelfSign(key) + if err != nil { + t.Fatal(err) + } + + cfg := &dtls.Config{ + Certificates: []tls.Certificate{cert}, + CipherSuites: []dtls.CipherSuiteID{cipherSuite}, + InsecureSkipVerify: true, + } + serverPort := randomPort(t) + comm := newComm(ctx, cfg, cfg, serverPort, server, client) + comm.assert(t) + }) + } +} + +func testPionE2ESimpleED25519ClientCert(t *testing.T, server, client func(*comm)) { + lim := test.TimeOut(time.Second * 30) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + _, skey, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + t.Fatal(err) + } + scert, err := selfsign.SelfSign(skey) + if err != nil { + t.Fatal(err) + } + + _, ckey, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + t.Fatal(err) + } + ccert, err := selfsign.SelfSign(ckey) + if err != nil { + t.Fatal(err) + } + + scfg := &dtls.Config{ + Certificates: []tls.Certificate{scert}, + CipherSuites: []dtls.CipherSuiteID{dtls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + ClientAuth: dtls.RequireAnyClientCert, + } + ccfg := &dtls.Config{ + Certificates: []tls.Certificate{ccert}, + CipherSuites: []dtls.CipherSuiteID{dtls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + InsecureSkipVerify: true, + } + serverPort := randomPort(t) + comm := newComm(ctx, ccfg, scfg, serverPort, server, client) + comm.assert(t) +} + +func testPionE2ESimpleECDSAClientCert(t *testing.T, server, client func(*comm)) { + lim := test.TimeOut(time.Second * 30) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + scert, err := selfsign.GenerateSelfSigned() + if err != nil { + t.Fatal(err) + } + + ccert, err := selfsign.GenerateSelfSigned() + if err != nil { + t.Fatal(err) + } + + clientCAs := x509.NewCertPool() + caCert, err := x509.ParseCertificate(ccert.Certificate[0]) + if err != nil { + t.Fatal(err) + } + clientCAs.AddCert(caCert) + + scfg := &dtls.Config{ + ClientCAs: clientCAs, + Certificates: []tls.Certificate{scert}, + 
CipherSuites: []dtls.CipherSuiteID{dtls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + ClientAuth: dtls.RequireAnyClientCert, + } + ccfg := &dtls.Config{ + Certificates: []tls.Certificate{ccert}, + CipherSuites: []dtls.CipherSuiteID{dtls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + InsecureSkipVerify: true, + } + serverPort := randomPort(t) + comm := newComm(ctx, ccfg, scfg, serverPort, server, client) + comm.assert(t) +} + +func testPionE2ESimpleRSAClientCert(t *testing.T, server, client func(*comm)) { + lim := test.TimeOut(time.Second * 30) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + spriv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + scert, err := selfsign.SelfSign(spriv) + if err != nil { + t.Fatal(err) + } + + cpriv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + ccert, err := selfsign.SelfSign(cpriv) + if err != nil { + t.Fatal(err) + } + + scfg := &dtls.Config{ + Certificates: []tls.Certificate{scert}, + CipherSuites: []dtls.CipherSuiteID{dtls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + ClientAuth: dtls.RequireAnyClientCert, + } + ccfg := &dtls.Config{ + Certificates: []tls.Certificate{ccert}, + CipherSuites: []dtls.CipherSuiteID{dtls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + InsecureSkipVerify: true, + } + serverPort := randomPort(t) + comm := newComm(ctx, ccfg, scfg, serverPort, server, client) + comm.assert(t) +} + +func TestPionE2ESimple(t *testing.T) { + testPionE2ESimple(t, serverPion, clientPion) +} + +func TestPionE2ESimplePSK(t *testing.T) { + testPionE2ESimplePSK(t, serverPion, clientPion) +} + +func TestPionE2EMTUs(t *testing.T) { + testPionE2EMTUs(t, serverPion, clientPion) +} + +func TestPionE2ESimpleED25519(t *testing.T) { + testPionE2ESimpleED25519(t, serverPion, clientPion) +} + +func TestPionE2ESimpleED25519ClientCert(t *testing.T) { + testPionE2ESimpleED25519ClientCert(t, serverPion, clientPion) +} + +func TestPionE2ESimpleECDSAClientCert(t *testing.T) { + testPionE2ESimpleECDSAClientCert(t, serverPion, clientPion) +} + +func TestPionE2ESimpleRSAClientCert(t *testing.T) { + testPionE2ESimpleRSAClientCert(t, serverPion, clientPion) +} diff --git a/replace/dtls/errors.go b/replace/dtls/errors.go new file mode 100644 index 000000000..025d8645e --- /dev/null +++ b/replace/dtls/errors.go @@ -0,0 +1,157 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" +) + +// Typed errors +var ( + ErrConnClosed = &FatalError{Err: errors.New("conn is closed")} //nolint:goerr113 + + errDeadlineExceeded = &TimeoutError{Err: fmt.Errorf("read/write timeout: %w", context.DeadlineExceeded)} + errInvalidContentType = &TemporaryError{Err: errors.New("invalid content type")} //nolint:goerr113 + + errBufferTooSmall = &TemporaryError{Err: errors.New("buffer is too small")} //nolint:goerr113 + errContextUnsupported = &TemporaryError{Err: errors.New("context is not supported for ExportKeyingMaterial")} //nolint:goerr113 + errHandshakeInProgress = &TemporaryError{Err: errors.New("handshake is in progress")} //nolint:goerr113 + errReservedExportKeyingMaterial = &TemporaryError{Err: errors.New("ExportKeyingMaterial can not be used with a reserved label")} //nolint:goerr113 + errApplicationDataEpochZero = 
&TemporaryError{Err: errors.New("ApplicationData with epoch of 0")} //nolint:goerr113 + errUnhandledContextType = &TemporaryError{Err: errors.New("unhandled contentType")} //nolint:goerr113 + + errCertificateVerifyNoCertificate = &FatalError{Err: errors.New("client sent certificate verify but we have no certificate to verify")} //nolint:goerr113 + errCipherSuiteNoIntersection = &FatalError{Err: errors.New("client+server do not support any shared cipher suites")} //nolint:goerr113 + errClientCertificateNotVerified = &FatalError{Err: errors.New("client sent certificate but did not verify it")} //nolint:goerr113 + errClientCertificateRequired = &FatalError{Err: errors.New("server required client verification, but got none")} //nolint:goerr113 + errClientNoMatchingSRTPProfile = &FatalError{Err: errors.New("server responded with SRTP Profile we do not support")} //nolint:goerr113 + errClientRequiredButNoServerEMS = &FatalError{Err: errors.New("client required Extended Master Secret extension, but server does not support it")} //nolint:goerr113 + errCookieMismatch = &FatalError{Err: errors.New("client+server cookie does not match")} //nolint:goerr113 + errIdentityNoPSK = &FatalError{Err: errors.New("PSK Identity Hint provided but PSK is nil")} //nolint:goerr113 + errInvalidCertificate = &FatalError{Err: errors.New("no certificate provided")} //nolint:goerr113 + errInvalidCipherSuite = &FatalError{Err: errors.New("invalid or unknown cipher suite")} //nolint:goerr113 + errInvalidECDSASignature = &FatalError{Err: errors.New("ECDSA signature contained zero or negative values")} //nolint:goerr113 + errInvalidPrivateKey = &FatalError{Err: errors.New("invalid private key type")} //nolint:goerr113 + errInvalidSignatureAlgorithm = &FatalError{Err: errors.New("invalid signature algorithm")} //nolint:goerr113 + errKeySignatureMismatch = &FatalError{Err: errors.New("expected and actual key signature do not match")} //nolint:goerr113 + errNilNextConn = &FatalError{Err: errors.New("Conn can not be created with a nil nextConn")} //nolint:goerr113 + errNoAvailableCipherSuites = &FatalError{Err: errors.New("connection can not be created, no CipherSuites satisfy this Config")} //nolint:goerr113 + errNoAvailablePSKCipherSuite = &FatalError{Err: errors.New("connection can not be created, pre-shared key present but no compatible CipherSuite")} //nolint:goerr113 + errNoAvailableCertificateCipherSuite = &FatalError{Err: errors.New("connection can not be created, certificate present but no compatible CipherSuite")} //nolint:goerr113 + errNoAvailableSignatureSchemes = &FatalError{Err: errors.New("connection can not be created, no SignatureScheme satisfy this Config")} //nolint:goerr113 + errNoCertificates = &FatalError{Err: errors.New("no certificates configured")} //nolint:goerr113 + errNoConfigProvided = &FatalError{Err: errors.New("no config provided")} //nolint:goerr113 + errNoSupportedEllipticCurves = &FatalError{Err: errors.New("client requested zero or more elliptic curves that are not supported by the server")} //nolint:goerr113 + errUnsupportedProtocolVersion = &FatalError{Err: errors.New("unsupported protocol version")} //nolint:goerr113 + errPSKAndIdentityMustBeSetForClient = &FatalError{Err: errors.New("PSK and PSK Identity Hint must both be set for client")} //nolint:goerr113 + errRequestedButNoSRTPExtension = &FatalError{Err: errors.New("SRTP support was requested but server did not respond with use_srtp extension")} //nolint:goerr113 + errServerNoMatchingSRTPProfile = &FatalError{Err: errors.New("client 
requested SRTP but we have no matching profiles")} //nolint:goerr113 + errServerRequiredButNoClientEMS = &FatalError{Err: errors.New("server requires the Extended Master Secret extension, but the client does not support it")} //nolint:goerr113 + errVerifyDataMismatch = &FatalError{Err: errors.New("expected and actual verify data does not match")} //nolint:goerr113 + errNotAcceptableCertificateChain = &FatalError{Err: errors.New("certificate chain is not signed by an acceptable CA")} //nolint:goerr113 + + errInvalidFlight = &InternalError{Err: errors.New("invalid flight number")} //nolint:goerr113 + errKeySignatureGenerateUnimplemented = &InternalError{Err: errors.New("unable to generate key signature, unimplemented")} //nolint:goerr113 + errKeySignatureVerifyUnimplemented = &InternalError{Err: errors.New("unable to verify key signature, unimplemented")} //nolint:goerr113 + errLengthMismatch = &InternalError{Err: errors.New("data length and declared length do not match")} //nolint:goerr113 + errSequenceNumberOverflow = &InternalError{Err: errors.New("sequence number overflow")} //nolint:goerr113 + errInvalidFSMTransition = &InternalError{Err: errors.New("invalid state machine transition")} //nolint:goerr113 + errFailedToAccessPoolReadBuffer = &InternalError{Err: errors.New("failed to access pool read buffer")} //nolint:goerr113 + errFragmentBufferOverflow = &InternalError{Err: errors.New("fragment buffer overflow")} //nolint:goerr113 +) + +// FatalError indicates that the DTLS connection is no longer available. +// It is mainly caused by wrong configuration of server or client. +type FatalError = protocol.FatalError + +// InternalError indicates and internal error caused by the implementation, and the DTLS connection is no longer available. +// It is mainly caused by bugs or tried to use unimplemented features. +type InternalError = protocol.InternalError + +// TemporaryError indicates that the DTLS connection is still available, but the request was failed temporary. +type TemporaryError = protocol.TemporaryError + +// TimeoutError indicates that the request was timed out. +type TimeoutError = protocol.TimeoutError + +// HandshakeError indicates that the handshake failed. +type HandshakeError = protocol.HandshakeError + +// errInvalidCipherSuite indicates an attempt at using an unsupported cipher suite. +type invalidCipherSuiteError struct { + id CipherSuiteID +} + +func (e *invalidCipherSuiteError) Error() string { + return fmt.Sprintf("CipherSuite with id(%d) is not valid", e.id) +} + +func (e *invalidCipherSuiteError) Is(err error) bool { + var other *invalidCipherSuiteError + if errors.As(err, &other) { + return e.id == other.id + } + return false +} + +// errAlert wraps DTLS alert notification as an error +type alertError struct { + *alert.Alert +} + +func (e *alertError) Error() string { + return fmt.Sprintf("alert: %s", e.Alert.String()) +} + +func (e *alertError) IsFatalOrCloseNotify() bool { + return e.Level == alert.Fatal || e.Description == alert.CloseNotify +} + +func (e *alertError) Is(err error) bool { + var other *alertError + if errors.As(err, &other) { + return e.Level == other.Level && e.Description == other.Description + } + return false +} + +// netError translates an error from underlying Conn to corresponding net.Error. +func netError(err error) error { + switch { + case errors.Is(err, io.EOF), errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded): + // Return io.EOF and context errors as is. 
+ return err + } + + var ( + ne net.Error + opError *net.OpError + se *os.SyscallError + ) + + if errors.As(err, &opError) { + if errors.As(opError, &se) { + if se.Timeout() { + return &TimeoutError{Err: err} + } + if isOpErrorTemporary(se) { + return &TemporaryError{Err: err} + } + } + } + + if errors.As(err, &ne) { + return err + } + + return &FatalError{Err: err} +} diff --git a/replace/dtls/errors_errno.go b/replace/dtls/errors_errno.go new file mode 100644 index 000000000..f8e424eb3 --- /dev/null +++ b/replace/dtls/errors_errno.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +//go:build aix || darwin || dragonfly || freebsd || linux || nacl || nacljs || netbsd || openbsd || solaris || windows +// +build aix darwin dragonfly freebsd linux nacl nacljs netbsd openbsd solaris windows + +// For systems having syscall.Errno. +// Update build targets by following command: +// $ grep -R ECONN $(go env GOROOT)/src/syscall/zerrors_*.go \ +// | tr "." "_" | cut -d"_" -f"2" | sort | uniq + +package dtls + +import ( + "errors" + "os" + "syscall" +) + +func isOpErrorTemporary(err *os.SyscallError) bool { + return errors.Is(err.Err, syscall.ECONNREFUSED) +} diff --git a/replace/dtls/errors_errno_test.go b/replace/dtls/errors_errno_test.go new file mode 100644 index 000000000..5b4b209d1 --- /dev/null +++ b/replace/dtls/errors_errno_test.go @@ -0,0 +1,48 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +//go:build aix || darwin || dragonfly || freebsd || linux || nacl || nacljs || netbsd || openbsd || solaris || windows +// +build aix darwin dragonfly freebsd linux nacl nacljs netbsd openbsd solaris windows + +// For systems having syscall.Errno. +// The build target must be same as errors_errno.go. + +package dtls + +import ( + "errors" + "net" + "testing" +) + +func TestErrorsTemporary(t *testing.T) { + addrListen, errListen := net.ResolveUDPAddr("udp", "localhost:0") + if errListen != nil { + t.Fatalf("Unexpected error: %v", errListen) + } + // Server is not listening. + conn, errDial := net.DialUDP("udp", nil, addrListen) + if errDial != nil { + t.Fatalf("Unexpected error: %v", errDial) + } + + _, _ = conn.Write([]byte{0x00}) // trigger + _, err := conn.Read(make([]byte, 10)) + _ = conn.Close() + + if err == nil { + t.Skip("ECONNREFUSED is not set by system") + } + + var ne net.Error + if !errors.As(netError(err), &ne) { + t.Fatalf("netError must return net.Error") + } + + if ne.Timeout() { + t.Errorf("%v must not be timeout error", err) + } + if !ne.Temporary() { //nolint:staticcheck + t.Errorf("%v must be temporary error", err) + } +} diff --git a/replace/dtls/errors_noerrno.go b/replace/dtls/errors_noerrno.go new file mode 100644 index 000000000..844ff1e75 --- /dev/null +++ b/replace/dtls/errors_noerrno.go @@ -0,0 +1,18 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !nacl && !nacljs && !netbsd && !openbsd && !solaris && !windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!nacl,!nacljs,!netbsd,!openbsd,!solaris,!windows + +// For systems without syscall.Errno. 
+// Build targets must be inverse of errors_errno.go + +package dtls + +import ( + "os" +) + +func isOpErrorTemporary(err *os.SyscallError) bool { + return false +} diff --git a/replace/dtls/errors_test.go b/replace/dtls/errors_test.go new file mode 100644 index 000000000..05c2c2745 --- /dev/null +++ b/replace/dtls/errors_test.go @@ -0,0 +1,86 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "errors" + "fmt" + "net" + "testing" +) + +var errExample = errors.New("an example error") + +func TestErrorUnwrap(t *testing.T) { + cases := []struct { + err error + errUnwrapped []error + }{ + { + &FatalError{Err: errExample}, + []error{errExample}, + }, + { + &TemporaryError{Err: errExample}, + []error{errExample}, + }, + { + &InternalError{Err: errExample}, + []error{errExample}, + }, + { + &TimeoutError{Err: errExample}, + []error{errExample}, + }, + { + &HandshakeError{Err: errExample}, + []error{errExample}, + }, + } + for _, c := range cases { + c := c + t.Run(fmt.Sprintf("%T", c.err), func(t *testing.T) { + err := c.err + for _, unwrapped := range c.errUnwrapped { + e := errors.Unwrap(err) + if !errors.Is(e, unwrapped) { + t.Errorf("Unwrapped error is expected to be '%v', got '%v'", unwrapped, e) + } + } + }) + } +} + +func TestErrorNetError(t *testing.T) { + cases := []struct { + err error + str string + timeout, temporary bool + }{ + {&FatalError{Err: errExample}, "dtls fatal: an example error", false, false}, + {&TemporaryError{Err: errExample}, "dtls temporary: an example error", false, true}, + {&InternalError{Err: errExample}, "dtls internal: an example error", false, false}, + {&TimeoutError{Err: errExample}, "dtls timeout: an example error", true, true}, + {&HandshakeError{Err: errExample}, "handshake error: an example error", false, false}, + {&HandshakeError{Err: &TimeoutError{Err: errExample}}, "handshake error: dtls timeout: an example error", true, true}, + } + for _, c := range cases { + c := c + t.Run(fmt.Sprintf("%T", c.err), func(t *testing.T) { + var ne net.Error + if !errors.As(c.err, &ne) { + t.Fatalf("%T doesn't implement net.Error", c.err) + } + if ne.Timeout() != c.timeout { + t.Errorf("%T.Timeout() should be %v", c.err, c.timeout) + } + if ne.Temporary() != c.temporary { //nolint:staticcheck + t.Errorf("%T.Temporary() should be %v", c.err, c.temporary) + } + if ne.Error() != c.str { + t.Errorf("%T.Error() should be %v", c.err, c.str) + } + }) + } +} diff --git a/replace/dtls/flight.go b/replace/dtls/flight.go new file mode 100644 index 000000000..cfa58c574 --- /dev/null +++ b/replace/dtls/flight.go @@ -0,0 +1,104 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +/* + DTLS messages are grouped into a series of message flights, according + to the diagrams below. Although each flight of messages may consist + of a number of messages, they should be viewed as monolithic for the + purpose of timeout and retransmission. 
+ https://tools.ietf.org/html/rfc4347#section-4.2.4 + + Message flights for full handshake: + + Client Server + ------ ------ + Waiting Flight 0 + + ClientHello --------> Flight 1 + + <------- HelloVerifyRequest Flight 2 + + ClientHello --------> Flight 3 + + ServerHello \ + Certificate* \ + ServerKeyExchange* Flight 4 + CertificateRequest* / + <-------- ServerHelloDone / + + Certificate* \ + ClientKeyExchange \ + CertificateVerify* Flight 5 + [ChangeCipherSpec] / + Finished --------> / + + [ChangeCipherSpec] \ Flight 6 + <-------- Finished / + + Message flights for session-resuming handshake (no cookie exchange): + + Client Server + ------ ------ + Waiting Flight 0 + + ClientHello --------> Flight 1 + + ServerHello \ + [ChangeCipherSpec] Flight 4b + <-------- Finished / + + [ChangeCipherSpec] \ Flight 5b + Finished --------> / + + [ChangeCipherSpec] \ Flight 6 + <-------- Finished / +*/ + +type flightVal uint8 + +const ( + flight0 flightVal = iota + 1 + flight1 + flight2 + flight3 + flight4 + flight4b + flight5 + flight5b + flight6 +) + +func (f flightVal) String() string { + switch f { + case flight0: + return "Flight 0" + case flight1: + return "Flight 1" + case flight2: + return "Flight 2" + case flight3: + return "Flight 3" + case flight4: + return "Flight 4" + case flight4b: + return "Flight 4b" + case flight5: + return "Flight 5" + case flight5b: + return "Flight 5b" + case flight6: + return "Flight 6" + default: + return "Invalid Flight" + } +} + +func (f flightVal) isLastSendFlight() bool { + return f == flight6 || f == flight5b +} + +func (f flightVal) isLastRecvFlight() bool { + return f == flight5 || f == flight4b +} diff --git a/replace/dtls/flight0handler.go b/replace/dtls/flight0handler.go new file mode 100644 index 000000000..21ad205a6 --- /dev/null +++ b/replace/dtls/flight0handler.go @@ -0,0 +1,138 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "context" + "crypto/rand" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/extension" + "github.com/pion/dtls/v2/pkg/protocol/handshake" +) + +func flight0Parse(_ context.Context, _ flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + seq, msgs, ok := cache.fullPullMap(0, state.cipherSuite, + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + ) + if !ok { + // No valid message received. 
Keep reading + return 0, nil, nil + } + state.handshakeRecvSequence = seq + + var clientHello *handshake.MessageClientHello + + // Validate type + if clientHello, ok = msgs[handshake.TypeClientHello].(*handshake.MessageClientHello); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + if !clientHello.Version.Equal(protocol.Version1_2) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion + } + + state.remoteRandom = clientHello.Random + + cipherSuites := []CipherSuite{} + for _, id := range clientHello.CipherSuiteIDs { + if c := cipherSuiteForID(CipherSuiteID(id), cfg.customCipherSuites); c != nil { + cipherSuites = append(cipherSuites, c) + } + } + + if state.cipherSuite, ok = findMatchingCipherSuite(cipherSuites, cfg.localCipherSuites); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errCipherSuiteNoIntersection + } + + for _, val := range clientHello.Extensions { + switch e := val.(type) { + case *extension.SupportedEllipticCurves: + if len(e.EllipticCurves) == 0 { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errNoSupportedEllipticCurves + } + state.namedCurve = e.EllipticCurves[0] + case *extension.UseSRTP: + profile, ok := findMatchingSRTPProfile(e.ProtectionProfiles, cfg.localSRTPProtectionProfiles) + if !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errServerNoMatchingSRTPProfile + } + state.setSRTPProtectionProfile(profile) + case *extension.UseExtendedMasterSecret: + if cfg.extendedMasterSecret != DisableExtendedMasterSecret { + state.extendedMasterSecret = true + } + case *extension.ServerName: + state.serverName = e.ServerName // remote server name + case *extension.ALPN: + state.peerSupportedProtocols = e.ProtocolNameList + } + } + + if cfg.extendedMasterSecret == RequireExtendedMasterSecret && !state.extendedMasterSecret { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errServerRequiredButNoClientEMS + } + + if state.localKeypair == nil { + var err error + state.localKeypair, err = elliptic.GenerateKeypair(state.namedCurve) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.IllegalParameter}, err + } + } + + nextFlight := flight2 + + if cfg.insecureSkipHelloVerify { + nextFlight = flight4 + } + + return handleHelloResume(clientHello.SessionID, state, cfg, nextFlight) +} + +func handleHelloResume(sessionID []byte, state *State, cfg *handshakeConfig, next flightVal) (flightVal, *alert.Alert, error) { + if len(sessionID) > 0 && cfg.sessionStore != nil { + if s, err := cfg.sessionStore.Get(sessionID); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } else if s.ID != nil { + cfg.log.Tracef("[handshake] resume session: %x", sessionID) + + state.SessionID = sessionID + state.masterSecret = s.Secret + + if err := state.initCipherSuite(); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + clientRandom := state.localRandom.MarshalFixed() + cfg.writeKeyLog(keyLogLabelTLS12, clientRandom[:], state.masterSecret) + + return flight4b, nil, nil + } + } + return next, nil, nil +} + +func flight0Generate(_ context.Context, _ flightConn, state *State, _ *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { + // Initialize + if !cfg.insecureSkipHelloVerify { + 
state.cookie = make([]byte, cookieLength) + if _, err := rand.Read(state.cookie); err != nil { + return nil, nil, err + } + } + + var zeroEpoch uint16 + state.localEpoch.Store(zeroEpoch) + state.remoteEpoch.Store(zeroEpoch) + state.namedCurve = defaultNamedCurve + + if err := state.localRandom.Populate(); err != nil { + return nil, nil, err + } + + return nil, nil, nil +} diff --git a/replace/dtls/flight1handler.go b/replace/dtls/flight1handler.go new file mode 100644 index 000000000..90aa5e718 --- /dev/null +++ b/replace/dtls/flight1handler.go @@ -0,0 +1,232 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "context" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/signaturehash" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/extension" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" + + inproxy_dtls "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy/dtls" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" +) + +func flight1Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + // HelloVerifyRequest can be skipped by the server, + // so allow ServerHello during flight1 also + seq, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeHelloVerifyRequest, cfg.initialEpoch, false, true}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, true}, + ) + if !ok { + // No valid message received. Keep reading + return 0, nil, nil + } + + if _, ok := msgs[handshake.TypeServerHello]; ok { + // Flight1 and flight2 were skipped. + // Parse as flight3. + return flight3Parse(ctx, c, state, cache, cfg) + } + + if h, ok := msgs[handshake.TypeHelloVerifyRequest].(*handshake.MessageHelloVerifyRequest); ok { + // DTLS 1.2 clients must not assume that the server will use the protocol version + // specified in HelloVerifyRequest message. RFC 6347 Section 4.2.1 + if !h.Version.Equal(protocol.Version1_0) && !h.Version.Equal(protocol.Version1_2) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion + } + state.cookie = append([]byte{}, h.Cookie...) + state.handshakeRecvSequence = seq + return flight3, nil, nil + } + + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil +} + +// [Psiphon] +// The API for this Psiphon fork is identical to upstream, apart from this +// symbol, which may be used to verify that the fork is used when compiling. 
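One way a downstream build could use the constant declared just below to catch an accidental fall-back to upstream pion/dtls; this is illustrative only, and the repository may verify the fork differently.

package main

import (
	"fmt"

	"github.com/pion/dtls/v2"
)

func main() {
	// With the fork in place (via the module replace directive) this compiles
	// and prints true; against upstream pion/dtls the build fails because
	// IsPsiphon is undefined, surfacing the misconfiguration immediately.
	fmt.Println("using Psiphon dtls fork:", dtls.IsPsiphon)
}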
+const IsPsiphon = true + +func flight1Generate(ctx context.Context, c flightConn, state *State, _ *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { + var zeroEpoch uint16 + state.localEpoch.Store(zeroEpoch) + state.remoteEpoch.Store(zeroEpoch) + state.namedCurve = defaultNamedCurve + state.cookie = nil + + if err := state.localRandom.Populate(); err != nil { + return nil, nil, err + } + + // [Psiphon] + // Conjure DTLS support, from: https://github.com/mingyech/dtls/commit/a56eccc1 + if state.isClient && cfg.customClientHelloRandom != nil { + state.localRandom.RandomBytes = cfg.customClientHelloRandom() + } + + extensions := []extension.Extension{ + &extension.SupportedSignatureAlgorithms{ + SignatureHashAlgorithms: cfg.localSignatureSchemes, + }, + &extension.RenegotiationInfo{ + RenegotiatedConnection: 0, + }, + } + + var setEllipticCurveCryptographyClientHelloExtensions bool + for _, c := range cfg.localCipherSuites { + if c.ECC() { + setEllipticCurveCryptographyClientHelloExtensions = true + break + } + } + + if setEllipticCurveCryptographyClientHelloExtensions { + extensions = append(extensions, []extension.Extension{ + &extension.SupportedEllipticCurves{ + EllipticCurves: cfg.ellipticCurves, + }, + &extension.SupportedPointFormats{ + PointFormats: []elliptic.CurvePointFormat{elliptic.CurvePointFormatUncompressed}, + }, + }...) + } + + if len(cfg.localSRTPProtectionProfiles) > 0 { + extensions = append(extensions, &extension.UseSRTP{ + ProtectionProfiles: cfg.localSRTPProtectionProfiles, + }) + } + + if cfg.extendedMasterSecret == RequestExtendedMasterSecret || + cfg.extendedMasterSecret == RequireExtendedMasterSecret { + extensions = append(extensions, &extension.UseExtendedMasterSecret{ + Supported: true, + }) + } + + if len(cfg.serverName) > 0 { + extensions = append(extensions, &extension.ServerName{ServerName: cfg.serverName}) + } + + if len(cfg.supportedProtocols) > 0 { + extensions = append(extensions, &extension.ALPN{ProtocolNameList: cfg.supportedProtocols}) + } + + if cfg.sessionStore != nil { + cfg.log.Tracef("[handshake] try to resume session") + if s, err := cfg.sessionStore.Get(c.sessionKey()); err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } else if s.ID != nil { + cfg.log.Tracef("[handshake] get saved session: %x", s.ID) + + state.SessionID = s.ID + state.masterSecret = s.Secret + } + } + + cipherSuites := cipherSuiteIDs(cfg.localCipherSuites) + + // [Psiphon] + // Randomize ClientHello + seed, err := inproxy_dtls.GetDTLSSeed(ctx) + if err != nil { + return nil, nil, err + } + if seed != nil { + + PRNG := prng.NewPRNGWithSeed(seed) + + cut := func(length int) int { + n := length + for ; n > 1; n-- { + if !PRNG.FlipCoin() { + break + } + } + return n + } + + PRNG.Shuffle(len(cipherSuites), func(i, j int) { + cipherSuites[i], cipherSuites[j] = cipherSuites[j], cipherSuites[i] + }) + cipherSuites = cipherSuites[:cut(len(cipherSuites))] + + for _, ext := range extensions { + switch e := ext.(type) { + case *extension.SupportedSignatureAlgorithms: + + // Limitation: to ensure compatibility with the ECDSA P-256 certificates generated by pion/webrtc, + // https://github.com/pion/webrtc/blob/1df634e1188e06c08fe87753c7bdd576a29e0c92/dtlstransport.go#L84-L92, + // the corresponding signature/hash algorithm needs to remain in the first position. + + e.SignatureHashAlgorithms = append([]signaturehash.Algorithm(nil), e.SignatureHashAlgorithms...) 
+ PRNG.Shuffle(len(e.SignatureHashAlgorithms)-1, func(i, j int) { + e.SignatureHashAlgorithms[i+1], e.SignatureHashAlgorithms[j+1] = + e.SignatureHashAlgorithms[j+1], e.SignatureHashAlgorithms[i+1] + }) + e.SignatureHashAlgorithms = e.SignatureHashAlgorithms[:cut(len(e.SignatureHashAlgorithms))] + + case *extension.SupportedEllipticCurves: + + e.EllipticCurves = append([]elliptic.Curve(nil), e.EllipticCurves...) + PRNG.Shuffle(len(e.EllipticCurves), func(i, j int) { + e.EllipticCurves[i], e.EllipticCurves[j] = + e.EllipticCurves[j], e.EllipticCurves[i] + }) + e.EllipticCurves = e.EllipticCurves[:cut(len(e.EllipticCurves))] + + case *extension.SupportedPointFormats: + + e.PointFormats = append([]elliptic.CurvePointFormat(nil), e.PointFormats...) + PRNG.Shuffle(len(e.PointFormats), func(i, j int) { + e.PointFormats[i], e.PointFormats[j] = + e.PointFormats[j], e.PointFormats[i] + }) + e.PointFormats = e.PointFormats[:cut(len(e.PointFormats))] + + case *extension.UseSRTP: + + e.ProtectionProfiles = append([]SRTPProtectionProfile(nil), e.ProtectionProfiles...) + PRNG.Shuffle(len(e.ProtectionProfiles), func(i, j int) { + e.ProtectionProfiles[i], e.ProtectionProfiles[j] = + e.ProtectionProfiles[j], e.ProtectionProfiles[i] + }) + e.ProtectionProfiles = e.ProtectionProfiles[:cut(len(e.ProtectionProfiles))] + } + } + + PRNG.Shuffle(len(extensions), func(i, j int) { + extensions[i], extensions[j] = extensions[j], extensions[i] + }) + } + + return []*packet{ + { + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageClientHello{ + Version: protocol.Version1_2, + SessionID: state.SessionID, + Cookie: state.cookie, + Random: state.localRandom, + CipherSuiteIDs: cipherSuites, + CompressionMethods: defaultCompressionMethods(), + Extensions: extensions, + }, + }, + }, + }, + }, nil, nil +} diff --git a/replace/dtls/flight2handler.go b/replace/dtls/flight2handler.go new file mode 100644 index 000000000..e765db69e --- /dev/null +++ b/replace/dtls/flight2handler.go @@ -0,0 +1,71 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "bytes" + "context" + "errors" + + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight2Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + seq, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + ) + if !ok { + // Client may retransmit the first ClientHello when HelloVerifyRequest is dropped. + // Parse as flight 0 in this case. 
+ return flight0Parse(ctx, c, state, cache, cfg) + } + state.handshakeRecvSequence = seq + + var clientHello *handshake.MessageClientHello + + // Validate type + if clientHello, ok = msgs[handshake.TypeClientHello].(*handshake.MessageClientHello); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + if !clientHello.Version.Equal(protocol.Version1_2) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion + } + + if len(clientHello.Cookie) == 0 { + return 0, nil, nil + } + if !bytes.Equal(state.cookie, clientHello.Cookie) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.AccessDenied}, errCookieMismatch + } + return flight4, nil, nil +} + +func flight2Generate(_ context.Context, _ flightConn, state *State, _ *handshakeCache, _ *handshakeConfig) ([]*packet, *alert.Alert, error) { + + // [Psiphon] + // With SetDTLSInsecureSkipHelloVerify set, this should never be called, + // so handshake randomization is not implemented here. + return nil, nil, errors.New("unexpected flight2Generate call") + + state.handshakeSendSequence = 0 + return []*packet{ + { + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageHelloVerifyRequest{ + Version: protocol.Version1_2, + Cookie: state.cookie, + }, + }, + }, + }, + }, nil, nil +} diff --git a/replace/dtls/flight3handler.go b/replace/dtls/flight3handler.go new file mode 100644 index 000000000..34046c0d1 --- /dev/null +++ b/replace/dtls/flight3handler.go @@ -0,0 +1,298 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "bytes" + "context" + "errors" + + "github.com/pion/dtls/v2/internal/ciphersuite/types" + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/extension" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight3Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { //nolint:gocognit + // Clients may receive multiple HelloVerifyRequest messages with different cookies. + // Clients SHOULD handle this by sending a new ClientHello with a cookie in response + // to the new HelloVerifyRequest. RFC 6347 Section 4.2.1 + seq, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeHelloVerifyRequest, cfg.initialEpoch, false, true}, + ) + if ok { + if h, msgOk := msgs[handshake.TypeHelloVerifyRequest].(*handshake.MessageHelloVerifyRequest); msgOk { + // DTLS 1.2 clients must not assume that the server will use the protocol version + // specified in HelloVerifyRequest message. RFC 6347 Section 4.2.1 + if !h.Version.Equal(protocol.Version1_0) && !h.Version.Equal(protocol.Version1_2) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion + } + state.cookie = append([]byte{}, h.Cookie...) 
+ state.handshakeRecvSequence = seq + return flight3, nil, nil + } + } + + _, msgs, ok = cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + ) + if !ok { + // Don't have enough messages. Keep reading + return 0, nil, nil + } + + if h, msgOk := msgs[handshake.TypeServerHello].(*handshake.MessageServerHello); msgOk { + if !h.Version.Equal(protocol.Version1_2) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion + } + for _, v := range h.Extensions { + switch e := v.(type) { + case *extension.UseSRTP: + profile, found := findMatchingSRTPProfile(e.ProtectionProfiles, cfg.localSRTPProtectionProfiles) + if !found { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.IllegalParameter}, errClientNoMatchingSRTPProfile + } + state.setSRTPProtectionProfile(profile) + case *extension.UseExtendedMasterSecret: + if cfg.extendedMasterSecret != DisableExtendedMasterSecret { + state.extendedMasterSecret = true + } + case *extension.ALPN: + if len(e.ProtocolNameList) > 1 { // This should be exactly 1, the zero case is handle when unmarshalling + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, extension.ErrALPNInvalidFormat // Meh, internal error? + } + state.NegotiatedProtocol = e.ProtocolNameList[0] + } + } + if cfg.extendedMasterSecret == RequireExtendedMasterSecret && !state.extendedMasterSecret { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errClientRequiredButNoServerEMS + } + if len(cfg.localSRTPProtectionProfiles) > 0 && state.getSRTPProtectionProfile() == 0 { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errRequestedButNoSRTPExtension + } + + remoteCipherSuite := cipherSuiteForID(CipherSuiteID(*h.CipherSuiteID), cfg.customCipherSuites) + if remoteCipherSuite == nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errCipherSuiteNoIntersection + } + + selectedCipherSuite, found := findMatchingCipherSuite([]CipherSuite{remoteCipherSuite}, cfg.localCipherSuites) + if !found { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errInvalidCipherSuite + } + + state.cipherSuite = selectedCipherSuite + state.remoteRandom = h.Random + cfg.log.Tracef("[handshake] use cipher suite: %s", selectedCipherSuite.String()) + + if len(h.SessionID) > 0 && bytes.Equal(state.SessionID, h.SessionID) { + return handleResumption(ctx, c, state, cache, cfg) + } + + if len(state.SessionID) > 0 { + cfg.log.Tracef("[handshake] clean old session : %s", state.SessionID) + if err := cfg.sessionStore.Del(state.SessionID); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + if cfg.sessionStore == nil { + state.SessionID = []byte{} + } else { + state.SessionID = h.SessionID + } + + state.masterSecret = []byte{} + } + + if cfg.localPSKCallback != nil { + seq, msgs, ok = cache.fullPullMap(state.handshakeRecvSequence+1, state.cipherSuite, + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, true}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + ) + } else { + seq, msgs, ok = cache.fullPullMap(state.handshakeRecvSequence+1, state.cipherSuite, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, true}, + 
handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, true}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + ) + } + if !ok { + // Don't have enough messages. Keep reading + return 0, nil, nil + } + state.handshakeRecvSequence = seq + + if h, ok := msgs[handshake.TypeCertificate].(*handshake.MessageCertificate); ok { + state.PeerCertificates = h.Certificate + } else if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.NoCertificate}, errInvalidCertificate + } + + if h, ok := msgs[handshake.TypeServerKeyExchange].(*handshake.MessageServerKeyExchange); ok { + alertPtr, err := handleServerKeyExchange(c, state, cfg, h) + if err != nil { + return 0, alertPtr, err + } + } + + if _, ok := msgs[handshake.TypeCertificateRequest].(*handshake.MessageCertificateRequest); ok { + state.remoteRequestedCertificate = true + } + + return flight5, nil, nil +} + +func handleResumption(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + if err := state.initCipherSuite(); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + // Now, encrypted packets can be handled + if err := c.handleQueuedPackets(ctx); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence+1, state.cipherSuite, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false}, + ) + if !ok { + // No valid message received. 
Keep reading + return 0, nil, nil + } + + var finished *handshake.MessageFinished + if finished, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + ) + + expectedVerifyData, err := prf.VerifyDataServer(state.masterSecret, plainText, state.cipherSuite.HashFunc()) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + if !bytes.Equal(expectedVerifyData, finished.VerifyData) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errVerifyDataMismatch + } + + clientRandom := state.localRandom.MarshalFixed() + cfg.writeKeyLog(keyLogLabelTLS12, clientRandom[:], state.masterSecret) + + return flight5b, nil, nil +} + +func handleServerKeyExchange(_ flightConn, state *State, cfg *handshakeConfig, h *handshake.MessageServerKeyExchange) (*alert.Alert, error) { + var err error + if state.cipherSuite == nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errInvalidCipherSuite + } + if cfg.localPSKCallback != nil { + var psk []byte + if psk, err = cfg.localPSKCallback(h.IdentityHint); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + state.IdentityHint = h.IdentityHint + switch state.cipherSuite.KeyExchangeAlgorithm() { + case types.KeyExchangeAlgorithmPsk: + state.preMasterSecret = prf.PSKPreMasterSecret(psk) + case (types.KeyExchangeAlgorithmEcdhe | types.KeyExchangeAlgorithmPsk): + if state.localKeypair, err = elliptic.GenerateKeypair(h.NamedCurve); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + state.preMasterSecret, err = prf.EcdhePSKPreMasterSecret(psk, h.PublicKey, state.localKeypair.PrivateKey, state.localKeypair.Curve) + if err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + default: + return &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errInvalidCipherSuite + } + } else { + if state.localKeypair, err = elliptic.GenerateKeypair(h.NamedCurve); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + if state.preMasterSecret, err = prf.PreMasterSecret(h.PublicKey, state.localKeypair.PrivateKey, state.localKeypair.Curve); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + return nil, nil //nolint:nilnil +} + +func flight3Generate(_ context.Context, _ flightConn, state *State, _ *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { + + // [Psiphon] + // With SetDTLSInsecureSkipHelloVerify set, this should never be called, + // so handshake randomization is not implemented here. 
+ return nil, nil, errors.New("unexpected flight3Generate call") + + extensions := []extension.Extension{ + &extension.SupportedSignatureAlgorithms{ + SignatureHashAlgorithms: cfg.localSignatureSchemes, + }, + &extension.RenegotiationInfo{ + RenegotiatedConnection: 0, + }, + } + if state.namedCurve != 0 { + extensions = append(extensions, []extension.Extension{ + &extension.SupportedEllipticCurves{ + EllipticCurves: []elliptic.Curve{elliptic.X25519, elliptic.P256, elliptic.P384}, + }, + &extension.SupportedPointFormats{ + PointFormats: []elliptic.CurvePointFormat{elliptic.CurvePointFormatUncompressed}, + }, + }...) + } + + if len(cfg.localSRTPProtectionProfiles) > 0 { + extensions = append(extensions, &extension.UseSRTP{ + ProtectionProfiles: cfg.localSRTPProtectionProfiles, + }) + } + + if cfg.extendedMasterSecret == RequestExtendedMasterSecret || + cfg.extendedMasterSecret == RequireExtendedMasterSecret { + extensions = append(extensions, &extension.UseExtendedMasterSecret{ + Supported: true, + }) + } + + if len(cfg.serverName) > 0 { + extensions = append(extensions, &extension.ServerName{ServerName: cfg.serverName}) + } + + if len(cfg.supportedProtocols) > 0 { + extensions = append(extensions, &extension.ALPN{ProtocolNameList: cfg.supportedProtocols}) + } + + return []*packet{ + { + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageClientHello{ + Version: protocol.Version1_2, + SessionID: state.SessionID, + Cookie: state.cookie, + Random: state.localRandom, + CipherSuiteIDs: cipherSuiteIDs(cfg.localCipherSuites), + CompressionMethods: defaultCompressionMethods(), + Extensions: extensions, + }, + }, + }, + }, + }, nil, nil +} diff --git a/replace/dtls/flight4bhandler.go b/replace/dtls/flight4bhandler.go new file mode 100644 index 000000000..3f12c9821 --- /dev/null +++ b/replace/dtls/flight4bhandler.go @@ -0,0 +1,144 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "bytes" + "context" + + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/extension" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight4bParse(_ context.Context, _ flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false}, + ) + if !ok { + // No valid message received. 
Keep reading + return 0, nil, nil + } + + var finished *handshake.MessageFinished + if finished, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false}, + ) + + expectedVerifyData, err := prf.VerifyDataClient(state.masterSecret, plainText, state.cipherSuite.HashFunc()) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + if !bytes.Equal(expectedVerifyData, finished.VerifyData) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errVerifyDataMismatch + } + + // Other party may re-transmit the last flight. Keep state to be flight4b. + return flight4b, nil, nil +} + +func flight4bGenerate(_ context.Context, _ flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { + var pkts []*packet + + extensions := []extension.Extension{&extension.RenegotiationInfo{ + RenegotiatedConnection: 0, + }} + if (cfg.extendedMasterSecret == RequestExtendedMasterSecret || + cfg.extendedMasterSecret == RequireExtendedMasterSecret) && state.extendedMasterSecret { + extensions = append(extensions, &extension.UseExtendedMasterSecret{ + Supported: true, + }) + } + if state.getSRTPProtectionProfile() != 0 { + extensions = append(extensions, &extension.UseSRTP{ + ProtectionProfiles: []SRTPProtectionProfile{state.getSRTPProtectionProfile()}, + }) + } + + selectedProto, err := extension.ALPNProtocolSelection(cfg.supportedProtocols, state.peerSupportedProtocols) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.NoApplicationProtocol}, err + } + if selectedProto != "" { + extensions = append(extensions, &extension.ALPN{ + ProtocolNameList: []string{selectedProto}, + }) + state.NegotiatedProtocol = selectedProto + } + + cipherSuiteID := uint16(state.cipherSuite.ID()) + serverHello := &handshake.Handshake{ + Message: &handshake.MessageServerHello{ + Version: protocol.Version1_2, + Random: state.localRandom, + SessionID: state.SessionID, + CipherSuiteID: &cipherSuiteID, + CompressionMethod: defaultCompressionMethods()[0], + Extensions: extensions, + }, + } + + serverHello.Header.MessageSequence = uint16(state.handshakeSendSequence) + + if len(state.localVerifyData) == 0 { + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + ) + raw, err := serverHello.Marshal() + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + plainText = append(plainText, raw...) 
+ + state.localVerifyData, err = prf.VerifyDataServer(state.masterSecret, plainText, state.cipherSuite.HashFunc()) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: serverHello, + }, + }, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &protocol.ChangeCipherSpec{}, + }, + }, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + Epoch: 1, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageFinished{ + VerifyData: state.localVerifyData, + }, + }, + }, + shouldEncrypt: true, + resetLocalSequenceNumber: true, + }, + ) + + return pkts, nil, nil +} diff --git a/replace/dtls/flight4handler.go b/replace/dtls/flight4handler.go new file mode 100644 index 000000000..b2a6278d0 --- /dev/null +++ b/replace/dtls/flight4handler.go @@ -0,0 +1,422 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "context" + "crypto/rand" + "crypto/x509" + + "github.com/pion/dtls/v2/internal/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/crypto/signaturehash" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/extension" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" + + inproxy_dtls "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy/dtls" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng" +) + +func flight4Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { //nolint:gocognit + seq, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, true}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeCertificateVerify, cfg.initialEpoch, true, true}, + ) + if !ok { + // No valid message received. Keep reading + return 0, nil, nil + } + + // Validate type + var clientKeyExchange *handshake.MessageClientKeyExchange + if clientKeyExchange, ok = msgs[handshake.TypeClientKeyExchange].(*handshake.MessageClientKeyExchange); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + if h, hasCert := msgs[handshake.TypeCertificate].(*handshake.MessageCertificate); hasCert { + state.PeerCertificates = h.Certificate + // If the client offer its certificate, just disable session resumption. + // Otherwise, we have to store the certificate identitfication and expire time. + // And we have to check whether this certificate expired, revoked or changed. 
+ // + // https://curl.se/docs/CVE-2016-5419.html + state.SessionID = nil + } + + if h, hasCertVerify := msgs[handshake.TypeCertificateVerify].(*handshake.MessageCertificateVerify); hasCertVerify { + if state.PeerCertificates == nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.NoCertificate}, errCertificateVerifyNoCertificate + } + + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false}, + ) + + // Verify that the pair of hash algorithm and signiture is listed. + var validSignatureScheme bool + for _, ss := range cfg.localSignatureSchemes { + if ss.Hash == h.HashAlgorithm && ss.Signature == h.SignatureAlgorithm { + validSignatureScheme = true + break + } + } + if !validSignatureScheme { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errNoAvailableSignatureSchemes + } + + if err := verifyCertificateVerify(plainText, h.HashAlgorithm, h.Signature, state.PeerCertificates); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + var chains [][]*x509.Certificate + var err error + var verified bool + if cfg.clientAuth >= VerifyClientCertIfGiven { + if chains, err = verifyClientCert(state.PeerCertificates, cfg.clientCAs); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + verified = true + } + if cfg.verifyPeerCertificate != nil { + if err := cfg.verifyPeerCertificate(state.PeerCertificates, chains); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + } + state.peerCertificatesVerified = verified + } else if state.PeerCertificates != nil { + // A certificate was received, but we haven't seen a CertificateVerify + // keep reading until we receive one + return 0, nil, nil + } + + if !state.cipherSuite.IsInitialized() { + serverRandom := state.localRandom.MarshalFixed() + clientRandom := state.remoteRandom.MarshalFixed() + + var err error + var preMasterSecret []byte + if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypePreSharedKey { + var psk []byte + if psk, err = cfg.localPSKCallback(clientKeyExchange.IdentityHint); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + state.IdentityHint = clientKeyExchange.IdentityHint + switch state.cipherSuite.KeyExchangeAlgorithm() { + case CipherSuiteKeyExchangeAlgorithmPsk: + preMasterSecret = prf.PSKPreMasterSecret(psk) + case (CipherSuiteKeyExchangeAlgorithmPsk | CipherSuiteKeyExchangeAlgorithmEcdhe): + if preMasterSecret, err = prf.EcdhePSKPreMasterSecret(psk, clientKeyExchange.PublicKey, state.localKeypair.PrivateKey, state.localKeypair.Curve); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + default: + return 0, &alert.Alert{Level: alert.Fatal, 
Description: alert.InternalError}, errInvalidCipherSuite + } + } else { + preMasterSecret, err = prf.PreMasterSecret(clientKeyExchange.PublicKey, state.localKeypair.PrivateKey, state.localKeypair.Curve) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.IllegalParameter}, err + } + } + + if state.extendedMasterSecret { + var sessionHash []byte + sessionHash, err = cache.sessionHash(state.cipherSuite.HashFunc(), cfg.initialEpoch) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + state.masterSecret, err = prf.ExtendedMasterSecret(preMasterSecret, sessionHash, state.cipherSuite.HashFunc()) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } else { + state.masterSecret, err = prf.MasterSecret(preMasterSecret, clientRandom[:], serverRandom[:], state.cipherSuite.HashFunc()) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + if err := state.cipherSuite.Init(state.masterSecret, clientRandom[:], serverRandom[:], false); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + cfg.writeKeyLog(keyLogLabelTLS12, clientRandom[:], state.masterSecret) + } + + if len(state.SessionID) > 0 { + s := Session{ + ID: state.SessionID, + Secret: state.masterSecret, + } + cfg.log.Tracef("[handshake] save new session: %x", s.ID) + if err := cfg.sessionStore.Set(state.SessionID, s); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + // Now, encrypted packets can be handled + if err := c.handleQueuedPackets(ctx); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + seq, msgs, ok = cache.fullPullMap(seq, state.cipherSuite, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false}, + ) + if !ok { + // No valid message received. 
Keep reading + return 0, nil, nil + } + state.handshakeRecvSequence = seq + + if _, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeAnonymous { + if cfg.verifyConnection != nil { + if err := cfg.verifyConnection(state.clone()); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + } + return flight6, nil, nil + } + + switch cfg.clientAuth { + case RequireAnyClientCert: + if state.PeerCertificates == nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.NoCertificate}, errClientCertificateRequired + } + case VerifyClientCertIfGiven: + if state.PeerCertificates != nil && !state.peerCertificatesVerified { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, errClientCertificateNotVerified + } + case RequireAndVerifyClientCert: + if state.PeerCertificates == nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.NoCertificate}, errClientCertificateRequired + } + if !state.peerCertificatesVerified { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, errClientCertificateNotVerified + } + case NoClientCert, RequestClientCert: + // go to flight6 + } + if cfg.verifyConnection != nil { + if err := cfg.verifyConnection(state.clone()); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + } + + return flight6, nil, nil +} + +func flight4Generate(ctx context.Context, c flightConn, state *State, _ *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { + extensions := []extension.Extension{&extension.RenegotiationInfo{ + RenegotiatedConnection: 0, + }} + if (cfg.extendedMasterSecret == RequestExtendedMasterSecret || + cfg.extendedMasterSecret == RequireExtendedMasterSecret) && state.extendedMasterSecret { + extensions = append(extensions, &extension.UseExtendedMasterSecret{ + Supported: true, + }) + } + if state.getSRTPProtectionProfile() != 0 { + extensions = append(extensions, &extension.UseSRTP{ + ProtectionProfiles: []SRTPProtectionProfile{state.getSRTPProtectionProfile()}, + }) + } + if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate { + extensions = append(extensions, &extension.SupportedPointFormats{ + PointFormats: []elliptic.CurvePointFormat{elliptic.CurvePointFormatUncompressed}, + }) + } + + selectedProto, err := extension.ALPNProtocolSelection(cfg.supportedProtocols, state.peerSupportedProtocols) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.NoApplicationProtocol}, err + } + if selectedProto != "" { + extensions = append(extensions, &extension.ALPN{ + ProtocolNameList: []string{selectedProto}, + }) + state.NegotiatedProtocol = selectedProto + } + + var pkts []*packet + cipherSuiteID := uint16(state.cipherSuite.ID()) + + if cfg.sessionStore != nil { + state.SessionID = make([]byte, sessionLength) + if _, err := rand.Read(state.SessionID); err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + // [Psiphon] + // Randomize ServerHello + seed, err := inproxy_dtls.GetDTLSSeed(ctx) + if err != nil { + return nil, nil, err + } + if seed != nil { + PRNG := prng.NewPRNGWithSeed(seed) + PRNG.Shuffle(len(extensions), func(i, j int) { + extensions[i], extensions[j] = extensions[j], 
extensions[i] + }) + } + + pkts = append(pkts, &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageServerHello{ + Version: protocol.Version1_2, + Random: state.localRandom, + SessionID: state.SessionID, + CipherSuiteID: &cipherSuiteID, + CompressionMethod: defaultCompressionMethods()[0], + Extensions: extensions, + }, + }, + }, + }) + + switch { + case state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate: + certificate, err := cfg.getCertificate(&ClientHelloInfo{ + ServerName: state.serverName, + CipherSuites: []ciphersuite.ID{state.cipherSuite.ID()}, + + // [Psiphon] + // Conjure DTLS support, from: https://github.com/mingyech/dtls/commit/a56eccc1 + RandomBytes: state.remoteRandom.RandomBytes, + }) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, err + } + + pkts = append(pkts, &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageCertificate{ + Certificate: certificate.Certificate, + }, + }, + }, + }) + + serverRandom := state.localRandom.MarshalFixed() + clientRandom := state.remoteRandom.MarshalFixed() + + // Find compatible signature scheme + signatureHashAlgo, err := signaturehash.SelectSignatureScheme(cfg.localSignatureSchemes, certificate.PrivateKey) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, err + } + + signature, err := generateKeySignature(clientRandom[:], serverRandom[:], state.localKeypair.PublicKey, state.namedCurve, certificate.PrivateKey, signatureHashAlgo.Hash) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + state.localKeySignature = signature + + pkts = append(pkts, &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageServerKeyExchange{ + EllipticCurveType: elliptic.CurveTypeNamedCurve, + NamedCurve: state.namedCurve, + PublicKey: state.localKeypair.PublicKey, + HashAlgorithm: signatureHashAlgo.Hash, + SignatureAlgorithm: signatureHashAlgo.Signature, + Signature: state.localKeySignature, + }, + }, + }, + }) + + if cfg.clientAuth > NoClientCert { + // An empty list of certificateAuthorities signals to + // the client that it may send any certificate in response + // to our request. When we know the CAs we trust, then + // we can send them down, so that the client can choose + // an appropriate certificate to give to us. + var certificateAuthorities [][]byte + if cfg.clientCAs != nil { + // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool and it's ok if certificate authorities is empty. 
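
Editor's note (not part of the diff): the [Psiphon] block earlier in flight4Generate seeds a PRNG from GetDTLSSeed and shuffles the ServerHello extensions. The randomization relies on the shuffle being deterministic per seed: the same seed always reproduces the same extension ordering. A minimal standalone sketch of that property, assuming prng.NewSeed is the seed constructor from the same psiphon/common/prng package already imported above:

package main

import (
	"fmt"

	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng"
)

func main() {
	// Assumption: prng.NewSeed returns a fresh *prng.Seed, as used elsewhere
	// in psiphon-tunnel-core; only NewPRNGWithSeed and Shuffle appear in the diff.
	seed, err := prng.NewSeed()
	if err != nil {
		panic(err)
	}

	a := []string{"renegotiation_info", "use_srtp", "extended_master_secret", "alpn"}
	b := append([]string(nil), a...)

	// Two PRNGs constructed from the same seed produce the same swap sequence,
	// so both slices end up in the same shuffled order.
	prng.NewPRNGWithSeed(seed).Shuffle(len(a), func(i, j int) { a[i], a[j] = a[j], a[i] })
	prng.NewPRNGWithSeed(seed).Shuffle(len(b), func(i, j int) { b[i], b[j] = b[j], b[i] })

	fmt.Println(a)
	fmt.Println(b) // identical ordering to a
}
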
+ certificateAuthorities = cfg.clientCAs.Subjects() + } + pkts = append(pkts, &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageCertificateRequest{ + CertificateTypes: []clientcertificate.Type{clientcertificate.RSASign, clientcertificate.ECDSASign}, + SignatureHashAlgorithms: cfg.localSignatureSchemes, + CertificateAuthoritiesNames: certificateAuthorities, + }, + }, + }, + }) + } + case cfg.localPSKIdentityHint != nil || state.cipherSuite.KeyExchangeAlgorithm().Has(CipherSuiteKeyExchangeAlgorithmEcdhe): + // To help the client in selecting which identity to use, the server + // can provide a "PSK identity hint" in the ServerKeyExchange message. + // If no hint is provided and cipher suite doesn't use elliptic curve, + // the ServerKeyExchange message is omitted. + // + // https://tools.ietf.org/html/rfc4279#section-2 + srvExchange := &handshake.MessageServerKeyExchange{ + IdentityHint: cfg.localPSKIdentityHint, + } + if state.cipherSuite.KeyExchangeAlgorithm().Has(CipherSuiteKeyExchangeAlgorithmEcdhe) { + srvExchange.EllipticCurveType = elliptic.CurveTypeNamedCurve + srvExchange.NamedCurve = state.namedCurve + srvExchange.PublicKey = state.localKeypair.PublicKey + } + pkts = append(pkts, &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: srvExchange, + }, + }, + }) + } + + pkts = append(pkts, &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageServerHelloDone{}, + }, + }, + }) + + return pkts, nil, nil +} diff --git a/replace/dtls/flight4handler_test.go b/replace/dtls/flight4handler_test.go new file mode 100644 index 000000000..318a05826 --- /dev/null +++ b/replace/dtls/flight4handler_test.go @@ -0,0 +1,119 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "context" + "testing" + "time" + + "github.com/pion/dtls/v2/internal/ciphersuite" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/transport/v2/test" +) + +type flight4TestMockFlightConn struct{} + +func (f *flight4TestMockFlightConn) notify(context.Context, alert.Level, alert.Description) error { + return nil +} +func (f *flight4TestMockFlightConn) writePackets(context.Context, []*packet) error { return nil } +func (f *flight4TestMockFlightConn) recvHandshake() <-chan chan struct{} { return nil } +func (f *flight4TestMockFlightConn) setLocalEpoch(uint16) {} +func (f *flight4TestMockFlightConn) handleQueuedPackets(context.Context) error { return nil } +func (f *flight4TestMockFlightConn) sessionKey() []byte { return nil } + +type flight4TestMockCipherSuite struct { + ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256 + + t *testing.T +} + +func (f *flight4TestMockCipherSuite) IsInitialized() bool { + f.t.Fatal("IsInitialized called with Certificate but not CertificateVerify") + return true +} + +// Assert that if a Client sends a certificate they +// must also send a CertificateVerify message. 
+// The flight4handler must not interact with the CipherSuite +// if the CertificateVerify is missing +func TestFlight4_Process_CertificateVerify(t *testing.T) { + // Limit runtime in case of deadlocks + lim := test.TimeOut(5 * time.Second) + defer lim.Stop() + + // Check for leaking routines + report := test.CheckRoutines(t) + defer report() + + mockConn := &flight4TestMockFlightConn{} + state := &State{ + cipherSuite: &flight4TestMockCipherSuite{t: t}, + } + cache := newHandshakeCache() + cfg := &handshakeConfig{} + + rawCertificate := []byte{ + 0x0b, 0x00, 0x01, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x9b, 0x00, 0x01, 0x98, 0x00, 0x01, 0x95, 0x30, 0x82, + 0x01, 0x91, 0x30, 0x82, 0x01, 0x38, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x11, 0x01, 0x65, 0x03, 0x3f, 0x4d, 0x0b, 0x9a, + 0x62, 0x91, 0xdb, 0x4d, 0x28, 0x2c, 0x1f, 0xd6, 0x73, 0x32, + 0x30, 0x0a, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, + 0x03, 0x02, 0x30, 0x00, 0x30, 0x1e, 0x17, 0x0d, 0x32, 0x32, + 0x30, 0x35, 0x31, 0x35, 0x31, 0x38, 0x34, 0x33, 0x35, 0x35, + 0x5a, 0x17, 0x0d, 0x32, 0x32, 0x30, 0x36, 0x31, 0x35, 0x31, + 0x38, 0x34, 0x33, 0x35, 0x35, 0x5a, 0x30, 0x00, 0x30, 0x59, + 0x30, 0x13, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02, + 0x01, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, + 0x07, 0x03, 0x42, 0x00, 0x04, 0xc3, 0xb7, 0x13, 0x1a, 0x0a, + 0xfc, 0xd0, 0x82, 0xf8, 0x94, 0x5e, 0xc0, 0x77, 0x07, 0x81, + 0x28, 0xc9, 0xcb, 0x08, 0x84, 0x50, 0x6b, 0xf0, 0x22, 0xe8, + 0x79, 0xb9, 0x15, 0x33, 0xc4, 0x56, 0xa1, 0xd3, 0x1b, 0x24, + 0xe3, 0x61, 0xbd, 0x4d, 0x65, 0x80, 0x6b, 0x5d, 0x96, 0x48, + 0xa2, 0x44, 0x9e, 0xce, 0xe8, 0x65, 0xd6, 0x3c, 0xe0, 0x9b, + 0x6b, 0xa1, 0x36, 0x34, 0xb2, 0x39, 0xe2, 0x03, 0x00, 0xa3, + 0x81, 0x92, 0x30, 0x81, 0x8f, 0x30, 0x0e, 0x06, 0x03, 0x55, + 0x1d, 0x0f, 0x01, 0x01, 0xff, 0x04, 0x04, 0x03, 0x02, 0x02, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x25, 0x04, 0x16, + 0x30, 0x14, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, + 0x03, 0x02, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, + 0x03, 0x01, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x01, + 0x01, 0xff, 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, + 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, + 0xb1, 0x1a, 0xe3, 0xeb, 0x6f, 0x7c, 0xc3, 0x8f, 0xba, 0x6f, + 0x1c, 0xe8, 0xf0, 0x23, 0x08, 0x50, 0x8d, 0x3c, 0xea, 0x31, + 0x30, 0x2e, 0x06, 0x03, 0x55, 0x1d, 0x11, 0x01, 0x01, 0xff, + 0x04, 0x24, 0x30, 0x22, 0x82, 0x20, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x0a, + 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x03, 0x02, + 0x03, 0x47, 0x00, 0x30, 0x44, 0x02, 0x20, 0x06, 0x31, 0x43, + 0xac, 0x03, 0x45, 0x79, 0x3c, 0xd7, 0x5f, 0x6e, 0x6a, 0xf8, + 0x0e, 0xfd, 0x35, 0x49, 0xee, 0x1b, 0xbc, 0x47, 0xce, 0xe3, + 0x39, 0xec, 0xe4, 0x62, 0xe1, 0x30, 0x1a, 0xa1, 0x89, 0x02, + 0x20, 0x35, 0xcd, 0x7a, 0x15, 0x68, 0x09, 0x50, 0x49, 0x9e, + 0x3e, 0x05, 0xd7, 0xc2, 0x69, 0x3f, 0x9c, 0x0c, 0x98, 0x92, + 0x65, 0xec, 0xae, 0x44, 0xfe, 0xe5, 0x68, 0xb8, 0x09, 0x78, + 0x7f, 0x6b, 0x77, + } + + rawClientKeyExchange := []byte{ + 0x10, 0x00, 0x00, 0x21, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x21, 0x20, 0x96, 0xed, 0x0c, 0xee, 0xf3, 0x11, 0xb1, + 0x9d, 0x8b, 0x1c, 0x02, 0x7f, 0x06, 0x7c, 0x57, 0x7a, 0x14, + 0xa6, 0x41, 0xde, 0x63, 0x57, 0x9e, 0xcd, 0x34, 0x54, 0xba, + 0x37, 0x4d, 0x34, 0x15, 0x18, + } + + cache.push(rawCertificate, 0, 0, 
handshake.TypeCertificate, true) + cache.push(rawClientKeyExchange, 0, 1, handshake.TypeClientKeyExchange, true) + + if _, _, err := flight4Parse(context.TODO(), mockConn, state, cache, cfg); err != nil { + t.Fatal(err) + } +} diff --git a/replace/dtls/flight5bhandler.go b/replace/dtls/flight5bhandler.go new file mode 100644 index 000000000..1ad1cdbfb --- /dev/null +++ b/replace/dtls/flight5bhandler.go @@ -0,0 +1,78 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "context" + + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight5bParse(_ context.Context, _ flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence-1, state.cipherSuite, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false}, + ) + if !ok { + // No valid message received. Keep reading + return 0, nil, nil + } + + if _, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + // Other party may re-transmit the last flight. Keep state to be flight5b. + return flight5b, nil, nil +} + +func flight5bGenerate(_ context.Context, _ flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { //nolint:gocognit + var pkts []*packet + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &protocol.ChangeCipherSpec{}, + }, + }) + + if len(state.localVerifyData) == 0 { + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false}, + ) + + var err error + state.localVerifyData, err = prf.VerifyDataClient(state.masterSecret, plainText, state.cipherSuite.HashFunc()) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + Epoch: 1, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageFinished{ + VerifyData: state.localVerifyData, + }, + }, + }, + shouldEncrypt: true, + resetLocalSequenceNumber: true, + }) + + return pkts, nil, nil +} diff --git a/replace/dtls/flight5handler.go b/replace/dtls/flight5handler.go new file mode 100644 index 000000000..acc09473f --- /dev/null +++ b/replace/dtls/flight5handler.go @@ -0,0 +1,357 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "bytes" + "context" + "crypto" + "crypto/x509" + + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/crypto/signaturehash" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight5Parse(_ context.Context, c flightConn, state *State, cache 
*handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false}, + ) + if !ok { + // No valid message received. Keep reading + return 0, nil, nil + } + + var finished *handshake.MessageFinished + if finished, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeCertificateVerify, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false}, + ) + + expectedVerifyData, err := prf.VerifyDataServer(state.masterSecret, plainText, state.cipherSuite.HashFunc()) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + if !bytes.Equal(expectedVerifyData, finished.VerifyData) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errVerifyDataMismatch + } + + if len(state.SessionID) > 0 { + s := Session{ + ID: state.SessionID, + Secret: state.masterSecret, + } + cfg.log.Tracef("[handshake] save new session: %x", s.ID) + if err := cfg.sessionStore.Set(c.sessionKey(), s); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + return flight5, nil, nil +} + +func flight5Generate(_ context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { //nolint:gocognit + var privateKey crypto.PrivateKey + var pkts []*packet + if state.remoteRequestedCertificate { + _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence-2, state.cipherSuite, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false}) + if !ok { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errClientCertificateRequired + } + reqInfo := CertificateRequestInfo{} + if r, ok := msgs[handshake.TypeCertificateRequest].(*handshake.MessageCertificateRequest); ok { + reqInfo.AcceptableCAs = r.CertificateAuthoritiesNames + } else { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errClientCertificateRequired + } + certificate, err := cfg.getClientCertificate(&reqInfo) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, err + } + if certificate == nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errNotAcceptableCertificateChain + } + if certificate.Certificate != nil { + privateKey = certificate.PrivateKey + } + pkts = append(pkts, + &packet{ + record: 
&recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageCertificate{ + Certificate: certificate.Certificate, + }, + }, + }, + }) + } + + clientKeyExchange := &handshake.MessageClientKeyExchange{} + if cfg.localPSKCallback == nil { + clientKeyExchange.PublicKey = state.localKeypair.PublicKey + } else { + clientKeyExchange.IdentityHint = cfg.localPSKIdentityHint + } + if state != nil && state.localKeypair != nil && len(state.localKeypair.PublicKey) > 0 { + clientKeyExchange.PublicKey = state.localKeypair.PublicKey + } + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: clientKeyExchange, + }, + }, + }) + + serverKeyExchangeData := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + ) + + serverKeyExchange := &handshake.MessageServerKeyExchange{} + + // handshakeMessageServerKeyExchange is optional for PSK + if len(serverKeyExchangeData) == 0 { + alertPtr, err := handleServerKeyExchange(c, state, cfg, &handshake.MessageServerKeyExchange{}) + if err != nil { + return nil, alertPtr, err + } + } else { + rawHandshake := &handshake.Handshake{ + KeyExchangeAlgorithm: state.cipherSuite.KeyExchangeAlgorithm(), + } + err := rawHandshake.Unmarshal(serverKeyExchangeData) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.UnexpectedMessage}, err + } + + switch h := rawHandshake.Message.(type) { + case *handshake.MessageServerKeyExchange: + serverKeyExchange = h + default: + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.UnexpectedMessage}, errInvalidContentType + } + } + + // Append not-yet-sent packets + merged := []byte{} + seqPred := uint16(state.handshakeSendSequence) + for _, p := range pkts { + h, ok := p.record.Content.(*handshake.Handshake) + if !ok { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, errInvalidContentType + } + h.Header.MessageSequence = seqPred + seqPred++ + raw, err := h.Marshal() + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + merged = append(merged, raw...) + } + + if alertPtr, err := initalizeCipherSuite(state, cache, cfg, serverKeyExchange, merged); err != nil { + return nil, alertPtr, err + } + + // If the client has sent a certificate with signing ability, a digitally-signed + // CertificateVerify message is sent to explicitly verify possession of the + // private key in the certificate. + if state.remoteRequestedCertificate && privateKey != nil { + plainText := append(cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false}, + ), merged...) 
+ + // Find compatible signature scheme + signatureHashAlgo, err := signaturehash.SelectSignatureScheme(cfg.localSignatureSchemes, privateKey) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, err + } + + certVerify, err := generateCertificateVerify(plainText, privateKey, signatureHashAlgo.Hash) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + state.localCertificatesVerify = certVerify + + p := &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageCertificateVerify{ + HashAlgorithm: signatureHashAlgo.Hash, + SignatureAlgorithm: signatureHashAlgo.Signature, + Signature: state.localCertificatesVerify, + }, + }, + }, + } + pkts = append(pkts, p) + + h, ok := p.record.Content.(*handshake.Handshake) + if !ok { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, errInvalidContentType + } + h.Header.MessageSequence = seqPred + // seqPred++ // this is the last use of seqPred + raw, err := h.Marshal() + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + merged = append(merged, raw...) + } + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &protocol.ChangeCipherSpec{}, + }, + }) + + if len(state.localVerifyData) == 0 { + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeCertificateVerify, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false}, + ) + + var err error + state.localVerifyData, err = prf.VerifyDataClient(state.masterSecret, append(plainText, merged...), state.cipherSuite.HashFunc()) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + Epoch: 1, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageFinished{ + VerifyData: state.localVerifyData, + }, + }, + }, + shouldEncrypt: true, + resetLocalSequenceNumber: true, + }) + + return pkts, nil, nil +} + +func initalizeCipherSuite(state *State, cache *handshakeCache, cfg *handshakeConfig, h *handshake.MessageServerKeyExchange, sendingPlainText []byte) (*alert.Alert, error) { //nolint:gocognit + if state.cipherSuite.IsInitialized() { + return nil, nil //nolint + } + + clientRandom := state.localRandom.MarshalFixed() + serverRandom := state.remoteRandom.MarshalFixed() + + var err error + + if state.extendedMasterSecret { + var sessionHash []byte + 
sessionHash, err = cache.sessionHash(state.cipherSuite.HashFunc(), cfg.initialEpoch, sendingPlainText) + if err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + state.masterSecret, err = prf.ExtendedMasterSecret(state.preMasterSecret, sessionHash, state.cipherSuite.HashFunc()) + if err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.IllegalParameter}, err + } + } else { + state.masterSecret, err = prf.MasterSecret(state.preMasterSecret, clientRandom[:], serverRandom[:], state.cipherSuite.HashFunc()) + if err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate { + // Verify that the pair of hash algorithm and signiture is listed. + var validSignatureScheme bool + for _, ss := range cfg.localSignatureSchemes { + if ss.Hash == h.HashAlgorithm && ss.Signature == h.SignatureAlgorithm { + validSignatureScheme = true + break + } + } + if !validSignatureScheme { + return &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errNoAvailableSignatureSchemes + } + + expectedMsg := valueKeyMessage(clientRandom[:], serverRandom[:], h.PublicKey, h.NamedCurve) + if err = verifyKeySignature(expectedMsg, h.Signature, h.HashAlgorithm, state.PeerCertificates); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + var chains [][]*x509.Certificate + if !cfg.insecureSkipVerify { + if chains, err = verifyServerCert(state.PeerCertificates, cfg.rootCAs, cfg.serverName); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + } + if cfg.verifyPeerCertificate != nil { + if err = cfg.verifyPeerCertificate(state.PeerCertificates, chains); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + } + } + if cfg.verifyConnection != nil { + if err = cfg.verifyConnection(state.clone()); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + } + + if err = state.cipherSuite.Init(state.masterSecret, clientRandom[:], serverRandom[:], true); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + cfg.writeKeyLog(keyLogLabelTLS12, clientRandom[:], state.masterSecret) + + return nil, nil //nolint +} diff --git a/replace/dtls/flight6handler.go b/replace/dtls/flight6handler.go new file mode 100644 index 000000000..59a14ae8e --- /dev/null +++ b/replace/dtls/flight6handler.go @@ -0,0 +1,85 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "context" + + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight6Parse(_ context.Context, _ flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence-1, state.cipherSuite, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false}, + ) + if !ok { + // No valid message received. 
Keep reading + return 0, nil, nil + } + + if _, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + // Other party may re-transmit the last flight. Keep state to be flight6. + return flight6, nil, nil +} + +func flight6Generate(_ context.Context, _ flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { + var pkts []*packet + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &protocol.ChangeCipherSpec{}, + }, + }) + + if len(state.localVerifyData) == 0 { + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeCertificateVerify, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false}, + ) + + var err error + state.localVerifyData, err = prf.VerifyDataServer(state.masterSecret, plainText, state.cipherSuite.HashFunc()) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + Epoch: 1, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageFinished{ + VerifyData: state.localVerifyData, + }, + }, + }, + shouldEncrypt: true, + resetLocalSequenceNumber: true, + }, + ) + return pkts, nil, nil +} diff --git a/replace/dtls/flighthandler.go b/replace/dtls/flighthandler.go new file mode 100644 index 000000000..2c4987680 --- /dev/null +++ b/replace/dtls/flighthandler.go @@ -0,0 +1,71 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "context" + + "github.com/pion/dtls/v2/pkg/protocol/alert" +) + +// Parse received handshakes and return next flightVal +type flightParser func(context.Context, flightConn, *State, *handshakeCache, *handshakeConfig) (flightVal, *alert.Alert, error) + +// Generate flights +// +// [Psiphon] +// Pass in dial context for GetDTLSSeed. 
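
Editor's note (not part of the diff): the [Psiphon] comment above documents that flight generators (the flightGenerator type declared next) now receive the dial context so the randomization code can call inproxy_dtls.GetDTLSSeed(ctx). A hypothetical sketch of the context-value plumbing this implies; the real setter in psiphon/common/inproxy/dtls is not shown in this diff and its key type and API may differ.

package dtlsseed

import (
	"context"

	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng"
)

// dtlsSeedKey is a hypothetical private context key for the handshake
// randomization seed.
type dtlsSeedKey struct{}

// withDTLSSeed attaches a randomization seed to the dial context before the
// DTLS handshake begins.
func withDTLSSeed(ctx context.Context, seed *prng.Seed) context.Context {
	return context.WithValue(ctx, dtlsSeedKey{}, seed)
}

// dtlsSeedFromContext retrieves the seed; a nil result means the caller did
// not request handshake randomization, matching the "if seed != nil" checks
// in the flight generators above.
func dtlsSeedFromContext(ctx context.Context) *prng.Seed {
	seed, _ := ctx.Value(dtlsSeedKey{}).(*prng.Seed)
	return seed
}
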
+type flightGenerator func(context.Context, flightConn, *State, *handshakeCache, *handshakeConfig) ([]*packet, *alert.Alert, error) + +func (f flightVal) getFlightParser() (flightParser, error) { + switch f { + case flight0: + return flight0Parse, nil + case flight1: + return flight1Parse, nil + case flight2: + return flight2Parse, nil + case flight3: + return flight3Parse, nil + case flight4: + return flight4Parse, nil + case flight4b: + return flight4bParse, nil + case flight5: + return flight5Parse, nil + case flight5b: + return flight5bParse, nil + case flight6: + return flight6Parse, nil + default: + return nil, errInvalidFlight + } +} + +func (f flightVal) getFlightGenerator() (gen flightGenerator, retransmit bool, err error) { + switch f { + case flight0: + return flight0Generate, true, nil + case flight1: + return flight1Generate, true, nil + case flight2: + // https://tools.ietf.org/html/rfc6347#section-3.2.1 + // HelloVerifyRequests must not be retransmitted. + return flight2Generate, false, nil + case flight3: + return flight3Generate, true, nil + case flight4: + return flight4Generate, true, nil + case flight4b: + return flight4bGenerate, true, nil + case flight5: + return flight5Generate, true, nil + case flight5b: + return flight5bGenerate, true, nil + case flight6: + return flight6Generate, true, nil + default: + return nil, false, errInvalidFlight + } +} diff --git a/replace/dtls/fragment_buffer.go b/replace/dtls/fragment_buffer.go new file mode 100644 index 000000000..f20033758 --- /dev/null +++ b/replace/dtls/fragment_buffer.go @@ -0,0 +1,132 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +// 2 megabytes +const fragmentBufferMaxSize = 2000000 + +type fragment struct { + recordLayerHeader recordlayer.Header + handshakeHeader handshake.Header + data []byte +} + +type fragmentBuffer struct { + // map of MessageSequenceNumbers that hold slices of fragments + cache map[uint16][]*fragment + + currentMessageSequenceNumber uint16 +} + +func newFragmentBuffer() *fragmentBuffer { + return &fragmentBuffer{cache: map[uint16][]*fragment{}} +} + +// current total size of buffer +func (f *fragmentBuffer) size() int { + size := 0 + for i := range f.cache { + for j := range f.cache[i] { + size += len(f.cache[i][j].data) + } + } + return size +} + +// Attempts to push a DTLS packet to the fragmentBuffer +// when it returns true it means the fragmentBuffer has inserted and the buffer shouldn't be handled +// when an error returns it is fatal, and the DTLS connection should be stopped +func (f *fragmentBuffer) push(buf []byte) (bool, error) { + if f.size()+len(buf) >= fragmentBufferMaxSize { + return false, errFragmentBufferOverflow + } + + frag := new(fragment) + if err := frag.recordLayerHeader.Unmarshal(buf); err != nil { + return false, err + } + + // fragment isn't a handshake, we don't need to handle it + if frag.recordLayerHeader.ContentType != protocol.ContentTypeHandshake { + return false, nil + } + + for buf = buf[recordlayer.HeaderSize:]; len(buf) != 0; frag = new(fragment) { + if err := frag.handshakeHeader.Unmarshal(buf); err != nil { + return false, err + } + + if _, ok := f.cache[frag.handshakeHeader.MessageSequence]; !ok { + f.cache[frag.handshakeHeader.MessageSequence] = []*fragment{} + } + + // end index should be the length of handshake header but 
if the handshake + // was fragmented, we should keep them all + end := int(handshake.HeaderLength + frag.handshakeHeader.Length) + if size := len(buf); end > size { + end = size + } + + // Discard all headers, when rebuilding the packet we will re-build + frag.data = append([]byte{}, buf[handshake.HeaderLength:end]...) + f.cache[frag.handshakeHeader.MessageSequence] = append(f.cache[frag.handshakeHeader.MessageSequence], frag) + buf = buf[end:] + } + + return true, nil +} + +func (f *fragmentBuffer) pop() (content []byte, epoch uint16) { + frags, ok := f.cache[f.currentMessageSequenceNumber] + if !ok { + return nil, 0 + } + + // Go doesn't support recursive lambdas + var appendMessage func(targetOffset uint32) bool + + rawMessage := []byte{} + appendMessage = func(targetOffset uint32) bool { + for _, f := range frags { + if f.handshakeHeader.FragmentOffset == targetOffset { + fragmentEnd := (f.handshakeHeader.FragmentOffset + f.handshakeHeader.FragmentLength) + if fragmentEnd != f.handshakeHeader.Length && f.handshakeHeader.FragmentLength != 0 { + if !appendMessage(fragmentEnd) { + return false + } + } + + rawMessage = append(f.data, rawMessage...) + return true + } + } + return false + } + + // Recursively collect up + if !appendMessage(0) { + return nil, 0 + } + + firstHeader := frags[0].handshakeHeader + firstHeader.FragmentOffset = 0 + firstHeader.FragmentLength = firstHeader.Length + + rawHeader, err := firstHeader.Marshal() + if err != nil { + return nil, 0 + } + + messageEpoch := frags[0].recordLayerHeader.Epoch + + delete(f.cache, f.currentMessageSequenceNumber) + f.currentMessageSequenceNumber++ + return append(rawHeader, rawMessage...), messageEpoch +} diff --git a/replace/dtls/fragment_buffer_test.go b/replace/dtls/fragment_buffer_test.go new file mode 100644 index 000000000..ad8834e71 --- /dev/null +++ b/replace/dtls/fragment_buffer_test.go @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "errors" + "reflect" + "testing" +) + +func TestFragmentBuffer(t *testing.T) { + for _, test := range []struct { + Name string + In [][]byte + Expected [][]byte + Epoch uint16 + }{ + { + Name: "Single Fragment", + In: [][]byte{ + {0x16, 0xfe, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xfe, 0xff, 0x00}, + }, + Expected: [][]byte{ + {0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xfe, 0xff, 0x00}, + }, + Epoch: 0, + }, + { + Name: "Single Fragment Epoch 3", + In: [][]byte{ + {0x16, 0xfe, 0xff, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xfe, 0xff, 0x00}, + }, + Expected: [][]byte{ + {0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xfe, 0xff, 0x00}, + }, + Epoch: 3, + }, + { + Name: "Multiple Fragments", + In: [][]byte{ + {0x16, 0xfe, 0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x0b, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x01, 0x02, 0x03, 0x04}, + {0x16, 0xfe, 0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x0b, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x05, 0x05, 0x06, 0x07, 0x08, 0x09}, + {0x16, 0xfe, 0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x0b, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x05, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E}, + }, + Expected: 
[][]byte{ + {0x0b, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e}, + }, + Epoch: 0, + }, + { + Name: "Multiple Unordered Fragments", + In: [][]byte{ + {0x16, 0xfe, 0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x0b, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x01, 0x02, 0x03, 0x04}, + {0x16, 0xfe, 0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x0b, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x05, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E}, + {0x16, 0xfe, 0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x81, 0x0b, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x05, 0x05, 0x06, 0x07, 0x08, 0x09}, + }, + Expected: [][]byte{ + {0x0b, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e}, + }, + Epoch: 0, + }, + { + Name: "Multiple Handshakes in Single Fragment", + In: [][]byte{ + { + 0x16, 0xfe, 0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x30, /* record header */ + 0x03, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xfe, 0xff, 0x01, 0x01, /*handshake msg 1*/ + 0x03, 0x00, 0x00, 0x04, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xfe, 0xff, 0x01, 0x01, /*handshake msg 2*/ + 0x03, 0x00, 0x00, 0x04, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xfe, 0xff, 0x01, 0x01, /*handshake msg 3*/ + }, + }, + Expected: [][]byte{ + {0x03, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xfe, 0xff, 0x01, 0x01}, + {0x03, 0x00, 0x00, 0x04, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xfe, 0xff, 0x01, 0x01}, + {0x03, 0x00, 0x00, 0x04, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xfe, 0xff, 0x01, 0x01}, + }, + Epoch: 0, + }, + // Assert that a zero length fragment doesn't cause the fragmentBuffer to enter an infinite loop + { + Name: "Zero Length Fragment", + In: [][]byte{ + { + 0x16, 0xfe, 0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + }, + Expected: [][]byte{ + {0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00}, + }, + Epoch: 0, + }, + } { + fragmentBuffer := newFragmentBuffer() + for _, frag := range test.In { + status, err := fragmentBuffer.push(frag) + if err != nil { + t.Error(err) + } else if !status { + t.Errorf("fragmentBuffer didn't accept fragments for '%s'", test.Name) + } + } + + for _, expected := range test.Expected { + out, epoch := fragmentBuffer.pop() + if !reflect.DeepEqual(out, expected) { + t.Errorf("fragmentBuffer '%s' push/pop: got % 02x, want % 02x", test.Name, out, expected) + } + if epoch != test.Epoch { + t.Errorf("fragmentBuffer returned wrong epoch: got %d, want %d", epoch, test.Epoch) + } + } + + if frag, _ := fragmentBuffer.pop(); frag != nil { + t.Errorf("fragmentBuffer popped single buffer multiple times for '%s'", test.Name) + } + } +} + +func TestFragmentBuffer_Overflow(t *testing.T) { + fragmentBuffer := newFragmentBuffer() + + // Push a buffer that doesn't exceed size limits + if _, err := fragmentBuffer.push([]byte{0x16, 0xfe, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xfe, 0xff, 0x00}); err != nil { + t.Fatal(err) + } + + // Allocate a buffer that exceeds cache size + 
largeBuffer := make([]byte, fragmentBufferMaxSize) + if _, err := fragmentBuffer.push(largeBuffer); !errors.Is(err, errFragmentBufferOverflow) { + t.Fatalf("Pushing a large buffer returned (%s) expected(%s)", err, errFragmentBufferOverflow) + } +} diff --git a/replace/dtls/go.mod b/replace/dtls/go.mod new file mode 100644 index 000000000..9d0d65615 --- /dev/null +++ b/replace/dtls/go.mod @@ -0,0 +1,10 @@ +module github.com/pion/dtls/v2 + +require ( + github.com/pion/logging v0.2.2 + github.com/pion/transport/v2 v2.2.1 + golang.org/x/crypto v0.8.0 + golang.org/x/net v0.9.0 +) + +go 1.13 diff --git a/replace/dtls/handshake_cache.go b/replace/dtls/handshake_cache.go new file mode 100644 index 000000000..8d5960568 --- /dev/null +++ b/replace/dtls/handshake_cache.go @@ -0,0 +1,172 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "sync" + + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol/handshake" +) + +type handshakeCacheItem struct { + typ handshake.Type + isClient bool + epoch uint16 + messageSequence uint16 + data []byte +} + +type handshakeCachePullRule struct { + typ handshake.Type + epoch uint16 + isClient bool + optional bool +} + +type handshakeCache struct { + cache []*handshakeCacheItem + mu sync.Mutex +} + +func newHandshakeCache() *handshakeCache { + return &handshakeCache{} +} + +func (h *handshakeCache) push(data []byte, epoch, messageSequence uint16, typ handshake.Type, isClient bool) { + h.mu.Lock() + defer h.mu.Unlock() + + h.cache = append(h.cache, &handshakeCacheItem{ + data: append([]byte{}, data...), + epoch: epoch, + messageSequence: messageSequence, + typ: typ, + isClient: isClient, + }) +} + +// returns a list handshakes that match the requested rules +// the list will contain null entries for rules that can't be satisfied +// multiple entries may match a rule, but only the last match is returned (ie ClientHello with cookies) +func (h *handshakeCache) pull(rules ...handshakeCachePullRule) []*handshakeCacheItem { + h.mu.Lock() + defer h.mu.Unlock() + + out := make([]*handshakeCacheItem, len(rules)) + for i, r := range rules { + for _, c := range h.cache { + if c.typ == r.typ && c.isClient == r.isClient && c.epoch == r.epoch { + switch { + case out[i] == nil: + out[i] = c + case out[i].messageSequence < c.messageSequence: + out[i] = c + } + } + } + } + + return out +} + +// fullPullMap pulls all handshakes between rules[0] to rules[len(rules)-1] as map. +func (h *handshakeCache) fullPullMap(startSeq int, cipherSuite CipherSuite, rules ...handshakeCachePullRule) (int, map[handshake.Type]handshake.Message, bool) { + h.mu.Lock() + defer h.mu.Unlock() + + ci := make(map[handshake.Type]*handshakeCacheItem) + for _, r := range rules { + var item *handshakeCacheItem + for _, c := range h.cache { + if c.typ == r.typ && c.isClient == r.isClient && c.epoch == r.epoch { + switch { + case item == nil: + item = c + case item.messageSequence < c.messageSequence: + item = c + } + } + } + if !r.optional && item == nil { + // Missing mandatory message. 
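+ // A nil map and ok == false signal that this flight is not yet complete;
+ // the handshake FSM keeps waiting for further handshake records and retries.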
+ return startSeq, nil, false + } + ci[r.typ] = item + } + out := make(map[handshake.Type]handshake.Message) + seq := startSeq + for _, r := range rules { + t := r.typ + i := ci[t] + if i == nil { + continue + } + var keyExchangeAlgorithm CipherSuiteKeyExchangeAlgorithm + if cipherSuite != nil { + keyExchangeAlgorithm = cipherSuite.KeyExchangeAlgorithm() + } + rawHandshake := &handshake.Handshake{ + KeyExchangeAlgorithm: keyExchangeAlgorithm, + } + if err := rawHandshake.Unmarshal(i.data); err != nil { + return startSeq, nil, false + } + if uint16(seq) != rawHandshake.Header.MessageSequence { + // There is a gap. Some messages are not arrived. + return startSeq, nil, false + } + seq++ + out[t] = rawHandshake.Message + } + return seq, out, true +} + +// pullAndMerge calls pull and then merges the results, ignoring any null entries +func (h *handshakeCache) pullAndMerge(rules ...handshakeCachePullRule) []byte { + merged := []byte{} + + for _, p := range h.pull(rules...) { + if p != nil { + merged = append(merged, p.data...) + } + } + return merged +} + +// sessionHash returns the session hash for Extended Master Secret support +// https://tools.ietf.org/html/draft-ietf-tls-session-hash-06#section-4 +func (h *handshakeCache) sessionHash(hf prf.HashFunc, epoch uint16, additional ...[]byte) ([]byte, error) { + merged := []byte{} + + // Order defined by https://tools.ietf.org/html/rfc5246#section-7.3 + handshakeBuffer := h.pull( + handshakeCachePullRule{handshake.TypeClientHello, epoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, epoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, epoch, false, false}, + handshakeCachePullRule{handshake.TypeServerKeyExchange, epoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, epoch, false, false}, + handshakeCachePullRule{handshake.TypeServerHelloDone, epoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, epoch, true, false}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, epoch, true, false}, + ) + + for _, p := range handshakeBuffer { + if p == nil { + continue + } + + merged = append(merged, p.data...) + } + for _, a := range additional { + merged = append(merged, a...) 
+ } + + hash := hf() + if _, err := hash.Write(merged); err != nil { + return []byte{}, err + } + + return hash.Sum(nil), nil +} diff --git a/replace/dtls/handshake_cache_test.go b/replace/dtls/handshake_cache_test.go new file mode 100644 index 000000000..44a15b587 --- /dev/null +++ b/replace/dtls/handshake_cache_test.go @@ -0,0 +1,213 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "bytes" + "testing" + + "github.com/pion/dtls/v2/internal/ciphersuite" + "github.com/pion/dtls/v2/pkg/protocol/handshake" +) + +func TestHandshakeCacheSinglePush(t *testing.T) { + for _, test := range []struct { + Name string + Rule []handshakeCachePullRule + Input []handshakeCacheItem + Expected []byte + }{ + { + Name: "Single Push", + Input: []handshakeCacheItem{ + {0, true, 0, 0, []byte{0x00}}, + }, + Rule: []handshakeCachePullRule{ + {0, 0, true, false}, + }, + Expected: []byte{0x00}, + }, + { + Name: "Multi Push", + Input: []handshakeCacheItem{ + {0, true, 0, 0, []byte{0x00}}, + {1, true, 0, 1, []byte{0x01}}, + {2, true, 0, 2, []byte{0x02}}, + }, + Rule: []handshakeCachePullRule{ + {0, 0, true, false}, + {1, 0, true, false}, + {2, 0, true, false}, + }, + Expected: []byte{0x00, 0x01, 0x02}, + }, + { + Name: "Multi Push, Rules set order", + Input: []handshakeCacheItem{ + {2, true, 0, 2, []byte{0x02}}, + {0, true, 0, 0, []byte{0x00}}, + {1, true, 0, 1, []byte{0x01}}, + }, + Rule: []handshakeCachePullRule{ + {0, 0, true, false}, + {1, 0, true, false}, + {2, 0, true, false}, + }, + Expected: []byte{0x00, 0x01, 0x02}, + }, + + { + Name: "Multi Push, Dupe Seqnum", + Input: []handshakeCacheItem{ + {0, true, 0, 0, []byte{0x00}}, + {1, true, 0, 1, []byte{0x01}}, + {1, true, 0, 1, []byte{0x01}}, + }, + Rule: []handshakeCachePullRule{ + {0, 0, true, false}, + {1, 0, true, false}, + }, + Expected: []byte{0x00, 0x01}, + }, + { + Name: "Multi Push, Dupe Seqnum Client/Server", + Input: []handshakeCacheItem{ + {0, true, 0, 0, []byte{0x00}}, + {1, true, 0, 1, []byte{0x01}}, + {1, false, 0, 1, []byte{0x02}}, + }, + Rule: []handshakeCachePullRule{ + {0, 0, true, false}, + {1, 0, true, false}, + {1, 0, false, false}, + }, + Expected: []byte{0x00, 0x01, 0x02}, + }, + { + Name: "Multi Push, Dupe Seqnum with Unique HandshakeType", + Input: []handshakeCacheItem{ + {1, true, 0, 0, []byte{0x00}}, + {2, true, 0, 1, []byte{0x01}}, + {3, false, 0, 0, []byte{0x02}}, + }, + Rule: []handshakeCachePullRule{ + {1, 0, true, false}, + {2, 0, true, false}, + {3, 0, false, false}, + }, + Expected: []byte{0x00, 0x01, 0x02}, + }, + { + Name: "Multi Push, Wrong epoch", + Input: []handshakeCacheItem{ + {1, true, 0, 0, []byte{0x00}}, + {2, true, 1, 1, []byte{0x01}}, + {2, true, 0, 2, []byte{0x11}}, + {3, false, 0, 0, []byte{0x02}}, + {3, false, 1, 0, []byte{0x12}}, + {3, false, 2, 0, []byte{0x12}}, + }, + Rule: []handshakeCachePullRule{ + {1, 0, true, false}, + {2, 1, true, false}, + {3, 0, false, false}, + }, + Expected: []byte{0x00, 0x01, 0x02}, + }, + } { + h := newHandshakeCache() + for _, i := range test.Input { + h.push(i.data, i.epoch, i.messageSequence, i.typ, i.isClient) + } + verifyData := h.pullAndMerge(test.Rule...) 
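+ // pullAndMerge concatenates the cached messages in rule order, keeping only
+ // the latest entry per rule and skipping epoch or sender mismatches.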
+ if !bytes.Equal(verifyData, test.Expected) { + t.Errorf("handshakeCache '%s' exp: % 02x actual % 02x", test.Name, test.Expected, verifyData) + } + } +} + +func TestHandshakeCacheSessionHash(t *testing.T) { + for _, test := range []struct { + Name string + Rule []handshakeCachePullRule + Input []handshakeCacheItem + Expected []byte + }{ + { + Name: "Standard Handshake", + Input: []handshakeCacheItem{ + {handshake.TypeClientHello, true, 0, 0, []byte{0x00}}, + {handshake.TypeServerHello, false, 0, 1, []byte{0x01}}, + {handshake.TypeCertificate, false, 0, 2, []byte{0x02}}, + {handshake.TypeServerKeyExchange, false, 0, 3, []byte{0x03}}, + {handshake.TypeServerHelloDone, false, 0, 4, []byte{0x04}}, + {handshake.TypeClientKeyExchange, true, 0, 5, []byte{0x05}}, + }, + Expected: []byte{0x17, 0xe8, 0x8d, 0xb1, 0x87, 0xaf, 0xd6, 0x2c, 0x16, 0xe5, 0xde, 0xbf, 0x3e, 0x65, 0x27, 0xcd, 0x00, 0x6b, 0xc0, 0x12, 0xbc, 0x90, 0xb5, 0x1a, 0x81, 0x0c, 0xd8, 0x0c, 0x2d, 0x51, 0x1f, 0x43}, + }, + { + Name: "Handshake With Client Cert Request", + Input: []handshakeCacheItem{ + {handshake.TypeClientHello, true, 0, 0, []byte{0x00}}, + {handshake.TypeServerHello, false, 0, 1, []byte{0x01}}, + {handshake.TypeCertificate, false, 0, 2, []byte{0x02}}, + {handshake.TypeServerKeyExchange, false, 0, 3, []byte{0x03}}, + {handshake.TypeCertificateRequest, false, 0, 4, []byte{0x04}}, + {handshake.TypeServerHelloDone, false, 0, 5, []byte{0x05}}, + {handshake.TypeClientKeyExchange, true, 0, 6, []byte{0x06}}, + }, + Expected: []byte{0x57, 0x35, 0x5a, 0xc3, 0x30, 0x3c, 0x14, 0x8f, 0x11, 0xae, 0xf7, 0xcb, 0x17, 0x94, 0x56, 0xb9, 0x23, 0x2c, 0xde, 0x33, 0xa8, 0x18, 0xdf, 0xda, 0x2c, 0x2f, 0xcb, 0x93, 0x25, 0x74, 0x9a, 0x6b}, + }, + { + Name: "Handshake Ignores after ClientKeyExchange", + Input: []handshakeCacheItem{ + {handshake.TypeClientHello, true, 0, 0, []byte{0x00}}, + {handshake.TypeServerHello, false, 0, 1, []byte{0x01}}, + {handshake.TypeCertificate, false, 0, 2, []byte{0x02}}, + {handshake.TypeServerKeyExchange, false, 0, 3, []byte{0x03}}, + {handshake.TypeCertificateRequest, false, 0, 4, []byte{0x04}}, + {handshake.TypeServerHelloDone, false, 0, 5, []byte{0x05}}, + {handshake.TypeClientKeyExchange, true, 0, 6, []byte{0x06}}, + {handshake.TypeCertificateVerify, true, 0, 7, []byte{0x07}}, + {handshake.TypeFinished, true, 1, 7, []byte{0x08}}, + {handshake.TypeFinished, false, 1, 7, []byte{0x09}}, + }, + Expected: []byte{0x57, 0x35, 0x5a, 0xc3, 0x30, 0x3c, 0x14, 0x8f, 0x11, 0xae, 0xf7, 0xcb, 0x17, 0x94, 0x56, 0xb9, 0x23, 0x2c, 0xde, 0x33, 0xa8, 0x18, 0xdf, 0xda, 0x2c, 0x2f, 0xcb, 0x93, 0x25, 0x74, 0x9a, 0x6b}, + }, + { + Name: "Handshake Ignores wrong epoch", + Input: []handshakeCacheItem{ + {handshake.TypeClientHello, true, 0, 0, []byte{0x00}}, + {handshake.TypeServerHello, false, 0, 1, []byte{0x01}}, + {handshake.TypeCertificate, false, 0, 2, []byte{0x02}}, + {handshake.TypeServerKeyExchange, false, 0, 3, []byte{0x03}}, + {handshake.TypeCertificateRequest, false, 0, 4, []byte{0x04}}, + {handshake.TypeServerHelloDone, false, 0, 5, []byte{0x05}}, + {handshake.TypeClientKeyExchange, true, 0, 6, []byte{0x06}}, + {handshake.TypeCertificateVerify, true, 0, 7, []byte{0x07}}, + {handshake.TypeFinished, true, 0, 7, []byte{0xf0}}, + {handshake.TypeFinished, false, 0, 7, []byte{0xf1}}, + {handshake.TypeFinished, true, 1, 7, []byte{0x08}}, + {handshake.TypeFinished, false, 1, 7, []byte{0x09}}, + {handshake.TypeFinished, true, 0, 7, []byte{0xf0}}, + {handshake.TypeFinished, false, 0, 7, []byte{0xf1}}, + }, + Expected: []byte{0x57, 
0x35, 0x5a, 0xc3, 0x30, 0x3c, 0x14, 0x8f, 0x11, 0xae, 0xf7, 0xcb, 0x17, 0x94, 0x56, 0xb9, 0x23, 0x2c, 0xde, 0x33, 0xa8, 0x18, 0xdf, 0xda, 0x2c, 0x2f, 0xcb, 0x93, 0x25, 0x74, 0x9a, 0x6b}, + }, + } { + h := newHandshakeCache() + for _, i := range test.Input { + h.push(i.data, i.epoch, i.messageSequence, i.typ, i.isClient) + } + + cipherSuite := ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{} + verifyData, err := h.sessionHash(cipherSuite.HashFunc(), 0) + if err != nil { + t.Error(err) + } + if !bytes.Equal(verifyData, test.Expected) { + t.Errorf("handshakeCacheSesssionHassh '%s' exp: % 02x actual % 02x", test.Name, test.Expected, verifyData) + } + } +} diff --git a/replace/dtls/handshake_test.go b/replace/dtls/handshake_test.go new file mode 100644 index 000000000..5bba7f812 --- /dev/null +++ b/replace/dtls/handshake_test.go @@ -0,0 +1,56 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "reflect" + "testing" + "time" + + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/extension" + "github.com/pion/dtls/v2/pkg/protocol/handshake" +) + +func TestHandshakeMessage(t *testing.T) { + rawHandshakeMessage := []byte{ + 0x01, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0xfe, 0xfd, 0xb6, + 0x2f, 0xce, 0x5c, 0x42, 0x54, 0xff, 0x86, 0xe1, 0x24, 0x41, 0x91, 0x42, 0x62, 0x15, 0xad, + 0x16, 0xc9, 0x15, 0x8d, 0x95, 0x71, 0x8a, 0xbb, 0x22, 0xd7, 0x47, 0xec, 0xd8, 0x3d, 0xdc, + 0x4b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + } + parsedHandshake := &handshake.Handshake{ + Header: handshake.Header{ + Length: 0x29, + FragmentLength: 0x29, + Type: handshake.TypeClientHello, + }, + Message: &handshake.MessageClientHello{ + Version: protocol.Version{Major: 0xFE, Minor: 0xFD}, + Random: handshake.Random{ + GMTUnixTime: time.Unix(3056586332, 0), + RandomBytes: [28]byte{0x42, 0x54, 0xff, 0x86, 0xe1, 0x24, 0x41, 0x91, 0x42, 0x62, 0x15, 0xad, 0x16, 0xc9, 0x15, 0x8d, 0x95, 0x71, 0x8a, 0xbb, 0x22, 0xd7, 0x47, 0xec, 0xd8, 0x3d, 0xdc, 0x4b}, + }, + SessionID: []byte{}, + Cookie: []byte{}, + CipherSuiteIDs: []uint16{}, + CompressionMethods: []*protocol.CompressionMethod{}, + Extensions: []extension.Extension{}, + }, + } + + h := &handshake.Handshake{} + if err := h.Unmarshal(rawHandshakeMessage); err != nil { + t.Error(err) + } else if !reflect.DeepEqual(h, parsedHandshake) { + t.Errorf("handshakeMessageClientHello unmarshal: got %#v, want %#v", h, parsedHandshake) + } + + raw, err := h.Marshal() + if err != nil { + t.Error(err) + } else if !reflect.DeepEqual(raw, rawHandshakeMessage) { + t.Errorf("handshakeMessageClientHello marshal: got %#v, want %#v", raw, rawHandshakeMessage) + } +} diff --git a/replace/dtls/handshaker.go b/replace/dtls/handshaker.go new file mode 100644 index 000000000..38b0097c4 --- /dev/null +++ b/replace/dtls/handshaker.go @@ -0,0 +1,360 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "net" + "sync" + "time" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/signaturehash" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/logging" +) + +// [RFC6347 Section-4.2.4] +// +-----------+ +// +---> | PREPARING | <--------------------+ +// | +-----------+ | +// | | | +// | | Buffer next flight | +// | | | +// | \|/ | +// | +-----------+ | 
+// | | SENDING |<------------------+ | Send +// | +-----------+ | | HelloRequest +// Receive | | | | +// next | | Send flight | | or +// flight | +--------+ | | +// | | | Set retransmit timer | | Receive +// | | \|/ | | HelloRequest +// | | +-----------+ | | Send +// +--)--| WAITING |-------------------+ | ClientHello +// | | +-----------+ Timer expires | | +// | | | | | +// | | +------------------------+ | +// Receive | | Send Read retransmit | +// last | | last | +// flight | | flight | +// | | | +// \|/\|/ | +// +-----------+ | +// | FINISHED | -------------------------------+ +// +-----------+ +// | /|\ +// | | +// +---+ +// Read retransmit +// Retransmit last flight + +type handshakeState uint8 + +const ( + handshakeErrored handshakeState = iota + handshakePreparing + handshakeSending + handshakeWaiting + handshakeFinished +) + +func (s handshakeState) String() string { + switch s { + case handshakeErrored: + return "Errored" + case handshakePreparing: + return "Preparing" + case handshakeSending: + return "Sending" + case handshakeWaiting: + return "Waiting" + case handshakeFinished: + return "Finished" + default: + return "Unknown" + } +} + +type handshakeFSM struct { + currentFlight flightVal + flights []*packet + retransmit bool + state *State + cache *handshakeCache + cfg *handshakeConfig + closed chan struct{} +} + +type handshakeConfig struct { + localPSKCallback PSKCallback + localPSKIdentityHint []byte + localCipherSuites []CipherSuite // Available CipherSuites + localSignatureSchemes []signaturehash.Algorithm // Available signature schemes + extendedMasterSecret ExtendedMasterSecretType // Policy for the Extended Master Support extension + localSRTPProtectionProfiles []SRTPProtectionProfile // Available SRTPProtectionProfiles, if empty no SRTP support + serverName string + supportedProtocols []string + clientAuth ClientAuthType // If we are a client should we request a client certificate + localCertificates []tls.Certificate + nameToCertificate map[string]*tls.Certificate + insecureSkipVerify bool + verifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error + verifyConnection func(*State) error + sessionStore SessionStore + rootCAs *x509.CertPool + clientCAs *x509.CertPool + retransmitInterval time.Duration + customCipherSuites func() []CipherSuite + ellipticCurves []elliptic.Curve + insecureSkipHelloVerify bool + + // [Psiphon] + // Conjure DTLS support, from: https://github.com/mingyech/dtls/commit/a56eccc1 + customClientHelloRandom func() [handshake.RandomBytesLength]byte + + onFlightState func(flightVal, handshakeState) + log logging.LeveledLogger + keyLogWriter io.Writer + + localGetCertificate func(*ClientHelloInfo) (*tls.Certificate, error) + localGetClientCertificate func(*CertificateRequestInfo) (*tls.Certificate, error) + + initialEpoch uint16 + + mu sync.Mutex +} + +type flightConn interface { + notify(ctx context.Context, level alert.Level, desc alert.Description) error + writePackets(context.Context, []*packet) error + recvHandshake() <-chan chan struct{} + setLocalEpoch(epoch uint16) + handleQueuedPackets(context.Context) error + sessionKey() []byte + + // [Psiphon] + LocalAddr() net.Addr +} + +func (c *handshakeConfig) writeKeyLog(label string, clientRandom, secret []byte) { + if c.keyLogWriter == nil { + return + } + c.mu.Lock() + defer c.mu.Unlock() + _, err := c.keyLogWriter.Write([]byte(fmt.Sprintf("%s %x %x\n", label, clientRandom, secret))) + if err != nil { + c.log.Debugf("failed to write key log file: %s", err) + } +} 
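+
+// For example, writeKeyLog("CLIENT_RANDOM", clientRandom, masterSecret) appends a
+// line of the form "CLIENT_RANDOM <hex client random> <hex master secret>", i.e.
+// the SSLKEYLOGFILE format that tools such as Wireshark can use to decrypt
+// captured DTLS sessions.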
+ +func srvCliStr(isClient bool) string { + if isClient { + return "client" + } + return "server" +} + +func newHandshakeFSM( + s *State, cache *handshakeCache, cfg *handshakeConfig, + initialFlight flightVal, +) *handshakeFSM { + return &handshakeFSM{ + currentFlight: initialFlight, + state: s, + cache: cache, + cfg: cfg, + closed: make(chan struct{}), + } +} + +func (s *handshakeFSM) Run(ctx context.Context, c flightConn, initialState handshakeState) error { + state := initialState + defer func() { + close(s.closed) + }() + for { + s.cfg.log.Tracef("[handshake:%s] %s: %s", srvCliStr(s.state.isClient), s.currentFlight.String(), state.String()) + if s.cfg.onFlightState != nil { + s.cfg.onFlightState(s.currentFlight, state) + } + var err error + switch state { + case handshakePreparing: + state, err = s.prepare(ctx, c) + case handshakeSending: + state, err = s.send(ctx, c) + case handshakeWaiting: + state, err = s.wait(ctx, c) + case handshakeFinished: + state, err = s.finish(ctx, c) + default: + return errInvalidFSMTransition + } + if err != nil { + return err + } + } +} + +func (s *handshakeFSM) Done() <-chan struct{} { + return s.closed +} + +func (s *handshakeFSM) prepare(ctx context.Context, c flightConn) (handshakeState, error) { + s.flights = nil + // Prepare flights + var ( + a *alert.Alert + err error + pkts []*packet + ) + gen, retransmit, errFlight := s.currentFlight.getFlightGenerator() + if errFlight != nil { + err = errFlight + a = &alert.Alert{Level: alert.Fatal, Description: alert.InternalError} + } else { + // [Psiphon] + // Pass in dial context for GetDTLSSeed. + pkts, a, err = gen(ctx, c, s.state, s.cache, s.cfg) + s.retransmit = retransmit + } + if a != nil { + if alertErr := c.notify(ctx, a.Level, a.Description); alertErr != nil { + if err != nil { + err = alertErr + } + } + } + if err != nil { + return handshakeErrored, err + } + + s.flights = pkts + epoch := s.cfg.initialEpoch + nextEpoch := epoch + for _, p := range s.flights { + p.record.Header.Epoch += epoch + if p.record.Header.Epoch > nextEpoch { + nextEpoch = p.record.Header.Epoch + } + if h, ok := p.record.Content.(*handshake.Handshake); ok { + h.Header.MessageSequence = uint16(s.state.handshakeSendSequence) + s.state.handshakeSendSequence++ + } + } + if epoch != nextEpoch { + s.cfg.log.Tracef("[handshake:%s] -> changeCipherSpec (epoch: %d)", srvCliStr(s.state.isClient), nextEpoch) + c.setLocalEpoch(nextEpoch) + } + return handshakeSending, nil +} + +func (s *handshakeFSM) send(ctx context.Context, c flightConn) (handshakeState, error) { + // Send flights + if err := c.writePackets(ctx, s.flights); err != nil { + return handshakeErrored, err + } + + if s.currentFlight.isLastSendFlight() { + return handshakeFinished, nil + } + return handshakeWaiting, nil +} + +func (s *handshakeFSM) wait(ctx context.Context, c flightConn) (handshakeState, error) { //nolint:gocognit + parse, errFlight := s.currentFlight.getFlightParser() + if errFlight != nil { + if alertErr := c.notify(ctx, alert.Fatal, alert.InternalError); alertErr != nil { + if errFlight != nil { + return handshakeErrored, alertErr + } + } + return handshakeErrored, errFlight + } + + retransmitTimer := time.NewTimer(s.cfg.retransmitInterval) + for { + select { + case done := <-c.recvHandshake(): + nextFlight, alert, err := parse(ctx, c, s.state, s.cache, s.cfg) + close(done) + if alert != nil { + if alertErr := c.notify(ctx, alert.Level, alert.Description); alertErr != nil { + if err != nil { + err = alertErr + } + } + } + if err != nil { + return 
handshakeErrored, err + } + if nextFlight == 0 { + break + } + s.cfg.log.Tracef("[handshake:%s] %s -> %s", srvCliStr(s.state.isClient), s.currentFlight.String(), nextFlight.String()) + if nextFlight.isLastRecvFlight() && s.currentFlight == nextFlight { + return handshakeFinished, nil + } + s.currentFlight = nextFlight + return handshakePreparing, nil + + case <-retransmitTimer.C: + if !s.retransmit { + return handshakeWaiting, nil + } + return handshakeSending, nil + case <-ctx.Done(): + return handshakeErrored, ctx.Err() + } + } +} + +func (s *handshakeFSM) finish(ctx context.Context, c flightConn) (handshakeState, error) { + parse, errFlight := s.currentFlight.getFlightParser() + if errFlight != nil { + if alertErr := c.notify(ctx, alert.Fatal, alert.InternalError); alertErr != nil { + if errFlight != nil { + return handshakeErrored, alertErr + } + } + return handshakeErrored, errFlight + } + + retransmitTimer := time.NewTimer(s.cfg.retransmitInterval) + select { + case done := <-c.recvHandshake(): + nextFlight, alert, err := parse(ctx, c, s.state, s.cache, s.cfg) + close(done) + if alert != nil { + if alertErr := c.notify(ctx, alert.Level, alert.Description); alertErr != nil { + if err != nil { + err = alertErr + } + } + } + if err != nil { + return handshakeErrored, err + } + if nextFlight == 0 { + break + } + if nextFlight.isLastRecvFlight() && s.currentFlight == nextFlight { + return handshakeFinished, nil + } + <-retransmitTimer.C + // Retransmit last flight + return handshakeSending, nil + + case <-ctx.Done(): + return handshakeErrored, ctx.Err() + } + return handshakeFinished, nil +} diff --git a/replace/dtls/handshaker_test.go b/replace/dtls/handshaker_test.go new file mode 100644 index 000000000..6cf7cd3cf --- /dev/null +++ b/replace/dtls/handshaker_test.go @@ -0,0 +1,447 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package dtls + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "sync" + "testing" + "time" + + "github.com/pion/dtls/v2/pkg/crypto/selfsign" + "github.com/pion/dtls/v2/pkg/crypto/signaturehash" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" + "github.com/pion/logging" + "github.com/pion/transport/v2/test" +) + +const nonZeroRetransmitInterval = 100 * time.Millisecond + +// Test that writes to the key log are in the correct format and only applies +// when a key log writer is given. +func TestWriteKeyLog(t *testing.T) { + var buf bytes.Buffer + cfg := handshakeConfig{ + keyLogWriter: &buf, + } + cfg.writeKeyLog("LABEL", []byte{0xAA, 0xBB, 0xCC}, []byte{0xDD, 0xEE, 0xFF}) + + // Secrets follow the format