diff --git a/.drone.yml b/.drone.yml index 03a83c070fea1..004b53a2e9575 100644 --- a/.drone.yml +++ b/.drone.yml @@ -503,7 +503,7 @@ steps: pull: always image: techknowlogick/xgo:go-1.16.x commands: - - curl -sL https://deb.nodesource.com/setup_14.x | bash - && apt-get install -y nodejs + - curl -sL https://deb.nodesource.com/setup_16.x | bash - && apt-get install -y nodejs - export PATH=$PATH:$GOPATH/bin - make release environment: @@ -599,7 +599,7 @@ steps: pull: always image: techknowlogick/xgo:go-1.16.x commands: - - curl -sL https://deb.nodesource.com/setup_14.x | bash - && apt-get install -y nodejs + - curl -sL https://deb.nodesource.com/setup_16.x | bash - && apt-get install -y nodejs - export PATH=$PATH:$GOPATH/bin - make release environment: diff --git a/Dockerfile.rootless b/Dockerfile.rootless index a379babc2d9a7..6f4e704f00726 100644 --- a/Dockerfile.rootless +++ b/Dockerfile.rootless @@ -35,6 +35,7 @@ RUN apk --no-cache add \ ca-certificates \ gettext \ git \ + curl \ gnupg RUN addgroup \ diff --git a/cmd/web.go b/cmd/web.go index 423917ba4e112..3a5c36833b272 100644 --- a/cmd/web.go +++ b/cmd/web.go @@ -175,7 +175,7 @@ func setPort(port string) error { cfg.Section("server").Key("LOCAL_ROOT_URL").SetValue(defaultLocalURL) if err := cfg.SaveTo(setting.CustomConf); err != nil { - return fmt.Errorf("Error saving generated JWT Secret to custom config: %v", err) + return fmt.Errorf("Error saving generated LOCAL_ROOT_URL to custom config: %v", err) } } return nil diff --git a/cmd/web_letsencrypt.go b/cmd/web_letsencrypt.go index 9cfc9b3ab2212..15a3c92e8baab 100644 --- a/cmd/web_letsencrypt.go +++ b/cmd/web_letsencrypt.go @@ -24,10 +24,14 @@ func runLetsEncrypt(listenAddr, domain, directory, email string, m http.Handler) enableHTTPChallenge := true enableTLSALPNChallenge := true altHTTPPort := 0 + altTLSALPNPort := 0 if p, err := strconv.Atoi(setting.PortToRedirect); err == nil { altHTTPPort = p } + if p, err := strconv.Atoi(setting.HTTPPort); err == nil { + 
altTLSALPNPort = p + } magic := certmagic.NewDefault() magic.Storage = &certmagic.FileStorage{Path: directory} @@ -36,7 +40,8 @@ func runLetsEncrypt(listenAddr, domain, directory, email string, m http.Handler) Agreed: setting.LetsEncryptTOS, DisableHTTPChallenge: !enableHTTPChallenge, DisableTLSALPNChallenge: !enableTLSALPNChallenge, - ListenHost: listenAddr, + ListenHost: setting.HTTPAddr, + AltTLSALPNPort: altTLSALPNPort, AltHTTPPort: altHTTPPort, }) diff --git a/contrib/systemd/gitea.service b/contrib/systemd/gitea.service index ac6a13ec573e7..d6a4377ec8091 100644 --- a/contrib/systemd/gitea.service +++ b/contrib/systemd/gitea.service @@ -3,14 +3,23 @@ Description=Gitea (Git with a cup of tea) After=syslog.target After=network.target ### -# Don't forget to add the database service requirements +# Don't forget to add the database service dependencies ### # -#Requires=mysql.service -#Requires=mariadb.service -#Requires=postgresql.service -#Requires=memcached.service -#Requires=redis.service +#Wants=mysql.service +#After=mysql.service +# +#Wants=mariadb.service +#After=mariadb.service +# +#Wants=postgresql.service +#After=postgresql.service +# +#Wants=memcached.service +#After=memcached.service +# +#Wants=redis.service +#After=redis.service # ### # If using socket activation for main http/s diff --git a/docs/content/doc/developers/api-usage.en-us.md b/docs/content/doc/developers/api-usage.en-us.md index 15fedbe2c164d..06cbc9b72e1e1 100644 --- a/docs/content/doc/developers/api-usage.en-us.md +++ b/docs/content/doc/developers/api-usage.en-us.md @@ -40,8 +40,42 @@ better understand this by looking at the code -- as of this writing, Gitea parses queries and headers to find the token in [modules/auth/auth.go](https://github.com/go-gitea/gitea/blob/6efdcaed86565c91a3dc77631372a9cc45a58e89/modules/auth/auth.go#L47). -You can create an API key token via your Gitea installation's web interface: -`Settings | Applications | Generate New Token`. 
+## Generating and listing API tokens + +A new token can be generated with a `POST` request to +`/users/:name/tokens`. + +Note that `/users/:name/tokens` is a special endpoint and requires you +to authenticate using `BasicAuth` and a password, as follows: + + +```sh +$ curl -XPOST -H "Content-Type: application/json" -k -d '{"name":"test"}' -u username:password https://gitea.your.host/api/v1/users/username/tokens +{"id":1,"name":"test","sha1":"9fcb1158165773dd010fca5f0cf7174316c3e37d","token_last_eight":"16c3e37d"} +``` + +The ``sha1`` (the token) is only returned once and is not stored in +plain-text. It will not be displayed when listing tokens with a `GET` +request; e.g. + +```sh +$ curl --request GET --url https://yourusername:password@gitea.your.host/api/v1/users/yourusername/tokens +[{"name":"test","sha1":"","token_last_eight":"........"},{"name":"dev","sha1":"","token_last_eight":"........"}] +``` + +To use the API with basic authentication with two factor authentication +enabled, you'll need to send an additional header that contains the one +time password (6 digit rotating token). +An example of the header is `X-Gitea-OTP: 123456` where `123456` +is where you'd place the code from your authenticator. +Here is how the request would look like in curl: + +```sh +$ curl -H "X-Gitea-OTP: 123456" --request GET --url https://yourusername:yourpassword@gitea.your.host/api/v1/users/yourusername/tokens +``` + +You can also create an API key token via your Gitea installation's web +interface: `Settings | Applications | Generate New Token`. 
## OAuth2 Provider @@ -82,26 +116,6 @@ or on The OpenAPI document is at: `https://gitea.your.host/swagger.v1.json` -## Listing your issued tokens via the API - -As mentioned in -[#3842](https://github.com/go-gitea/gitea/issues/3842#issuecomment-397743346), -`/users/:name/tokens` is special and requires you to authenticate -using BasicAuth, as follows: - -### Using basic authentication: - -```sh -$ curl --request GET --url https://yourusername:yourpassword@gitea.your.host/api/v1/users/yourusername/tokens -[{"name":"test","sha1":"..."},{"name":"dev","sha1":"..."}] -``` - -As of v1.8.0 of Gitea, if using basic authentication with the API and your user has two factor authentication enabled, you'll need to send an additional header that contains the one time password (6 digit rotating token). An example of the header is `X-Gitea-OTP: 123456` where `123456` is where you'd place the code from your authenticator. Here is how the request would look like in curl: - -```sh -$ curl -H "X-Gitea-OTP: 123456" --request GET --url https://yourusername:yourpassword@gitea.your.host/api/v1/users/yourusername/tokens -``` - ## Sudo The API allows admin users to sudo API requests as another user. Simply add either a `sudo=` parameter or `Sudo:` request header with the username of the user to sudo. diff --git a/docs/content/doc/developers/hacking-on-gitea.en-us.md b/docs/content/doc/developers/hacking-on-gitea.en-us.md index ede693ce179e4..360a46d20fbf1 100644 --- a/docs/content/doc/developers/hacking-on-gitea.en-us.md +++ b/docs/content/doc/developers/hacking-on-gitea.en-us.md @@ -127,7 +127,7 @@ See `make help` for all available `make` targets. 
Also see [`.drone.yml`](https: ## Building continuously -To run and continously rebuild when source files change: +To run and continuously rebuild when source files change: ```bash make watch @@ -216,7 +216,7 @@ You should validate your generated Swagger file and spell-check it with: make swagger-validate misspell-check ``` -You should commit the changed swagger JSON file. The continous integration +You should commit the changed swagger JSON file. The continuous integration server will check that this has been done using: ```bash @@ -315,7 +315,7 @@ branches as we will need to update it to main before merging and/or may be able to help fix issues directly. Any PR requires two approvals from the Gitea maintainers and needs to pass the -continous integration. Take a look at our +continuous integration. Take a look at our [`CONTRIBUTING.md`](https://github.com/go-gitea/gitea/blob/main/CONTRIBUTING.md) document. diff --git a/docs/content/doc/features/authentication.en-us.md b/docs/content/doc/features/authentication.en-us.md index 0c83fa4d2f8d6..223d7aa4fb0cc 100644 --- a/docs/content/doc/features/authentication.en-us.md +++ b/docs/content/doc/features/authentication.en-us.md @@ -88,8 +88,8 @@ Adds the following fields: - Bind Password (optional) - The password for the Bind DN specified above, if any. _Note: The password - is stored in plaintext at the server. As such, ensure that the Bind DN - has as few privileges as possible._ + is stored encrypted with the SECRET_KEY on the server. 
It is still recommended + to ensure that the Bind DN has as few privileges as possible._ - User Search Base **(required)** diff --git a/go.mod b/go.mod index e654de43a3618..e2c1c6aba897d 100644 --- a/go.mod +++ b/go.mod @@ -131,7 +131,7 @@ require ( mvdan.cc/xurls/v2 v2.2.0 strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251 xorm.io/builder v0.3.9 - xorm.io/xorm v1.0.7 + xorm.io/xorm v1.1.0 ) replace github.com/hashicorp/go-version => github.com/6543/go-version v1.3.1 diff --git a/go.sum b/go.sum index 4ca9716f5e45f..b511d6c3f75c0 100644 --- a/go.sum +++ b/go.sum @@ -250,6 +250,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8= github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= @@ -812,6 +813,7 @@ github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxm github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.7 
h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA= github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -982,6 +984,8 @@ github.com/quasoft/websspi v1.0.0 h1:5nDgdM5xSur9s+B5w2xQ5kxf5nUGqgFgU4W0aDLZ8Mw github.com/quasoft/websspi v1.0.0/go.mod h1:HmVdl939dQ0WIXZhyik+ARdI03M6bQzaSEKcgpFmewk= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -1403,6 +1407,7 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1496,6 +1501,7 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20200928182047-19e03678916f/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20200929161345-d7fc70abf50f/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1663,6 +1669,33 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +modernc.org/cc/v3 v3.31.5-0.20210308123301-7a3e9dab9009 h1:u0oCo5b9wyLr++HF3AN9JicGhkUxJhMz51+8TIZH9N0= +modernc.org/cc/v3 v3.31.5-0.20210308123301-7a3e9dab9009/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= +modernc.org/ccgo/v3 v3.9.0 h1:JbcEIqjw4Agf+0g3Tc85YvfYqkkFOv6xBwS4zkfqSoA= +modernc.org/ccgo/v3 v3.9.0/go.mod h1:nQbgkn8mwzPdp4mm6BT6+p85ugQ7FrGgIcYaE7nSrpY= +modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/libc v1.8.0 
h1:Pp4uv9g0csgBMpGPABKtkieF6O5MGhfGo6ZiOdlYfR8= +modernc.org/libc v1.8.0/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.2.2 h1:+yFk8hBprV+4c0U9GjFtL+dV3N8hOJ8JCituQcMShFY= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.0.4 h1:utMBrFcpnQDdNsmM6asmyH/FM9TqLPS7XF7otpJmrwM= +modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= +modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.10.1-0.20210314190707-798bbeb9bb84 h1:rgEUzE849tFlHSoeCrKyS9cZAljC+DY7MdMHKq6R6sY= +modernc.org/sqlite v1.10.1-0.20210314190707-798bbeb9bb84/go.mod h1:PGzq6qlhyYjL6uVbSgS6WoF7ZopTW/sI7+7p+mb4ZVU= +modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc= +modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/tcl v1.5.0 h1:euZSUNfE0Fd4W8VqXI1Ly1v7fqDJoBuAV88Ea+SnaSs= +modernc.org/tcl v1.5.0/go.mod h1:gb57hj4pO8fRrK54zveIfFXBaMHK3SKJNWcmRw1cRzc= +modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +modernc.org/z v1.0.1 h1:WyIDpEpAIx4Hel6q/Pcgj/VhaQV5XPJ2I6ryIYbjnpc= +modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= mvdan.cc/xurls/v2 v2.2.0 h1:NSZPykBXJFCetGZykLAxaL6SIpvbVy/UFEniIfHAa8A= mvdan.cc/xurls/v2 v2.2.0/go.mod h1:EV1RMtya9D6G5DMYPGD8zTQzaHet6Jh8gFlRgGRJeO8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -1673,8 +1706,9 @@ sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1 strk.kbt.io/projects/go/libravatar 
v0.0.0-20191008002943-06d1c002b251 h1:mUcz5b3FJbP5Cvdq7Khzn6J9OCUQJaBwgBkCR+MOwSs= strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251/go.mod h1:FJGmPh3vz9jSos1L/F91iAgnC/aejc0wIIrF2ZwJxdY= xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE= +xorm.io/builder v0.3.8/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE= xorm.io/builder v0.3.9 h1:Sd65/LdWyO7LR8+Cbd+e7mm3sK/7U9k0jS3999IDHMc= xorm.io/builder v0.3.9/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE= xorm.io/xorm v1.0.6/go.mod h1:uF9EtbhODq5kNWxMbnBEj8hRRZnlcNSz2t2N7HW/+A4= -xorm.io/xorm v1.0.7 h1:26yBTDVI+CfQpVz2Y88fISh+aiJXIPP4eNoTJlwzsC4= -xorm.io/xorm v1.0.7/go.mod h1:uF9EtbhODq5kNWxMbnBEj8hRRZnlcNSz2t2N7HW/+A4= +xorm.io/xorm v1.1.0 h1:mkEsQXLauZajiOld2cB2PkFcUZKePepPgs1bC1dw8RA= +xorm.io/xorm v1.1.0/go.mod h1:EDzNHMuCVZNszkIRSLL2nI0zX+nQE8RstAVranlSfqI= diff --git a/integrations/api_repo_lfs_locks_test.go b/integrations/api_repo_lfs_locks_test.go index 69981d1c42000..ffc239567dc91 100644 --- a/integrations/api_repo_lfs_locks_test.go +++ b/integrations/api_repo_lfs_locks_test.go @@ -44,7 +44,7 @@ func TestAPILFSLocksNotLogin(t *testing.T) { resp := MakeRequest(t, req, http.StatusUnauthorized) var lfsLockError api.LFSLockError DecodeJSON(t, resp, &lfsLockError) - assert.Equal(t, "Unauthorized", lfsLockError.Message) + assert.Equal(t, "You must have pull access to list locks", lfsLockError.Message) } func TestAPILFSLocksLogged(t *testing.T) { diff --git a/models/admin.go b/models/admin.go index 7911ce75112cd..3a784d66964ab 100644 --- a/models/admin.go +++ b/models/admin.go @@ -114,6 +114,11 @@ func DeleteNotice(id int64) error { // DeleteNotices deletes all notices with ID from start to end (inclusive). 
func DeleteNotices(start, end int64) error { + if start == 0 && end == 0 { + _, err := x.Exec("DELETE FROM notice") + return err + } + sess := x.Where("id >= ?", start) if end > 0 { sess.And("id <= ?", end) diff --git a/models/login_source.go b/models/login_source.go index 57b1d56bb2bcd..098b48a8cd5f4 100644 --- a/models/login_source.go +++ b/models/login_source.go @@ -18,6 +18,7 @@ import ( "code.gitea.io/gitea/modules/auth/oauth2" "code.gitea.io/gitea/modules/auth/pam" "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/secret" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/timeutil" "code.gitea.io/gitea/modules/util" @@ -77,11 +78,25 @@ type LDAPConfig struct { // FromDB fills up a LDAPConfig from serialized format. func (cfg *LDAPConfig) FromDB(bs []byte) error { json := jsoniter.ConfigCompatibleWithStandardLibrary - return json.Unmarshal(bs, &cfg) + err := json.Unmarshal(bs, &cfg) + if err != nil { + return err + } + if cfg.BindPasswordEncrypt != "" { + cfg.BindPassword, err = secret.DecryptSecret(setting.SecretKey, cfg.BindPasswordEncrypt) + cfg.BindPasswordEncrypt = "" + } + return err } // ToDB exports a LDAPConfig to a serialized format. 
func (cfg *LDAPConfig) ToDB() ([]byte, error) { + var err error + cfg.BindPasswordEncrypt, err = secret.EncryptSecret(setting.SecretKey, cfg.BindPassword) + if err != nil { + return nil, err + } + cfg.BindPassword = "" json := jsoniter.ConfigCompatibleWithStandardLibrary return json.Marshal(cfg) } diff --git a/modules/auth/ldap/ldap.go b/modules/auth/ldap/ldap.go index 6c557de018c4b..91ad33a60f3a4 100644 --- a/modules/auth/ldap/ldap.go +++ b/modules/auth/ldap/ldap.go @@ -35,6 +35,7 @@ type Source struct { SecurityProtocol SecurityProtocol SkipVerify bool BindDN string // DN to bind with + BindPasswordEncrypt string // Encrypted Bind BN password BindPassword string // Bind DN password UserBase string // Base search path for users UserDN string // Template for the DN of the user for simple auth diff --git a/modules/auth/sso/basic.go b/modules/auth/sso/basic.go index d4ac8f8089c90..a18e127ff93f0 100644 --- a/modules/auth/sso/basic.go +++ b/modules/auth/sso/basic.go @@ -14,6 +14,7 @@ import ( "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/timeutil" + "code.gitea.io/gitea/modules/web/middleware" ) // Ensure the struct implements the interface. @@ -40,7 +41,7 @@ func (b *Basic) Free() error { // IsEnabled returns true as this plugin is enabled by default and its not possible to disable // it from settings. func (b *Basic) IsEnabled() bool { - return setting.Service.EnableBasicAuth + return true } // VerifyAuthData extracts and validates Basic data (username and password/token) from the @@ -48,17 +49,22 @@ func (b *Basic) IsEnabled() bool { // name/token on successful validation. // Returns nil if header is empty or validation fails. 
func (b *Basic) VerifyAuthData(req *http.Request, w http.ResponseWriter, store DataStore, sess SessionStore) *models.User { + + // Basic authentication should only fire on API, Download or on Git or LFSPaths + if middleware.IsInternalPath(req) || !middleware.IsAPIPath(req) && !isAttachmentDownload(req) && !isGitOrLFSPath(req) { + return nil + } + baHead := req.Header.Get("Authorization") if len(baHead) == 0 { return nil } - auths := strings.Fields(baHead) + auths := strings.SplitN(baHead, " ", 2) if len(auths) != 2 || (auths[0] != "Basic" && auths[0] != "basic") { return nil } - var u *models.User uname, passwd, _ := base.BasicAuthDecode(auths[1]) // Check if username or password is a token @@ -76,20 +82,21 @@ func (b *Basic) VerifyAuthData(req *http.Request, w http.ResponseWriter, store D uid := CheckOAuthAccessToken(authToken) if uid != 0 { log.Trace("Basic Authorization: Valid OAuthAccessToken for user[%d]", uid) - var err error - store.GetData()["IsApiToken"] = true - u, err = models.GetUserByID(uid) + u, err := models.GetUserByID(uid) if err != nil { log.Error("GetUserByID: %v", err) return nil } + + store.GetData()["IsApiToken"] = true + return u } + token, err := models.GetAccessTokenBySHA(authToken) if err == nil { log.Trace("Basic Authorization: Valid AccessToken for user[%d]", uid) - - u, err = models.GetUserByID(token.UID) + u, err := models.GetUserByID(token.UID) if err != nil { log.Error("GetUserByID: %v", err) return nil @@ -99,22 +106,24 @@ func (b *Basic) VerifyAuthData(req *http.Request, w http.ResponseWriter, store D if err = models.UpdateAccessToken(token); err != nil { log.Error("UpdateAccessToken: %v", err) } + + store.GetData()["IsApiToken"] = true + return u } else if !models.IsErrAccessTokenNotExist(err) && !models.IsErrAccessTokenEmpty(err) { log.Error("GetAccessTokenBySha: %v", err) } - if u == nil { - log.Trace("Basic Authorization: Attempting SignIn for %s", uname) + if !setting.Service.EnableBasicAuth { + return nil + } - u, err = 
models.UserSignIn(uname, passwd) - if err != nil { - if !models.IsErrUserNotExist(err) { - log.Error("UserSignIn: %v", err) - } - return nil + log.Trace("Basic Authorization: Attempting SignIn for %s", uname) + u, err := models.UserSignIn(uname, passwd) + if err != nil { + if !models.IsErrUserNotExist(err) { + log.Error("UserSignIn: %v", err) } - } else { - store.GetData()["IsApiToken"] = true + return nil } log.Trace("Basic Authorization: Logged in user %-v", u) diff --git a/modules/auth/sso/reverseproxy.go b/modules/auth/sso/reverseproxy.go index 62598a15cdc33..d4fae9d5f425b 100644 --- a/modules/auth/sso/reverseproxy.go +++ b/modules/auth/sso/reverseproxy.go @@ -12,6 +12,7 @@ import ( "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/web/middleware" gouuid "github.com/google/uuid" ) @@ -69,13 +70,21 @@ func (r *ReverseProxy) VerifyAuthData(req *http.Request, w http.ResponseWriter, user, err := models.GetUserByName(username) if err != nil { - if models.IsErrUserNotExist(err) && r.isAutoRegisterAllowed() { - return r.newUser(req) + if !models.IsErrUserNotExist(err) || !r.isAutoRegisterAllowed() { + log.Error("GetUserByName: %v", err) + return nil } - log.Error("GetUserByName: %v", err) - return nil + user = r.newUser(req) } + // Make sure requests to API paths, attachment downloads, git and LFS do not create a new session + if !middleware.IsAPIPath(req) && !isAttachmentDownload(req) && !isGitOrLFSPath(req) { + if sess.Get("uid").(int64) != user.ID { + handleSignIn(w, req, sess, user) + } + } + store.GetData()["IsReverseProxy"] = true + log.Trace("ReverseProxy Authorization: Logged in user %-v", user) return user } @@ -104,7 +113,6 @@ func (r *ReverseProxy) newUser(req *http.Request) *models.User { user := &models.User{ Name: username, Email: email, - Passwd: username, IsActive: true, } if err := models.CreateUser(user); err != nil { @@ -112,5 +120,6 @@ func (r *ReverseProxy) 
newUser(req *http.Request) *models.User { log.Error("CreateUser: %v", err) return nil } + return user } diff --git a/modules/auth/sso/sso.go b/modules/auth/sso/sso.go index 8785a5f068f28..2f949cb0f8584 100644 --- a/modules/auth/sso/sso.go +++ b/modules/auth/sso/sso.go @@ -9,10 +9,12 @@ import ( "fmt" "net/http" "reflect" + "regexp" "strings" "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/web/middleware" ) @@ -27,9 +29,9 @@ import ( // for users that have already signed in. var ssoMethods = []SingleSignOn{ &OAuth2{}, + &Basic{}, &Session{}, &ReverseProxy{}, - &Basic{}, } // The purpose of the following three function variables is to let the linter know that @@ -102,6 +104,19 @@ func isAttachmentDownload(req *http.Request) bool { return strings.HasPrefix(req.URL.Path, "/attachments/") && req.Method == "GET" } +var gitPathRe = regexp.MustCompile(`^/[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+/(?:(?:git-(?:(?:upload)|(?:receive))-pack$)|(?:info/refs$)|(?:HEAD$)|(?:objects/))`) +var lfsPathRe = regexp.MustCompile(`^/[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+/info/lfs/`) + +func isGitOrLFSPath(req *http.Request) bool { + if gitPathRe.MatchString(req.URL.Path) { + return true + } + if setting.LFS.StartServer { + return lfsPathRe.MatchString(req.URL.Path) + } + return false +} + // handleSignIn clears existing session variables and stores new ones for the specified user object func handleSignIn(resp http.ResponseWriter, req *http.Request, sess SessionStore, user *models.User) { _ = sess.Delete("openid_verified_uri") diff --git a/modules/auth/sso/sso_test.go b/modules/auth/sso/sso_test.go new file mode 100644 index 0000000000000..b6a7f099e3a2f --- /dev/null +++ b/modules/auth/sso/sso_test.go @@ -0,0 +1,124 @@ +// Copyright 2014 The Gogs Authors. All rights reserved. +// Copyright 2019 The Gitea Authors. All rights reserved. 
+// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package sso + +import ( + "net/http" + "testing" + + "code.gitea.io/gitea/modules/setting" +) + +func Test_isGitOrLFSPath(t *testing.T) { + + tests := []struct { + path string + + want bool + }{ + { + "/owner/repo/git-upload-pack", + true, + }, + { + "/owner/repo/git-receive-pack", + true, + }, + { + "/owner/repo/info/refs", + true, + }, + { + "/owner/repo/HEAD", + true, + }, + { + "/owner/repo/objects/info/alternates", + true, + }, + { + "/owner/repo/objects/info/http-alternates", + true, + }, + { + "/owner/repo/objects/info/packs", + true, + }, + { + "/owner/repo/objects/info/blahahsdhsdkla", + true, + }, + { + "/owner/repo/objects/01/23456789abcdef0123456789abcdef01234567", + true, + }, + { + "/owner/repo/objects/pack/pack-123456789012345678921234567893124567894.pack", + true, + }, + { + "/owner/repo/objects/pack/pack-0123456789abcdef0123456789abcdef0123456.idx", + true, + }, + { + "/owner/repo/stars", + false, + }, + { + "/notowner", + false, + }, + { + "/owner/repo", + false, + }, + { + "/owner/repo/commit/123456789012345678921234567893124567894", + false, + }, + } + lfsTests := []string{ + "/owner/repo/info/lfs/", + "/owner/repo/info/lfs/objects/batch", + "/owner/repo/info/lfs/objects/oid/filename", + "/owner/repo/info/lfs/objects/oid", + "/owner/repo/info/lfs/objects", + "/owner/repo/info/lfs/verify", + "/owner/repo/info/lfs/locks", + "/owner/repo/info/lfs/locks/verify", + "/owner/repo/info/lfs/locks/123/unlock", + } + + origLFSStartServer := setting.LFS.StartServer + + for _, tt := range tests { + t.Run(tt.path, func(t *testing.T) { + req, _ := http.NewRequest("POST", "http://localhost"+tt.path, nil) + setting.LFS.StartServer = false + if got := isGitOrLFSPath(req); got != tt.want { + t.Errorf("isGitOrLFSPath() = %v, want %v", got, tt.want) + } + setting.LFS.StartServer = true + if got := isGitOrLFSPath(req); got != tt.want { + 
t.Errorf("isGitOrLFSPath() = %v, want %v", got, tt.want) + } + }) + } + for _, tt := range lfsTests { + t.Run(tt, func(t *testing.T) { + req, _ := http.NewRequest("POST", tt, nil) + setting.LFS.StartServer = false + if got := isGitOrLFSPath(req); got != setting.LFS.StartServer { + t.Errorf("isGitOrLFSPath(%q) = %v, want %v, %v", tt, got, setting.LFS.StartServer, gitPathRe.MatchString(tt)) + } + setting.LFS.StartServer = true + if got := isGitOrLFSPath(req); got != setting.LFS.StartServer { + t.Errorf("isGitOrLFSPath(%q) = %v, want %v", tt, got, setting.LFS.StartServer) + } + }) + } + setting.LFS.StartServer = origLFSStartServer +} diff --git a/modules/context/context.go b/modules/context/context.go index 750941b1d1003..d812d7b58cddc 100644 --- a/modules/context/context.go +++ b/modules/context/context.go @@ -683,6 +683,9 @@ func Contexter() func(next http.Handler) http.Handler { } else { ctx.Data["SignedUserID"] = int64(0) ctx.Data["SignedUserName"] = "" + + // ensure the session uid is deleted + _ = ctx.Session.Delete("uid") } ctx.Resp.Header().Set(`X-Frame-Options`, `SAMEORIGIN`) diff --git a/modules/eventsource/manager.go b/modules/eventsource/manager.go index 212fe60569698..812d6739929da 100644 --- a/modules/eventsource/manager.go +++ b/modules/eventsource/manager.go @@ -13,6 +13,7 @@ type Manager struct { mutex sync.Mutex messengers map[int64]*Messenger + connection chan struct{} } var manager *Manager @@ -20,6 +21,7 @@ var manager *Manager func init() { manager = &Manager{ messengers: make(map[int64]*Messenger), + connection: make(chan struct{}, 1), } } @@ -36,6 +38,10 @@ func (m *Manager) Register(uid int64) <-chan *Event { messenger = NewMessenger(uid) m.messengers[uid] = messenger } + select { + case m.connection <- struct{}{}: + default: + } m.mutex.Unlock() return messenger.Register() } diff --git a/modules/eventsource/manager_run.go b/modules/eventsource/manager_run.go index ccfe2e07097a0..60598ecb495f5 100644 --- a/modules/eventsource/manager_run.go 
+++ b/modules/eventsource/manager_run.go @@ -34,6 +34,35 @@ loop: timer.Stop() break loop case <-timer.C: + m.mutex.Lock() + connectionCount := len(m.messengers) + if connectionCount == 0 { + log.Trace("Event source has no listeners") + // empty the connection channel + select { + case <-m.connection: + default: + } + } + m.mutex.Unlock() + if connectionCount == 0 { + // No listeners so the source can be paused + log.Trace("Pausing the eventsource") + select { + case <-ctx.Done(): + break loop + case <-m.connection: + log.Trace("Connection detected - restarting the eventsource") + // OK we're back so lets reset the timer and start again + // We won't change the "then" time because there could be concurrency issues + select { + case <-timer.C: + default: + } + continue + } + } + now := timeutil.TimeStampNow().Add(-2) uidCounts, err := models.GetUIDsAndNotificationCounts(then, now) diff --git a/modules/git/batch_reader.go b/modules/git/batch_reader.go index 3d3a6916f5355..d6ee0ce8e04d9 100644 --- a/modules/git/batch_reader.go +++ b/modules/git/batch_reader.go @@ -186,17 +186,18 @@ headerLoop: // constant hextable to help quickly convert between 20byte and 40byte hashes const hextable = "0123456789abcdef" -// To40ByteSHA converts a 20-byte SHA in a 40-byte slice into a 40-byte sha in place -// without allocations. This is at least 100x quicker that hex.EncodeToString -// NB This requires that sha is a 40-byte slice -func To40ByteSHA(sha []byte) []byte { +// To40ByteSHA converts a 20-byte SHA into a 40-byte sha. Input and output can be the +// same 40 byte slice to support in place conversion without allocations. 
+// This is at least 100x quicker than hex.EncodeToString +// NB This requires that out is a 40-byte slice +func To40ByteSHA(sha, out []byte) []byte { for i := 19; i >= 0; i-- { v := sha[i] vhi, vlo := v>>4, v&0x0f shi, slo := hextable[vhi], hextable[vlo] - sha[i*2], sha[i*2+1] = shi, slo + out[i*2], out[i*2+1] = shi, slo } - return sha + return out } // ParseTreeLineSkipMode reads an entry from a tree in a cat-file --batch stream diff --git a/modules/git/command.go b/modules/git/command.go index fe258954628e0..ef78464d5f1c0 100644 --- a/modules/git/command.go +++ b/modules/git/command.go @@ -124,12 +124,18 @@ func (c *Command) RunInDirTimeoutEnvFullPipelineFunc(env []string, timeout time. cmd := exec.CommandContext(ctx, c.name, c.args...) if env == nil { - cmd.Env = append(os.Environ(), fmt.Sprintf("LC_ALL=%s", DefaultLocale)) + cmd.Env = os.Environ() } else { cmd.Env = env - cmd.Env = append(cmd.Env, fmt.Sprintf("LC_ALL=%s", DefaultLocale)) } + cmd.Env = append( + cmd.Env, + fmt.Sprintf("LC_ALL=%s", DefaultLocale), + // avoid prompting for credentials interactively, supported since git v2.3 + "GIT_TERMINAL_PROMPT=0", + ) + // TODO: verify if this is still needed in golang 1.15 if goVersionLessThan115 { cmd.Env = append(cmd.Env, "GODEBUG=asyncpreemptoff=1") diff --git a/modules/git/commit_info_nogogit.go b/modules/git/commit_info_nogogit.go index b69d50dfc027c..485271f145152 100644 --- a/modules/git/commit_info_nogogit.go +++ b/modules/git/commit_info_nogogit.go @@ -310,7 +310,7 @@ revListLoop: commits[0] = string(commitID) } } - treeID = To40ByteSHA(treeID) + treeID = To40ByteSHA(treeID, treeID) _, err = batchStdinWriter.Write(treeID) if err != nil { return nil, err diff --git a/modules/git/pipeline/lfs_nogogit.go b/modules/git/pipeline/lfs_nogogit.go index 6113bb301df1a..e618dd04b7a3d 100644 --- a/modules/git/pipeline/lfs_nogogit.go +++ b/modules/git/pipeline/lfs_nogogit.go @@ -72,7 +72,7 @@ func FindLFSFile(repo *git.Repository, hash git.SHA1) ([]*LFSResult, 
error) { fnameBuf := make([]byte, 4096) modeBuf := make([]byte, 40) - workingShaBuf := make([]byte, 40) + workingShaBuf := make([]byte, 20) for scan.Scan() { // Get the next commit ID @@ -140,7 +140,9 @@ func FindLFSFile(repo *git.Repository, hash git.SHA1) ([]*LFSResult, error) { } resultsMap[curCommit.ID.String()+":"+curPath+string(fname)] = &result } else if string(mode) == git.EntryModeTree.String() { - trees = append(trees, git.To40ByteSHA(sha20byte)) + sha40Byte := make([]byte, 40) + git.To40ByteSHA(sha20byte, sha40Byte) + trees = append(trees, sha40Byte) paths = append(paths, curPath+string(fname)+"/") } } diff --git a/modules/graceful/context.go b/modules/graceful/context.go index 1ad1109b4e5bd..9d955329a42b9 100644 --- a/modules/graceful/context.go +++ b/modules/graceful/context.go @@ -6,17 +6,9 @@ package graceful import ( "context" - "fmt" "time" ) -// Errors for context.Err() -var ( - ErrShutdown = fmt.Errorf("Graceful Manager called Shutdown") - ErrHammer = fmt.Errorf("Graceful Manager called Hammer") - ErrTerminate = fmt.Errorf("Graceful Manager called Terminate") -) - // ChannelContext is a context that wraps a channel and error as a context type ChannelContext struct { done <-chan struct{} @@ -63,28 +55,19 @@ func (ctx *ChannelContext) Value(key interface{}) interface{} { // Callers using this context should ensure that they are registered as a running server // in order that they are waited for. func (g *Manager) ShutdownContext() context.Context { - return &ChannelContext{ - done: g.IsShutdown(), - err: ErrShutdown, - } + return g.shutdownCtx } // HammerContext returns a context.Context that is Done at hammer // Callers using this context should ensure that they are registered as a running server // in order that they are waited for. 
func (g *Manager) HammerContext() context.Context { - return &ChannelContext{ - done: g.IsHammer(), - err: ErrHammer, - } + return g.hammerCtx } // TerminateContext returns a context.Context that is Done at terminate // Callers using this context should ensure that they are registered as a terminating server // in order that they are waited for. func (g *Manager) TerminateContext() context.Context { - return &ChannelContext{ - done: g.IsTerminate(), - err: ErrTerminate, - } + return g.terminateCtx } diff --git a/modules/graceful/manager.go b/modules/graceful/manager.go index 903d05ed21f41..8c3b95c4aa74d 100644 --- a/modules/graceful/manager.go +++ b/modules/graceful/manager.go @@ -54,8 +54,8 @@ func InitManager(ctx context.Context) { }) } -// CallbackWithContext is combined runnable and context to watch to see if the caller has finished -type CallbackWithContext func(ctx context.Context, callback func()) +// WithCallback is a runnable to call when the caller has finished +type WithCallback func(callback func()) // RunnableWithShutdownFns is a runnable with functions to run at shutdown and terminate // After the callback to atShutdown is called and is complete, the main function must return. @@ -63,7 +63,7 @@ type CallbackWithContext func(ctx context.Context, callback func()) // Please note that use of the atShutdown and atTerminate callbacks will create go-routines that will wait till their respective signals // - users must therefore be careful to only call these as necessary. // If run is not expected to run indefinitely RunWithShutdownChan is likely to be more appropriate. -type RunnableWithShutdownFns func(atShutdown, atTerminate func(context.Context, func())) +type RunnableWithShutdownFns func(atShutdown, atTerminate func(func())) // RunWithShutdownFns takes a function that has both atShutdown and atTerminate callbacks // After the callback to atShutdown is called and is complete, the main function must return. 
@@ -80,17 +80,21 @@ func (g *Manager) RunWithShutdownFns(run RunnableWithShutdownFns) { g.doShutdown() } }() - run(func(ctx context.Context, atShutdown func()) { - go func() { - select { - case <-g.IsShutdown(): + run(func(atShutdown func()) { + g.lock.Lock() + defer g.lock.Unlock() + g.toRunAtShutdown = append(g.toRunAtShutdown, + func() { + defer func() { + if err := recover(); err != nil { + log.Critical("PANIC during RunWithShutdownFns: %v\nStacktrace: %s", err, log.Stack(2)) + g.doShutdown() + } + }() atShutdown() - case <-ctx.Done(): - return - } - }() - }, func(ctx context.Context, atTerminate func()) { - g.RunAtTerminate(ctx, atTerminate) + }) + }, func(atTerminate func()) { + g.RunAtTerminate(atTerminate) }) } @@ -99,7 +103,7 @@ func (g *Manager) RunWithShutdownFns(run RunnableWithShutdownFns) { // (Optionally IsHammer may be waited for instead however, this should be avoided if possible.) // The callback function provided to atTerminate must return once termination is complete. // Please note that use of the atTerminate function will create a go-routine that will wait till terminate - users must therefore be careful to only call this as necessary. -type RunnableWithShutdownChan func(atShutdown <-chan struct{}, atTerminate CallbackWithContext) +type RunnableWithShutdownChan func(atShutdown <-chan struct{}, atTerminate WithCallback) // RunWithShutdownChan takes a function that has channel to watch for shutdown and atTerminate callbacks // After the atShutdown channel is closed, the main function must return once shutdown is complete. 
@@ -115,8 +119,8 @@ func (g *Manager) RunWithShutdownChan(run RunnableWithShutdownChan) { g.doShutdown() } }() - run(g.IsShutdown(), func(ctx context.Context, atTerminate func()) { - g.RunAtTerminate(ctx, atTerminate) + run(g.IsShutdown(), func(atTerminate func()) { + g.RunAtTerminate(atTerminate) }) } @@ -136,60 +140,65 @@ func (g *Manager) RunWithShutdownContext(run func(context.Context)) { } // RunAtTerminate adds to the terminate wait group and creates a go-routine to run the provided function at termination -func (g *Manager) RunAtTerminate(ctx context.Context, terminate func()) { +func (g *Manager) RunAtTerminate(terminate func()) { g.terminateWaitGroup.Add(1) - go func() { - defer g.terminateWaitGroup.Done() - defer func() { - if err := recover(); err != nil { - log.Critical("PANIC during RunAtTerminate: %v\nStacktrace: %s", err, log.Stack(2)) - } - }() - select { - case <-g.IsTerminate(): + g.lock.Lock() + defer g.lock.Unlock() + g.toRunAtTerminate = append(g.toRunAtTerminate, + func() { + defer g.terminateWaitGroup.Done() + defer func() { + if err := recover(); err != nil { + log.Critical("PANIC during RunAtTerminate: %v\nStacktrace: %s", err, log.Stack(2)) + } + }() terminate() - case <-ctx.Done(): - } - }() + }) } // RunAtShutdown creates a go-routine to run the provided function at shutdown func (g *Manager) RunAtShutdown(ctx context.Context, shutdown func()) { - go func() { - defer func() { - if err := recover(); err != nil { - log.Critical("PANIC during RunAtShutdown: %v\nStacktrace: %s", err, log.Stack(2)) + g.lock.Lock() + defer g.lock.Unlock() + g.toRunAtShutdown = append(g.toRunAtShutdown, + func() { + defer func() { + if err := recover(); err != nil { + log.Critical("PANIC during RunAtShutdown: %v\nStacktrace: %s", err, log.Stack(2)) + } + }() + select { + case <-ctx.Done(): + return + default: + shutdown() } - }() - select { - case <-g.IsShutdown(): - shutdown() - case <-ctx.Done(): - } - }() + }) } // RunAtHammer creates a go-routine to run the 
provided function at shutdown -func (g *Manager) RunAtHammer(ctx context.Context, hammer func()) { - go func() { - defer func() { - if err := recover(); err != nil { - log.Critical("PANIC during RunAtHammer: %v\nStacktrace: %s", err, log.Stack(2)) - } - }() - select { - case <-g.IsHammer(): +func (g *Manager) RunAtHammer(hammer func()) { + g.lock.Lock() + defer g.lock.Unlock() + g.toRunAtHammer = append(g.toRunAtHammer, + func() { + defer func() { + if err := recover(); err != nil { + log.Critical("PANIC during RunAtHammer: %v\nStacktrace: %s", err, log.Stack(2)) + } + }() hammer() - case <-ctx.Done(): - } - }() + }) } func (g *Manager) doShutdown() { if !g.setStateTransition(stateRunning, stateShuttingDown) { return } g.lock.Lock() - close(g.shutdown) + g.shutdownCtxCancel() + for _, fn := range g.toRunAtShutdown { + go fn() + } g.lock.Unlock() if setting.GracefulHammerTime >= 0 { @@ -203,7 +212,7 @@ func (g *Manager) doShutdown() { g.doTerminate() g.WaitForTerminate() g.lock.Lock() - close(g.done) + g.doneCtxCancel() g.lock.Unlock() }() } @@ -212,10 +221,13 @@ func (g *Manager) doHammerTime(d time.Duration) { time.Sleep(d) g.lock.Lock() select { - case <-g.hammer: + case <-g.hammerCtx.Done(): default: log.Warn("Setting Hammer condition") - close(g.hammer) + g.hammerCtxCancel() + for _, fn := range g.toRunAtHammer { + go fn() + } } g.lock.Unlock() } @@ -226,10 +238,13 @@ func (g *Manager) doTerminate() { } g.lock.Lock() select { - case <-g.terminate: + case <-g.terminateCtx.Done(): default: log.Warn("Terminating") - close(g.terminate) + g.terminateCtxCancel() + for _, fn := range g.toRunAtTerminate { + go fn() + } } g.lock.Unlock() } @@ -242,7 +257,7 @@ func (g *Manager) IsChild() bool { // IsShutdown returns a channel which will be closed at shutdown. 
// The order of closure is IsShutdown, IsHammer (potentially), IsTerminate func (g *Manager) IsShutdown() <-chan struct{} { - return g.shutdown + return g.shutdownCtx.Done() } // IsHammer returns a channel which will be closed at hammer @@ -250,14 +265,14 @@ func (g *Manager) IsShutdown() <-chan struct{} { // Servers running within the running server wait group should respond to IsHammer // if not shutdown already func (g *Manager) IsHammer() <-chan struct{} { - return g.hammer + return g.hammerCtx.Done() } // IsTerminate returns a channel which will be closed at terminate // The order of closure is IsShutdown, IsHammer (potentially), IsTerminate // IsTerminate will only close once all running servers have stopped func (g *Manager) IsTerminate() <-chan struct{} { - return g.terminate + return g.terminateCtx.Done() } // ServerDone declares a running server done and subtracts one from the @@ -314,25 +329,20 @@ func (g *Manager) InformCleanup() { // Done allows the manager to be viewed as a context.Context, it returns a channel that is closed when the server is finished terminating func (g *Manager) Done() <-chan struct{} { - return g.done + return g.doneCtx.Done() } -// Err allows the manager to be viewed as a context.Context done at Terminate, it returns ErrTerminate +// Err allows the manager to be viewed as a context.Context done at Terminate func (g *Manager) Err() error { - select { - case <-g.Done(): - return ErrTerminate - default: - return nil - } + return g.doneCtx.Err() } -// Value allows the manager to be viewed as a context.Context done at Terminate, it has no values +// Value allows the manager to be viewed as a context.Context done at Terminate func (g *Manager) Value(key interface{}) interface{} { - return nil + return g.doneCtx.Value(key) } // Deadline returns nil as there is no fixed Deadline for the manager, it allows the manager to be viewed as a context.Context func (g *Manager) Deadline() (deadline time.Time, ok bool) { - return + return 
g.doneCtx.Deadline() } diff --git a/modules/graceful/manager_unix.go b/modules/graceful/manager_unix.go index 540974454c34c..20d9b3905c4fb 100644 --- a/modules/graceful/manager_unix.go +++ b/modules/graceful/manager_unix.go @@ -25,13 +25,21 @@ type Manager struct { forked bool lock *sync.RWMutex state state - shutdown chan struct{} - hammer chan struct{} - terminate chan struct{} - done chan struct{} + shutdownCtx context.Context + hammerCtx context.Context + terminateCtx context.Context + doneCtx context.Context + shutdownCtxCancel context.CancelFunc + hammerCtxCancel context.CancelFunc + terminateCtxCancel context.CancelFunc + doneCtxCancel context.CancelFunc runningServerWaitGroup sync.WaitGroup createServerWaitGroup sync.WaitGroup terminateWaitGroup sync.WaitGroup + + toRunAtShutdown []func() + toRunAtHammer []func() + toRunAtTerminate []func() } func newGracefulManager(ctx context.Context) *Manager { @@ -45,11 +53,11 @@ func newGracefulManager(ctx context.Context) *Manager { } func (g *Manager) start(ctx context.Context) { - // Make channels - g.terminate = make(chan struct{}) - g.shutdown = make(chan struct{}) - g.hammer = make(chan struct{}) - g.done = make(chan struct{}) + // Make contexts + g.terminateCtx, g.terminateCtxCancel = context.WithCancel(ctx) + g.shutdownCtx, g.shutdownCtxCancel = context.WithCancel(ctx) + g.hammerCtx, g.hammerCtxCancel = context.WithCancel(ctx) + g.doneCtx, g.doneCtxCancel = context.WithCancel(ctx) // Set the running state & handle signals g.setState(stateRunning) diff --git a/modules/graceful/manager_windows.go b/modules/graceful/manager_windows.go index 14923c2a9b7d2..51f29778ba7af 100644 --- a/modules/graceful/manager_windows.go +++ b/modules/graceful/manager_windows.go @@ -36,14 +36,22 @@ type Manager struct { isChild bool lock *sync.RWMutex state state - shutdown chan struct{} - hammer chan struct{} - terminate chan struct{} - done chan struct{} + shutdownCtx context.Context + hammerCtx context.Context + terminateCtx 
context.Context + doneCtx context.Context + shutdownCtxCancel context.CancelFunc + hammerCtxCancel context.CancelFunc + terminateCtxCancel context.CancelFunc + doneCtxCancel context.CancelFunc runningServerWaitGroup sync.WaitGroup createServerWaitGroup sync.WaitGroup terminateWaitGroup sync.WaitGroup shutdownRequested chan struct{} + + toRunAtShutdown []func() + toRunAtHammer []func() + toRunAtTerminate []func() } func newGracefulManager(ctx context.Context) *Manager { @@ -58,11 +66,13 @@ func newGracefulManager(ctx context.Context) *Manager { } func (g *Manager) start() { + // Make contexts + g.terminateCtx, g.terminateCtxCancel = context.WithCancel(g.ctx) + g.shutdownCtx, g.shutdownCtxCancel = context.WithCancel(g.ctx) + g.hammerCtx, g.hammerCtxCancel = context.WithCancel(g.ctx) + g.doneCtx, g.doneCtxCancel = context.WithCancel(g.ctx) + // Make channels - g.terminate = make(chan struct{}) - g.shutdown = make(chan struct{}) - g.hammer = make(chan struct{}) - g.done = make(chan struct{}) g.shutdownRequested = make(chan struct{}) // Set the running state @@ -171,7 +181,7 @@ hammerLoop: default: log.Debug("Unexpected control request: %v", change.Cmd) } - case <-g.hammer: + case <-g.hammerCtx.Done(): break hammerLoop } } diff --git a/modules/indexer/code/indexer.go b/modules/indexer/code/indexer.go index a7d78e9fdc82c..67fa43eda89dc 100644 --- a/modules/indexer/code/indexer.go +++ b/modules/indexer/code/indexer.go @@ -115,7 +115,13 @@ func Init() { ctx, cancel := context.WithCancel(context.Background()) - graceful.GetManager().RunAtTerminate(ctx, func() { + graceful.GetManager().RunAtTerminate(func() { + select { + case <-ctx.Done(): + return + default: + } + cancel() log.Debug("Closing repository indexer") indexer.Close() log.Info("PID: %d Repository Indexer closed", os.Getpid()) diff --git a/modules/indexer/issues/indexer.go b/modules/indexer/issues/indexer.go index 9edaef6bdd017..676b6686ea5b2 100644 --- a/modules/indexer/issues/indexer.go +++ 
b/modules/indexer/issues/indexer.go @@ -160,7 +160,7 @@ func InitIssueIndexer(syncReindex bool) { } populate = !exist holder.set(issueIndexer) - graceful.GetManager().RunAtTerminate(context.Background(), func() { + graceful.GetManager().RunAtTerminate(func() { log.Debug("Closing issue indexer") issueIndexer := holder.get() if issueIndexer != nil { @@ -170,7 +170,7 @@ func InitIssueIndexer(syncReindex bool) { }) log.Debug("Created Bleve Indexer") case "elasticsearch": - graceful.GetManager().RunWithShutdownFns(func(_, atTerminate func(context.Context, func())) { + graceful.GetManager().RunWithShutdownFns(func(_, atTerminate func(func())) { issueIndexer, err := NewElasticSearchIndexer(setting.Indexer.IssueConnStr, setting.Indexer.IssueIndexerName) if err != nil { log.Fatal("Unable to initialize Elastic Search Issue Indexer at connection: %s Error: %v", setting.Indexer.IssueConnStr, err) diff --git a/modules/migrations/gitea_uploader.go b/modules/migrations/gitea_uploader.go index bd6084d6a1678..8b865d7971522 100644 --- a/modules/migrations/gitea_uploader.go +++ b/modules/migrations/gitea_uploader.go @@ -250,14 +250,16 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error { rel.OriginalAuthorID = release.PublisherID } - // calc NumCommits - commit, err := g.gitRepo.GetCommit(rel.TagName) - if err != nil { - return fmt.Errorf("GetCommit: %v", err) - } - rel.NumCommits, err = commit.CommitsCount() - if err != nil { - return fmt.Errorf("CommitsCount: %v", err) + // calc NumCommits if no draft + if !release.Draft { + commit, err := g.gitRepo.GetCommit(rel.TagName) + if err != nil { + return fmt.Errorf("GetCommit: %v", err) + } + rel.NumCommits, err = commit.CommitsCount() + if err != nil { + return fmt.Errorf("CommitsCount: %v", err) + } } for _, asset := range release.Assets { @@ -270,9 +272,10 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error { } // download attachment - err = func() error { + err := func() error { 
// asset.DownloadURL maybe a local file var rc io.ReadCloser + var err error if asset.DownloadURL == nil { rc, err = asset.DownloadFunc() if err != nil { @@ -851,6 +854,7 @@ func (g *GiteaLocalUploader) CreateReviews(reviews ...*base.Review) error { // Rollback when migrating failed, this will rollback all the changes. func (g *GiteaLocalUploader) Rollback() error { if g.repo != nil && g.repo.ID > 0 { + g.gitRepo.Close() if err := models.DeleteRepository(g.doer, g.repo.OwnerID, g.repo.ID); err != nil { return err } diff --git a/modules/migrations/github.go b/modules/migrations/github.go index 282e3b4786151..8a3f5d34c78d0 100644 --- a/modules/migrations/github.go +++ b/modules/migrations/github.go @@ -264,34 +264,29 @@ func (g *GithubDownloaderV3) GetLabels() ([]*base.Label, error) { } func (g *GithubDownloaderV3) convertGithubRelease(rel *github.RepositoryRelease) *base.Release { - var ( - name string - desc string - ) - if rel.Body != nil { - desc = *rel.Body - } - if rel.Name != nil { - name = *rel.Name - } - - var email string - if rel.Author.Email != nil { - email = *rel.Author.Email - } - r := &base.Release{ TagName: *rel.TagName, TargetCommitish: *rel.TargetCommitish, - Name: name, - Body: desc, Draft: *rel.Draft, Prerelease: *rel.Prerelease, Created: rel.CreatedAt.Time, PublisherID: *rel.Author.ID, PublisherName: *rel.Author.Login, - PublisherEmail: email, - Published: rel.PublishedAt.Time, + } + + if rel.Body != nil { + r.Body = *rel.Body + } + if rel.Name != nil { + r.Name = *rel.Name + } + + if rel.Author.Email != nil { + r.PublisherEmail = *rel.Author.Email + } + + if rel.PublishedAt != nil { + r.Published = rel.PublishedAt.Time } for _, asset := range rel.Assets { @@ -306,18 +301,17 @@ func (g *GithubDownloaderV3) convertGithubRelease(rel *github.RepositoryRelease) Updated: asset.UpdatedAt.Time, DownloadFunc: func() (io.ReadCloser, error) { g.sleep() - asset, redir, err := g.client.Repositories.DownloadReleaseAsset(g.ctx, g.repoOwner, g.repoName, 
assetID, nil) + asset, redirectURL, err := g.client.Repositories.DownloadReleaseAsset(g.ctx, g.repoOwner, g.repoName, assetID, nil) if err != nil { return nil, err } - err = g.RefreshRate() - if err != nil { + if err := g.RefreshRate(); err != nil { log.Error("g.client.RateLimits: %s", err) } if asset == nil { - if redir != "" { + if redirectURL != "" { g.sleep() - req, err := http.NewRequestWithContext(g.ctx, "GET", redir, nil) + req, err := http.NewRequestWithContext(g.ctx, "GET", redirectURL, nil) if err != nil { return nil, err } diff --git a/modules/queue/bytefifo.go b/modules/queue/bytefifo.go index 94478e6f05c4b..3a10c8e1259c6 100644 --- a/modules/queue/bytefifo.go +++ b/modules/queue/bytefifo.go @@ -4,14 +4,16 @@ package queue +import "context" + // ByteFIFO defines a FIFO that takes a byte array type ByteFIFO interface { // Len returns the length of the fifo - Len() int64 + Len(ctx context.Context) int64 // PushFunc pushes data to the end of the fifo and calls the callback if it is added - PushFunc(data []byte, fn func() error) error + PushFunc(ctx context.Context, data []byte, fn func() error) error // Pop pops data from the start of the fifo - Pop() ([]byte, error) + Pop(ctx context.Context) ([]byte, error) // Close this fifo Close() error } @@ -20,7 +22,7 @@ type ByteFIFO interface { type UniqueByteFIFO interface { ByteFIFO // Has returns whether the fifo contains this data - Has(data []byte) (bool, error) + Has(ctx context.Context, data []byte) (bool, error) } var _ ByteFIFO = &DummyByteFIFO{} @@ -29,12 +31,12 @@ var _ ByteFIFO = &DummyByteFIFO{} type DummyByteFIFO struct{} // PushFunc returns nil -func (*DummyByteFIFO) PushFunc(data []byte, fn func() error) error { +func (*DummyByteFIFO) PushFunc(ctx context.Context, data []byte, fn func() error) error { return nil } // Pop returns nil -func (*DummyByteFIFO) Pop() ([]byte, error) { +func (*DummyByteFIFO) Pop(ctx context.Context) ([]byte, error) { return []byte{}, nil } @@ -44,7 +46,7 @@ func 
(*DummyByteFIFO) Close() error { } // Len is always 0 -func (*DummyByteFIFO) Len() int64 { +func (*DummyByteFIFO) Len(ctx context.Context) int64 { return 0 } @@ -56,6 +58,6 @@ type DummyUniqueByteFIFO struct { } // Has always returns false -func (*DummyUniqueByteFIFO) Has([]byte) (bool, error) { +func (*DummyUniqueByteFIFO) Has(ctx context.Context, data []byte) (bool, error) { return false, nil } diff --git a/modules/queue/manager.go b/modules/queue/manager.go index c3ec735af504d..a6d48575ab674 100644 --- a/modules/queue/manager.go +++ b/modules/queue/manager.go @@ -187,14 +187,14 @@ func (m *Manager) FlushAll(baseCtx context.Context, timeout time.Duration) error if flushable, ok := mq.Managed.(Flushable); ok { log.Debug("Flushing (flushable) queue: %s", mq.Name) go func(q *ManagedQueue) { - localCtx, localCancel := context.WithCancel(ctx) - pid := q.RegisterWorkers(1, start, hasTimeout, end, localCancel, true) + localCtx, localCtxCancel := context.WithCancel(ctx) + pid := q.RegisterWorkers(1, start, hasTimeout, end, localCtxCancel, true) err := flushable.FlushWithContext(localCtx) if err != nil && err != ctx.Err() { cancel() } q.CancelWorkers(pid) - localCancel() + localCtxCancel() wg.Done() }(mq) } else { diff --git a/modules/queue/queue.go b/modules/queue/queue.go index d08cba35a1ea5..7159048c11689 100644 --- a/modules/queue/queue.go +++ b/modules/queue/queue.go @@ -57,7 +57,7 @@ type Named interface { // Queues will handle their own contents in the Run method type Queue interface { Flushable - Run(atShutdown, atTerminate func(context.Context, func())) + Run(atShutdown, atTerminate func(func())) Push(Data) error } @@ -74,7 +74,7 @@ type DummyQueue struct { } // Run does nothing -func (*DummyQueue) Run(_, _ func(context.Context, func())) {} +func (*DummyQueue) Run(_, _ func(func())) {} // Push fakes a push of data to the queue func (*DummyQueue) Push(Data) error { @@ -122,7 +122,7 @@ type Immediate struct { } // Run does nothing -func (*Immediate) Run(_, _ 
func(context.Context, func())) {} +func (*Immediate) Run(_, _ func(func())) {} // Push fakes a push of data to the queue func (q *Immediate) Push(data Data) error { diff --git a/modules/queue/queue_bytefifo.go b/modules/queue/queue_bytefifo.go index fe1fb7807e831..3ea61aad0e4c5 100644 --- a/modules/queue/queue_bytefifo.go +++ b/modules/queue/queue_bytefifo.go @@ -17,8 +17,9 @@ import ( // ByteFIFOQueueConfiguration is the configuration for a ByteFIFOQueue type ByteFIFOQueueConfiguration struct { WorkerPoolConfiguration - Workers int - Name string + Workers int + Name string + WaitOnEmpty bool } var _ Queue = &ByteFIFOQueue{} @@ -26,14 +27,18 @@ var _ Queue = &ByteFIFOQueue{} // ByteFIFOQueue is a Queue formed from a ByteFIFO and WorkerPool type ByteFIFOQueue struct { *WorkerPool - byteFIFO ByteFIFO - typ Type - closed chan struct{} - terminated chan struct{} - exemplar interface{} - workers int - name string - lock sync.Mutex + byteFIFO ByteFIFO + typ Type + shutdownCtx context.Context + shutdownCtxCancel context.CancelFunc + terminateCtx context.Context + terminateCtxCancel context.CancelFunc + exemplar interface{} + workers int + name string + lock sync.Mutex + waitOnEmpty bool + pushed chan struct{} } // NewByteFIFOQueue creates a new ByteFIFOQueue @@ -44,15 +49,22 @@ func NewByteFIFOQueue(typ Type, byteFIFO ByteFIFO, handle HandlerFunc, cfg, exem } config := configInterface.(ByteFIFOQueueConfiguration) + terminateCtx, terminateCtxCancel := context.WithCancel(context.Background()) + shutdownCtx, shutdownCtxCancel := context.WithCancel(terminateCtx) + return &ByteFIFOQueue{ - WorkerPool: NewWorkerPool(handle, config.WorkerPoolConfiguration), - byteFIFO: byteFIFO, - typ: typ, - closed: make(chan struct{}), - terminated: make(chan struct{}), - exemplar: exemplar, - workers: config.Workers, - name: config.Name, + WorkerPool: NewWorkerPool(handle, config.WorkerPoolConfiguration), + byteFIFO: byteFIFO, + typ: typ, + shutdownCtx: shutdownCtx, + shutdownCtxCancel: 
shutdownCtxCancel, + terminateCtx: terminateCtx, + terminateCtxCancel: terminateCtxCancel, + exemplar: exemplar, + workers: config.Workers, + name: config.Name, + waitOnEmpty: config.WaitOnEmpty, + pushed: make(chan struct{}, 1), }, nil } @@ -76,7 +88,15 @@ func (q *ByteFIFOQueue) PushFunc(data Data, fn func() error) error { if err != nil { return err } - return q.byteFIFO.PushFunc(bs, fn) + if q.waitOnEmpty { + defer func() { + select { + case q.pushed <- struct{}{}: + default: + } + }() + } + return q.byteFIFO.PushFunc(q.terminateCtx, bs, fn) } // IsEmpty checks if the queue is empty @@ -86,135 +106,160 @@ func (q *ByteFIFOQueue) IsEmpty() bool { if !q.WorkerPool.IsEmpty() { return false } - return q.byteFIFO.Len() == 0 + return q.byteFIFO.Len(q.terminateCtx) == 0 } // Run runs the bytefifo queue -func (q *ByteFIFOQueue) Run(atShutdown, atTerminate func(context.Context, func())) { - atShutdown(context.Background(), q.Shutdown) - atTerminate(context.Background(), q.Terminate) +func (q *ByteFIFOQueue) Run(atShutdown, atTerminate func(func())) { + atShutdown(q.Shutdown) + atTerminate(q.Terminate) log.Debug("%s: %s Starting", q.typ, q.name) - go func() { - _ = q.AddWorkers(q.workers, 0) - }() + _ = q.AddWorkers(q.workers, 0) - go q.readToChan() + log.Trace("%s: %s Now running", q.typ, q.name) + q.readToChan() - log.Trace("%s: %s Waiting til closed", q.typ, q.name) - <-q.closed + <-q.shutdownCtx.Done() log.Trace("%s: %s Waiting til done", q.typ, q.name) q.Wait() log.Trace("%s: %s Waiting til cleaned", q.typ, q.name) - ctx, cancel := context.WithCancel(context.Background()) - atTerminate(ctx, cancel) - q.CleanUp(ctx) - cancel() + q.CleanUp(q.terminateCtx) + q.terminateCtxCancel() } +const maxBackOffTime = time.Second * 3 + func (q *ByteFIFOQueue) readToChan() { // handle quick cancels select { - case <-q.closed: + case <-q.shutdownCtx.Done(): // tell the pool to shutdown. 
- q.cancel() + q.baseCtxCancel() return default: } + // Default backoff values backOffTime := time.Millisecond * 100 - maxBackOffTime := time.Second * 3 - for { - success, resetBackoff := q.doPop() - if resetBackoff { - backOffTime = 100 * time.Millisecond - } - if success { +loop: + for { + err := q.doPop() + if err == errQueueEmpty { + log.Trace("%s: %s Waiting on Empty", q.typ, q.name) select { - case <-q.closed: - // tell the pool to shutdown. - q.cancel() + case <-q.pushed: + // reset backOffTime + backOffTime = 100 * time.Millisecond + continue loop + case <-q.shutdownCtx.Done(): + // Oops we've been shutdown whilst waiting + // Make sure the worker pool is shutdown too + q.baseCtxCancel() return - default: } - } else { + } + + // Reset the backOffTime if there is no error or an unmarshalError + if err == nil || err == errUnmarshal { + backOffTime = 100 * time.Millisecond + } + + if err != nil { + // Need to Backoff select { - case <-q.closed: - // tell the pool to shutdown. - q.cancel() + case <-q.shutdownCtx.Done(): + // Oops we've been shutdown whilst backing off + // Make sure the worker pool is shutdown too + q.baseCtxCancel() return case <-time.After(backOffTime): - } - backOffTime += backOffTime / 2 - if backOffTime > maxBackOffTime { - backOffTime = maxBackOffTime + // OK we've waited - so backoff a bit + backOffTime += backOffTime / 2 + if backOffTime > maxBackOffTime { + backOffTime = maxBackOffTime + } + continue loop } } + select { + case <-q.shutdownCtx.Done(): + // Oops we've been shutdown + // Make sure the worker pool is shutdown too + q.baseCtxCancel() + return + default: + continue loop + } } } -func (q *ByteFIFOQueue) doPop() (success, resetBackoff bool) { +var errQueueEmpty = fmt.Errorf("empty queue") +var errEmptyBytes = fmt.Errorf("empty bytes") +var errUnmarshal = fmt.Errorf("failed to unmarshal") + +func (q *ByteFIFOQueue) doPop() error { q.lock.Lock() defer q.lock.Unlock() - bs, err := q.byteFIFO.Pop() + bs, err := 
q.byteFIFO.Pop(q.shutdownCtx) if err != nil { + if err == context.Canceled { + q.baseCtxCancel() + return err + } log.Error("%s: %s Error on Pop: %v", q.typ, q.name, err) - return + return err } if len(bs) == 0 { - return + if q.waitOnEmpty && q.byteFIFO.Len(q.shutdownCtx) == 0 { + return errQueueEmpty + } + return errEmptyBytes } - resetBackoff = true - data, err := unmarshalAs(bs, q.exemplar) if err != nil { log.Error("%s: %s Failed to unmarshal with error: %v", q.typ, q.name, err) - return + return errUnmarshal } log.Trace("%s %s: Task found: %#v", q.typ, q.name, data) q.WorkerPool.Push(data) - success = true - return + return nil } // Shutdown processing from this queue func (q *ByteFIFOQueue) Shutdown() { log.Trace("%s: %s Shutting down", q.typ, q.name) - q.lock.Lock() select { - case <-q.closed: + case <-q.shutdownCtx.Done(): + return default: - close(q.closed) } - q.lock.Unlock() + q.shutdownCtxCancel() log.Debug("%s: %s Shutdown", q.typ, q.name) } // IsShutdown returns a channel which is closed when this Queue is shutdown func (q *ByteFIFOQueue) IsShutdown() <-chan struct{} { - return q.closed + return q.shutdownCtx.Done() } // Terminate this queue and close the queue func (q *ByteFIFOQueue) Terminate() { log.Trace("%s: %s Terminating", q.typ, q.name) q.Shutdown() - q.lock.Lock() select { - case <-q.terminated: - q.lock.Unlock() + case <-q.terminateCtx.Done(): return default: } - close(q.terminated) - q.lock.Unlock() if log.IsDebug() { - log.Debug("%s: %s Closing with %d tasks left in queue", q.typ, q.name, q.byteFIFO.Len()) + log.Debug("%s: %s Closing with %d tasks left in queue", q.typ, q.name, q.byteFIFO.Len(q.terminateCtx)) } + q.terminateCtxCancel() if err := q.byteFIFO.Close(); err != nil { log.Error("Error whilst closing internal byte fifo in %s: %s: %v", q.typ, q.name, err) } @@ -223,7 +268,7 @@ func (q *ByteFIFOQueue) Terminate() { // IsTerminated returns a channel which is closed when this Queue is terminated func (q *ByteFIFOQueue) IsTerminated() 
<-chan struct{} { - return q.terminated + return q.terminateCtx.Done() } var _ UniqueQueue = &ByteFIFOUniqueQueue{} @@ -240,17 +285,21 @@ func NewByteFIFOUniqueQueue(typ Type, byteFIFO UniqueByteFIFO, handle HandlerFun return nil, err } config := configInterface.(ByteFIFOQueueConfiguration) + terminateCtx, terminateCtxCancel := context.WithCancel(context.Background()) + shutdownCtx, shutdownCtxCancel := context.WithCancel(terminateCtx) return &ByteFIFOUniqueQueue{ ByteFIFOQueue: ByteFIFOQueue{ - WorkerPool: NewWorkerPool(handle, config.WorkerPoolConfiguration), - byteFIFO: byteFIFO, - typ: typ, - closed: make(chan struct{}), - terminated: make(chan struct{}), - exemplar: exemplar, - workers: config.Workers, - name: config.Name, + WorkerPool: NewWorkerPool(handle, config.WorkerPoolConfiguration), + byteFIFO: byteFIFO, + typ: typ, + shutdownCtx: shutdownCtx, + shutdownCtxCancel: shutdownCtxCancel, + terminateCtx: terminateCtx, + terminateCtxCancel: terminateCtxCancel, + exemplar: exemplar, + workers: config.Workers, + name: config.Name, }, }, nil } @@ -265,5 +314,5 @@ func (q *ByteFIFOUniqueQueue) Has(data Data) (bool, error) { if err != nil { return false, err } - return q.byteFIFO.(UniqueByteFIFO).Has(bs) + return q.byteFIFO.(UniqueByteFIFO).Has(q.terminateCtx, bs) } diff --git a/modules/queue/queue_channel.go b/modules/queue/queue_channel.go index d7a11e79f5dc6..4df64b69ee5ee 100644 --- a/modules/queue/queue_channel.go +++ b/modules/queue/queue_channel.go @@ -27,9 +27,13 @@ type ChannelQueueConfiguration struct { // It is basically a very thin wrapper around a WorkerPool type ChannelQueue struct { *WorkerPool - exemplar interface{} - workers int - name string + shutdownCtx context.Context + shutdownCtxCancel context.CancelFunc + terminateCtx context.Context + terminateCtxCancel context.CancelFunc + exemplar interface{} + workers int + name string } // NewChannelQueue creates a memory channel queue @@ -42,28 +46,30 @@ func NewChannelQueue(handle HandlerFunc, cfg, 
exemplar interface{}) (Queue, erro if config.BatchLength == 0 { config.BatchLength = 1 } + + terminateCtx, terminateCtxCancel := context.WithCancel(context.Background()) + shutdownCtx, shutdownCtxCancel := context.WithCancel(terminateCtx) + queue := &ChannelQueue{ - WorkerPool: NewWorkerPool(handle, config.WorkerPoolConfiguration), - exemplar: exemplar, - workers: config.Workers, - name: config.Name, + WorkerPool: NewWorkerPool(handle, config.WorkerPoolConfiguration), + shutdownCtx: shutdownCtx, + shutdownCtxCancel: shutdownCtxCancel, + terminateCtx: terminateCtx, + terminateCtxCancel: terminateCtxCancel, + exemplar: exemplar, + workers: config.Workers, + name: config.Name, } queue.qid = GetManager().Add(queue, ChannelQueueType, config, exemplar) return queue, nil } // Run starts to run the queue -func (q *ChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())) { - atShutdown(context.Background(), func() { - log.Warn("ChannelQueue: %s is not shutdownable!", q.name) - }) - atTerminate(context.Background(), func() { - log.Warn("ChannelQueue: %s is not terminatable!", q.name) - }) +func (q *ChannelQueue) Run(atShutdown, atTerminate func(func())) { + atShutdown(q.Shutdown) + atTerminate(q.Terminate) log.Debug("ChannelQueue: %s Starting", q.name) - go func() { - _ = q.AddWorkers(q.workers, 0) - }() + _ = q.AddWorkers(q.workers, 0) } // Push will push data into the queue @@ -75,6 +81,42 @@ func (q *ChannelQueue) Push(data Data) error { return nil } +// Shutdown processing from this queue +func (q *ChannelQueue) Shutdown() { + q.lock.Lock() + defer q.lock.Unlock() + select { + case <-q.shutdownCtx.Done(): + log.Trace("ChannelQueue: %s Already Shutting down", q.name) + return + default: + } + log.Trace("ChannelQueue: %s Shutting down", q.name) + go func() { + log.Trace("ChannelQueue: %s Flushing", q.name) + if err := q.FlushWithContext(q.terminateCtx); err != nil { + log.Warn("ChannelQueue: %s Terminated before completed flushing", q.name) + return + } + 
log.Debug("ChannelQueue: %s Flushed", q.name) + }() + q.shutdownCtxCancel() + log.Debug("ChannelQueue: %s Shutdown", q.name) +} + +// Terminate this queue and close the queue +func (q *ChannelQueue) Terminate() { + log.Trace("ChannelQueue: %s Terminating", q.name) + q.Shutdown() + select { + case <-q.terminateCtx.Done(): + return + default: + } + q.terminateCtxCancel() + log.Debug("ChannelQueue: %s Terminated", q.name) +} + // Name returns the name of this queue func (q *ChannelQueue) Name() string { return q.name diff --git a/modules/queue/queue_channel_test.go b/modules/queue/queue_channel_test.go index bca81d50fdaea..e7abe5b50b764 100644 --- a/modules/queue/queue_channel_test.go +++ b/modules/queue/queue_channel_test.go @@ -5,7 +5,6 @@ package queue import ( - "context" "testing" "time" @@ -21,7 +20,7 @@ func TestChannelQueue(t *testing.T) { } } - nilFn := func(_ context.Context, _ func()) {} + nilFn := func(_ func()) {} queue, err := NewChannelQueue(handle, ChannelQueueConfiguration{ @@ -61,7 +60,7 @@ func TestChannelQueue_Batch(t *testing.T) { } } - nilFn := func(_ context.Context, _ func()) {} + nilFn := func(_ func()) {} queue, err := NewChannelQueue(handle, ChannelQueueConfiguration{ diff --git a/modules/queue/queue_disk.go b/modules/queue/queue_disk.go index 6c15a8e63be29..911233a5d9a01 100644 --- a/modules/queue/queue_disk.go +++ b/modules/queue/queue_disk.go @@ -5,6 +5,8 @@ package queue import ( + "context" + "code.gitea.io/gitea/modules/nosql" "gitea.com/lunny/levelqueue" @@ -37,6 +39,7 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) if len(config.ConnectionString) == 0 { config.ConnectionString = config.DataDir } + config.WaitOnEmpty = true byteFIFO, err := NewLevelQueueByteFIFO(config.ConnectionString, config.QueueName) if err != nil { @@ -82,7 +85,7 @@ func NewLevelQueueByteFIFO(connection, prefix string) (*LevelQueueByteFIFO, erro } // PushFunc will push data into the fifo -func (fifo *LevelQueueByteFIFO) 
PushFunc(data []byte, fn func() error) error { +func (fifo *LevelQueueByteFIFO) PushFunc(ctx context.Context, data []byte, fn func() error) error { if fn != nil { if err := fn(); err != nil { return err @@ -92,7 +95,7 @@ func (fifo *LevelQueueByteFIFO) PushFunc(data []byte, fn func() error) error { } // Pop pops data from the start of the fifo -func (fifo *LevelQueueByteFIFO) Pop() ([]byte, error) { +func (fifo *LevelQueueByteFIFO) Pop(ctx context.Context) ([]byte, error) { data, err := fifo.internal.RPop() if err != nil && err != levelqueue.ErrNotFound { return nil, err @@ -108,7 +111,7 @@ func (fifo *LevelQueueByteFIFO) Close() error { } // Len returns the length of the fifo -func (fifo *LevelQueueByteFIFO) Len() int64 { +func (fifo *LevelQueueByteFIFO) Len(ctx context.Context) int64 { return fifo.internal.Len() } diff --git a/modules/queue/queue_disk_channel.go b/modules/queue/queue_disk_channel.go index 801fd8a12235c..c3a1c5781ef09 100644 --- a/modules/queue/queue_disk_channel.go +++ b/modules/queue/queue_disk_channel.go @@ -133,8 +133,9 @@ func (q *PersistableChannelQueue) Push(data Data) error { } // Run starts to run the queue -func (q *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())) { +func (q *PersistableChannelQueue) Run(atShutdown, atTerminate func(func())) { log.Debug("PersistableChannelQueue: %s Starting", q.delayedStarter.name) + _ = q.channelQueue.AddWorkers(q.channelQueue.workers, 0) q.lock.Lock() if q.internal == nil { @@ -147,34 +148,32 @@ func (q *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Conte } else { q.lock.Unlock() } - atShutdown(context.Background(), q.Shutdown) - atTerminate(context.Background(), q.Terminate) + atShutdown(q.Shutdown) + atTerminate(q.Terminate) - // Just run the level queue - we shut it down later - go q.internal.Run(func(_ context.Context, _ func()) {}, func(_ context.Context, _ func()) {}) - - go func() { - _ = q.channelQueue.AddWorkers(q.channelQueue.workers, 0) 
- }() + if lq, ok := q.internal.(*LevelQueue); ok && lq.byteFIFO.Len(lq.shutdownCtx) != 0 { + // Just run the level queue - we shut it down once it's flushed + go q.internal.Run(func(_ func()) {}, func(_ func()) {}) + go func() { + for !q.IsEmpty() { + _ = q.internal.Flush(0) + select { + case <-time.After(100 * time.Millisecond): + case <-q.internal.(*LevelQueue).shutdownCtx.Done(): + log.Warn("LevelQueue: %s shut down before completely flushed", q.internal.(*LevelQueue).Name()) + return + } + } + log.Debug("LevelQueue: %s flushed so shutting down", q.internal.(*LevelQueue).Name()) + q.internal.(*LevelQueue).Shutdown() + GetManager().Remove(q.internal.(*LevelQueue).qid) + }() + } else { + log.Debug("PersistableChannelQueue: %s Skipping running the empty level queue", q.delayedStarter.name) + q.internal.(*LevelQueue).Shutdown() + GetManager().Remove(q.internal.(*LevelQueue).qid) + } - log.Trace("PersistableChannelQueue: %s Waiting til closed", q.delayedStarter.name) - <-q.closed - log.Trace("PersistableChannelQueue: %s Cancelling pools", q.delayedStarter.name) - q.channelQueue.cancel() - q.internal.(*LevelQueue).cancel() - log.Trace("PersistableChannelQueue: %s Waiting til done", q.delayedStarter.name) - q.channelQueue.Wait() - q.internal.(*LevelQueue).Wait() - // Redirect all remaining data in the chan to the internal channel - go func() { - log.Trace("PersistableChannelQueue: %s Redirecting remaining data", q.delayedStarter.name) - for data := range q.channelQueue.dataChan { - _ = q.internal.Push(data) - atomic.AddInt64(&q.channelQueue.numInQueue, -1) - } - log.Trace("PersistableChannelQueue: %s Done Redirecting remaining data", q.delayedStarter.name) - }() - log.Trace("PersistableChannelQueue: %s Done main loop", q.delayedStarter.name) } // Flush flushes the queue and blocks till the queue is empty @@ -232,16 +231,37 @@ func (q *PersistableChannelQueue) IsEmpty() bool { func (q *PersistableChannelQueue) Shutdown() { log.Trace("PersistableChannelQueue: %s 
Shutting down", q.delayedStarter.name) q.lock.Lock() - defer q.lock.Unlock() + select { case <-q.closed: + q.lock.Unlock() + return default: - if q.internal != nil { - q.internal.(*LevelQueue).Shutdown() - } - close(q.closed) - log.Debug("PersistableChannelQueue: %s Shutdown", q.delayedStarter.name) } + q.channelQueue.Shutdown() + if q.internal != nil { + q.internal.(*LevelQueue).Shutdown() + } + close(q.closed) + q.lock.Unlock() + + log.Trace("PersistableChannelQueue: %s Cancelling pools", q.delayedStarter.name) + q.channelQueue.baseCtxCancel() + q.internal.(*LevelQueue).baseCtxCancel() + log.Trace("PersistableChannelQueue: %s Waiting til done", q.delayedStarter.name) + q.channelQueue.Wait() + q.internal.(*LevelQueue).Wait() + // Redirect all remaining data in the chan to the internal channel + go func() { + log.Trace("PersistableChannelQueue: %s Redirecting remaining data", q.delayedStarter.name) + for data := range q.channelQueue.dataChan { + _ = q.internal.Push(data) + atomic.AddInt64(&q.channelQueue.numInQueue, -1) + } + log.Trace("PersistableChannelQueue: %s Done Redirecting remaining data", q.delayedStarter.name) + }() + + log.Debug("PersistableChannelQueue: %s Shutdown", q.delayedStarter.name) } // Terminate this queue and close the queue @@ -250,6 +270,7 @@ func (q *PersistableChannelQueue) Terminate() { q.Shutdown() q.lock.Lock() defer q.lock.Unlock() + q.channelQueue.Terminate() if q.internal != nil { q.internal.(*LevelQueue).Terminate() } diff --git a/modules/queue/queue_disk_channel_test.go b/modules/queue/queue_disk_channel_test.go index 93061bffc6586..561f98ca907b6 100644 --- a/modules/queue/queue_disk_channel_test.go +++ b/modules/queue/queue_disk_channel_test.go @@ -5,10 +5,8 @@ package queue import ( - "context" "io/ioutil" "testing" - "time" "code.gitea.io/gitea/modules/util" "github.com/stretchr/testify/assert" @@ -32,17 +30,19 @@ func TestPersistableChannelQueue(t *testing.T) { defer util.RemoveAll(tmpDir) queue, err := 
NewPersistableChannelQueue(handle, PersistableChannelQueueConfiguration{ - DataDir: tmpDir, - BatchLength: 2, - QueueLength: 20, - Workers: 1, - MaxWorkers: 10, + DataDir: tmpDir, + BatchLength: 2, + QueueLength: 20, + Workers: 1, + BoostWorkers: 0, + MaxWorkers: 10, + Name: "first", }, &testData{}) assert.NoError(t, err) - go queue.Run(func(_ context.Context, shutdown func()) { + go queue.Run(func(shutdown func()) { queueShutdown = append(queueShutdown, shutdown) - }, func(_ context.Context, terminate func()) { + }, func(terminate func()) { queueTerminate = append(queueTerminate, terminate) }) @@ -64,13 +64,18 @@ func TestPersistableChannelQueue(t *testing.T) { assert.Equal(t, test2.TestString, result2.TestString) assert.Equal(t, test2.TestInt, result2.TestInt) + // test1 is a testData not a *testData so will be rejected err = queue.Push(test1) assert.Error(t, err) + // Now shutdown the queue for _, callback := range queueShutdown { callback() } - time.Sleep(200 * time.Millisecond) + + // Wait til it is closed + <-queue.(*PersistableChannelQueue).closed + err = queue.Push(&test1) assert.NoError(t, err) err = queue.Push(&test2) @@ -80,23 +85,33 @@ func TestPersistableChannelQueue(t *testing.T) { assert.Fail(t, "Handler processing should have stopped") default: } + + // terminate the queue for _, callback := range queueTerminate { callback() } + select { + case <-handleChan: + assert.Fail(t, "Handler processing should have stopped") + default: + } + // Reopen queue queue, err = NewPersistableChannelQueue(handle, PersistableChannelQueueConfiguration{ - DataDir: tmpDir, - BatchLength: 2, - QueueLength: 20, - Workers: 1, - MaxWorkers: 10, + DataDir: tmpDir, + BatchLength: 2, + QueueLength: 20, + Workers: 1, + BoostWorkers: 0, + MaxWorkers: 10, + Name: "second", }, &testData{}) assert.NoError(t, err) - go queue.Run(func(_ context.Context, shutdown func()) { + go queue.Run(func(shutdown func()) { queueShutdown = append(queueShutdown, shutdown) - }, func(_ 
context.Context, terminate func()) { + }, func(terminate func()) { queueTerminate = append(queueTerminate, terminate) }) diff --git a/modules/queue/queue_disk_test.go b/modules/queue/queue_disk_test.go index edaed49a52396..1f884d4f8d76d 100644 --- a/modules/queue/queue_disk_test.go +++ b/modules/queue/queue_disk_test.go @@ -5,7 +5,6 @@ package queue import ( - "context" "io/ioutil" "sync" "testing" @@ -49,11 +48,11 @@ func TestLevelQueue(t *testing.T) { }, &testData{}) assert.NoError(t, err) - go queue.Run(func(_ context.Context, shutdown func()) { + go queue.Run(func(shutdown func()) { lock.Lock() queueShutdown = append(queueShutdown, shutdown) lock.Unlock() - }, func(_ context.Context, terminate func()) { + }, func(terminate func()) { lock.Lock() queueTerminate = append(queueTerminate, terminate) lock.Unlock() @@ -123,11 +122,11 @@ func TestLevelQueue(t *testing.T) { }, &testData{}) assert.NoError(t, err) - go queue.Run(func(_ context.Context, shutdown func()) { + go queue.Run(func(shutdown func()) { lock.Lock() queueShutdown = append(queueShutdown, shutdown) lock.Unlock() - }, func(_ context.Context, terminate func()) { + }, func(terminate func()) { lock.Lock() queueTerminate = append(queueTerminate, terminate) lock.Unlock() diff --git a/modules/queue/queue_redis.go b/modules/queue/queue_redis.go index af2cc30335b78..a5fb866dc1e11 100644 --- a/modules/queue/queue_redis.go +++ b/modules/queue/queue_redis.go @@ -6,7 +6,6 @@ package queue import ( "context" - "fmt" "code.gitea.io/gitea/modules/graceful" "code.gitea.io/gitea/modules/log" @@ -47,8 +46,6 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) return nil, err } - byteFIFO.ctx = graceful.NewChannelContext(byteFIFOQueue.IsTerminated(), fmt.Errorf("queue has been terminated")) - queue := &RedisQueue{ ByteFIFOQueue: byteFIFOQueue, } @@ -73,8 +70,8 @@ var _ ByteFIFO = &RedisByteFIFO{} // RedisByteFIFO represents a ByteFIFO formed from a redisClient type RedisByteFIFO struct { - 
ctx context.Context - client redisClient + client redisClient + queueName string } @@ -89,7 +86,6 @@ func NewRedisByteFIFO(config RedisByteFIFOConfiguration) (*RedisByteFIFO, error) fifo := &RedisByteFIFO{ queueName: config.QueueName, } - fifo.ctx = graceful.GetManager().TerminateContext() fifo.client = nosql.GetManager().GetRedisClient(config.ConnectionString) if err := fifo.client.Ping(graceful.GetManager().ShutdownContext()).Err(); err != nil { return nil, err @@ -98,18 +94,18 @@ func NewRedisByteFIFO(config RedisByteFIFOConfiguration) (*RedisByteFIFO, error) } // PushFunc pushes data to the end of the fifo and calls the callback if it is added -func (fifo *RedisByteFIFO) PushFunc(data []byte, fn func() error) error { +func (fifo *RedisByteFIFO) PushFunc(ctx context.Context, data []byte, fn func() error) error { if fn != nil { if err := fn(); err != nil { return err } } - return fifo.client.RPush(fifo.ctx, fifo.queueName, data).Err() + return fifo.client.RPush(ctx, fifo.queueName, data).Err() } // Pop pops data from the start of the fifo -func (fifo *RedisByteFIFO) Pop() ([]byte, error) { - data, err := fifo.client.LPop(fifo.ctx, fifo.queueName).Bytes() +func (fifo *RedisByteFIFO) Pop(ctx context.Context) ([]byte, error) { + data, err := fifo.client.LPop(ctx, fifo.queueName).Bytes() if err == nil || err == redis.Nil { return data, nil } @@ -122,8 +118,8 @@ func (fifo *RedisByteFIFO) Close() error { } // Len returns the length of the fifo -func (fifo *RedisByteFIFO) Len() int64 { - val, err := fifo.client.LLen(fifo.ctx, fifo.queueName).Result() +func (fifo *RedisByteFIFO) Len(ctx context.Context) int64 { + val, err := fifo.client.LLen(ctx, fifo.queueName).Result() if err != nil { log.Error("Error whilst getting length of redis queue %s: Error: %v", fifo.queueName, err) return -1 diff --git a/modules/queue/queue_wrapped.go b/modules/queue/queue_wrapped.go index 88d64e82464f4..ec30ab0281972 100644 --- a/modules/queue/queue_wrapped.go +++ 
b/modules/queue/queue_wrapped.go @@ -38,7 +38,7 @@ type delayedStarter struct { } // setInternal must be called with the lock locked. -func (q *delayedStarter) setInternal(atShutdown func(context.Context, func()), handle HandlerFunc, exemplar interface{}) error { +func (q *delayedStarter) setInternal(atShutdown func(func()), handle HandlerFunc, exemplar interface{}) error { var ctx context.Context var cancel context.CancelFunc if q.timeout > 0 { @@ -49,9 +49,7 @@ func (q *delayedStarter) setInternal(atShutdown func(context.Context, func()), h defer cancel() // Ensure we also stop at shutdown - atShutdown(ctx, func() { - cancel() - }) + atShutdown(cancel) i := 1 for q.internal == nil { @@ -221,7 +219,7 @@ func (q *WrappedQueue) IsEmpty() bool { } // Run starts to run the queue and attempts to create the internal queue -func (q *WrappedQueue) Run(atShutdown, atTerminate func(context.Context, func())) { +func (q *WrappedQueue) Run(atShutdown, atTerminate func(func())) { log.Debug("WrappedQueue: %s Starting", q.name) q.lock.Lock() if q.internal == nil { diff --git a/modules/queue/unique_queue_channel.go b/modules/queue/unique_queue_channel.go index dec1cfc5c06e3..5bec67c4d355c 100644 --- a/modules/queue/unique_queue_channel.go +++ b/modules/queue/unique_queue_channel.go @@ -28,11 +28,15 @@ type ChannelUniqueQueueConfiguration ChannelQueueConfiguration // only guaranteed whilst the task is waiting in the queue. 
type ChannelUniqueQueue struct { *WorkerPool - lock sync.Mutex - table map[Data]bool - exemplar interface{} - workers int - name string + lock sync.Mutex + table map[Data]bool + shutdownCtx context.Context + shutdownCtxCancel context.CancelFunc + terminateCtx context.Context + terminateCtxCancel context.CancelFunc + exemplar interface{} + workers int + name string } // NewChannelUniqueQueue create a memory channel queue @@ -45,11 +49,19 @@ func NewChannelUniqueQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue if config.BatchLength == 0 { config.BatchLength = 1 } + + terminateCtx, terminateCtxCancel := context.WithCancel(context.Background()) + shutdownCtx, shutdownCtxCancel := context.WithCancel(terminateCtx) + queue := &ChannelUniqueQueue{ - table: map[Data]bool{}, - exemplar: exemplar, - workers: config.Workers, - name: config.Name, + table: map[Data]bool{}, + shutdownCtx: shutdownCtx, + shutdownCtxCancel: shutdownCtxCancel, + terminateCtx: terminateCtx, + terminateCtxCancel: terminateCtxCancel, + exemplar: exemplar, + workers: config.Workers, + name: config.Name, } queue.WorkerPool = NewWorkerPool(func(data ...Data) { for _, datum := range data { @@ -65,17 +77,11 @@ func NewChannelUniqueQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue } // Run starts to run the queue -func (q *ChannelUniqueQueue) Run(atShutdown, atTerminate func(context.Context, func())) { - atShutdown(context.Background(), func() { - log.Warn("ChannelUniqueQueue: %s is not shutdownable!", q.name) - }) - atTerminate(context.Background(), func() { - log.Warn("ChannelUniqueQueue: %s is not terminatable!", q.name) - }) +func (q *ChannelUniqueQueue) Run(atShutdown, atTerminate func(func())) { + atShutdown(q.Shutdown) + atTerminate(q.Terminate) log.Debug("ChannelUniqueQueue: %s Starting", q.name) - go func() { - _ = q.AddWorkers(q.workers, 0) - }() + _ = q.AddWorkers(q.workers, 0) } // Push will push data into the queue if the data is not already in the queue @@ -122,6 +128,39 @@ 
func (q *ChannelUniqueQueue) Has(data Data) (bool, error) { return has, nil } +// Shutdown processing from this queue +func (q *ChannelUniqueQueue) Shutdown() { + log.Trace("ChannelUniqueQueue: %s Shutting down", q.name) + select { + case <-q.shutdownCtx.Done(): + return + default: + } + go func() { + log.Trace("ChannelUniqueQueue: %s Flushing", q.name) + if err := q.FlushWithContext(q.terminateCtx); err != nil { + log.Warn("ChannelUniqueQueue: %s Terminated before completed flushing", q.name) + return + } + log.Debug("ChannelUniqueQueue: %s Flushed", q.name) + }() + q.shutdownCtxCancel() + log.Debug("ChannelUniqueQueue: %s Shutdown", q.name) +} + +// Terminate this queue and close the queue +func (q *ChannelUniqueQueue) Terminate() { + log.Trace("ChannelUniqueQueue: %s Terminating", q.name) + q.Shutdown() + select { + case <-q.terminateCtx.Done(): + return + default: + } + q.terminateCtxCancel() + log.Debug("ChannelUniqueQueue: %s Terminated", q.name) +} + // Name returns the name of this queue func (q *ChannelUniqueQueue) Name() string { return q.name diff --git a/modules/queue/unique_queue_disk.go b/modules/queue/unique_queue_disk.go index 8ec8848bc498b..bb0eb7d950c59 100644 --- a/modules/queue/unique_queue_disk.go +++ b/modules/queue/unique_queue_disk.go @@ -5,6 +5,8 @@ package queue import ( + "context" + "code.gitea.io/gitea/modules/nosql" "gitea.com/lunny/levelqueue" @@ -41,6 +43,7 @@ func NewLevelUniqueQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, if len(config.ConnectionString) == 0 { config.ConnectionString = config.DataDir } + config.WaitOnEmpty = true byteFIFO, err := NewLevelUniqueQueueByteFIFO(config.ConnectionString, config.QueueName) if err != nil { @@ -86,12 +89,12 @@ func NewLevelUniqueQueueByteFIFO(connection, prefix string) (*LevelUniqueQueueBy } // PushFunc pushes data to the end of the fifo and calls the callback if it is added -func (fifo *LevelUniqueQueueByteFIFO) PushFunc(data []byte, fn func() error) error { +func (fifo 
*LevelUniqueQueueByteFIFO) PushFunc(ctx context.Context, data []byte, fn func() error) error { return fifo.internal.LPushFunc(data, fn) } // Pop pops data from the start of the fifo -func (fifo *LevelUniqueQueueByteFIFO) Pop() ([]byte, error) { +func (fifo *LevelUniqueQueueByteFIFO) Pop(ctx context.Context) ([]byte, error) { data, err := fifo.internal.RPop() if err != nil && err != levelqueue.ErrNotFound { return nil, err @@ -100,12 +103,12 @@ func (fifo *LevelUniqueQueueByteFIFO) Pop() ([]byte, error) { } // Len returns the length of the fifo -func (fifo *LevelUniqueQueueByteFIFO) Len() int64 { +func (fifo *LevelUniqueQueueByteFIFO) Len(ctx context.Context) int64 { return fifo.internal.Len() } // Has returns whether the fifo contains this data -func (fifo *LevelUniqueQueueByteFIFO) Has(data []byte) (bool, error) { +func (fifo *LevelUniqueQueueByteFIFO) Has(ctx context.Context, data []byte) (bool, error) { return fifo.internal.Has(data) } diff --git a/modules/queue/unique_queue_disk_channel.go b/modules/queue/unique_queue_disk_channel.go index 47c4f2bdd574d..65a3941519954 100644 --- a/modules/queue/unique_queue_disk_channel.go +++ b/modules/queue/unique_queue_disk_channel.go @@ -36,7 +36,7 @@ type PersistableChannelUniqueQueueConfiguration struct { // task cannot be processed twice or more at the same time. Uniqueness is // only guaranteed whilst the task is waiting in the queue. 
type PersistableChannelUniqueQueue struct { - *ChannelUniqueQueue + channelQueue *ChannelUniqueQueue delayedStarter lock sync.Mutex closed chan struct{} @@ -85,8 +85,8 @@ func NewPersistableChannelUniqueQueue(handle HandlerFunc, cfg, exemplar interfac } queue := &PersistableChannelUniqueQueue{ - ChannelUniqueQueue: channelUniqueQueue.(*ChannelUniqueQueue), - closed: make(chan struct{}), + channelQueue: channelUniqueQueue.(*ChannelUniqueQueue), + closed: make(chan struct{}), } levelQueue, err := NewLevelUniqueQueue(func(data ...Data) { @@ -138,14 +138,14 @@ func (q *PersistableChannelUniqueQueue) PushFunc(data Data, fn func() error) err case <-q.closed: return q.internal.(UniqueQueue).PushFunc(data, fn) default: - return q.ChannelUniqueQueue.PushFunc(data, fn) + return q.channelQueue.PushFunc(data, fn) } } // Has will test if the queue has the data func (q *PersistableChannelUniqueQueue) Has(data Data) (bool, error) { // This is more difficult... - has, err := q.ChannelUniqueQueue.Has(data) + has, err := q.channelQueue.Has(data) if err != nil || has { return has, err } @@ -158,7 +158,7 @@ func (q *PersistableChannelUniqueQueue) Has(data Data) (bool, error) { } // Run starts to run the queue -func (q *PersistableChannelUniqueQueue) Run(atShutdown, atTerminate func(context.Context, func())) { +func (q *PersistableChannelUniqueQueue) Run(atShutdown, atTerminate func(func())) { log.Debug("PersistableChannelUniqueQueue: %s Starting", q.delayedStarter.name) q.lock.Lock() @@ -170,7 +170,7 @@ func (q *PersistableChannelUniqueQueue) Run(atShutdown, atTerminate func(context log.Error("Unable push to channelled queue: %v", err) } } - }, q.exemplar) + }, q.channelQueue.exemplar) q.lock.Unlock() if err != nil { log.Fatal("Unable to create internal queue for %s Error: %v", q.Name(), err) @@ -179,53 +179,73 @@ func (q *PersistableChannelUniqueQueue) Run(atShutdown, atTerminate func(context } else { q.lock.Unlock() } - atShutdown(context.Background(), q.Shutdown) - 
atTerminate(context.Background(), q.Terminate) + atShutdown(q.Shutdown) + atTerminate(q.Terminate) + _ = q.channelQueue.AddWorkers(q.channelQueue.workers, 0) - // Just run the level queue - we shut it down later - go q.internal.Run(func(_ context.Context, _ func()) {}, func(_ context.Context, _ func()) {}) - - go func() { - _ = q.ChannelUniqueQueue.AddWorkers(q.workers, 0) - }() + if luq, ok := q.internal.(*LevelUniqueQueue); ok && luq.ByteFIFOUniqueQueue.byteFIFO.Len(luq.shutdownCtx) != 0 { + // Just run the level queue - we shut it down once it's flushed + go q.internal.Run(func(_ func()) {}, func(_ func()) {}) + go func() { + _ = q.internal.Flush(0) + log.Debug("LevelUniqueQueue: %s flushed so shutting down", q.internal.(*LevelQueue).Name()) + q.internal.(*LevelUniqueQueue).Shutdown() + GetManager().Remove(q.internal.(*LevelUniqueQueue).qid) + }() + } else { + log.Debug("PersistableChannelUniqueQueue: %s Skipping running the empty level queue", q.delayedStarter.name) + q.internal.(*LevelUniqueQueue).Shutdown() + GetManager().Remove(q.internal.(*LevelUniqueQueue).qid) + } - log.Trace("PersistableChannelUniqueQueue: %s Waiting til closed", q.delayedStarter.name) - <-q.closed - log.Trace("PersistableChannelUniqueQueue: %s Cancelling pools", q.delayedStarter.name) - q.internal.(*LevelUniqueQueue).cancel() - q.ChannelUniqueQueue.cancel() - log.Trace("PersistableChannelUniqueQueue: %s Waiting til done", q.delayedStarter.name) - q.ChannelUniqueQueue.Wait() - q.internal.(*LevelUniqueQueue).Wait() - // Redirect all remaining data in the chan to the internal channel - go func() { - log.Trace("PersistableChannelUniqueQueue: %s Redirecting remaining data", q.delayedStarter.name) - for data := range q.ChannelUniqueQueue.dataChan { - _ = q.internal.Push(data) - } - log.Trace("PersistableChannelUniqueQueue: %s Done Redirecting remaining data", q.delayedStarter.name) - }() - log.Trace("PersistableChannelUniqueQueue: %s Done main loop", q.delayedStarter.name) } // Flush flushes 
the queue func (q *PersistableChannelUniqueQueue) Flush(timeout time.Duration) error { - return q.ChannelUniqueQueue.Flush(timeout) + return q.channelQueue.Flush(timeout) +} + +// FlushWithContext flushes the queue +func (q *PersistableChannelUniqueQueue) FlushWithContext(ctx context.Context) error { + return q.channelQueue.FlushWithContext(ctx) +} + +// IsEmpty checks if a queue is empty +func (q *PersistableChannelUniqueQueue) IsEmpty() bool { + return q.channelQueue.IsEmpty() } // Shutdown processing this queue func (q *PersistableChannelUniqueQueue) Shutdown() { log.Trace("PersistableChannelUniqueQueue: %s Shutting down", q.delayedStarter.name) q.lock.Lock() - defer q.lock.Unlock() select { case <-q.closed: + q.lock.Unlock() + return default: if q.internal != nil { q.internal.(*LevelUniqueQueue).Shutdown() } close(q.closed) + q.lock.Unlock() } + + log.Trace("PersistableChannelUniqueQueue: %s Cancelling pools", q.delayedStarter.name) + q.internal.(*LevelUniqueQueue).baseCtxCancel() + q.channelQueue.baseCtxCancel() + log.Trace("PersistableChannelUniqueQueue: %s Waiting til done", q.delayedStarter.name) + q.channelQueue.Wait() + q.internal.(*LevelUniqueQueue).Wait() + // Redirect all remaining data in the chan to the internal channel + go func() { + log.Trace("PersistableChannelUniqueQueue: %s Redirecting remaining data", q.delayedStarter.name) + for data := range q.channelQueue.dataChan { + _ = q.internal.Push(data) + } + log.Trace("PersistableChannelUniqueQueue: %s Done Redirecting remaining data", q.delayedStarter.name) + }() + log.Debug("PersistableChannelUniqueQueue: %s Shutdown", q.delayedStarter.name) } diff --git a/modules/queue/unique_queue_redis.go b/modules/queue/unique_queue_redis.go index 20a50cc1f235f..7474c096655d3 100644 --- a/modules/queue/unique_queue_redis.go +++ b/modules/queue/unique_queue_redis.go @@ -5,9 +5,8 @@ package queue import ( - "fmt" + "context" - "code.gitea.io/gitea/modules/graceful" "github.com/go-redis/redis/v8" ) @@ -51,8 
+50,6 @@ func NewRedisUniqueQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, return nil, err } - byteFIFO.ctx = graceful.NewChannelContext(byteFIFOQueue.IsTerminated(), fmt.Errorf("queue has been terminated")) - queue := &RedisUniqueQueue{ ByteFIFOUniqueQueue: byteFIFOQueue, } @@ -92,8 +89,8 @@ func NewRedisUniqueByteFIFO(config RedisUniqueByteFIFOConfiguration) (*RedisUniq } // PushFunc pushes data to the end of the fifo and calls the callback if it is added -func (fifo *RedisUniqueByteFIFO) PushFunc(data []byte, fn func() error) error { - added, err := fifo.client.SAdd(fifo.ctx, fifo.setName, data).Result() +func (fifo *RedisUniqueByteFIFO) PushFunc(ctx context.Context, data []byte, fn func() error) error { + added, err := fifo.client.SAdd(ctx, fifo.setName, data).Result() if err != nil { return err } @@ -105,12 +102,12 @@ func (fifo *RedisUniqueByteFIFO) PushFunc(data []byte, fn func() error) error { return err } } - return fifo.client.RPush(fifo.ctx, fifo.queueName, data).Err() + return fifo.client.RPush(ctx, fifo.queueName, data).Err() } // Pop pops data from the start of the fifo -func (fifo *RedisUniqueByteFIFO) Pop() ([]byte, error) { - data, err := fifo.client.LPop(fifo.ctx, fifo.queueName).Bytes() +func (fifo *RedisUniqueByteFIFO) Pop(ctx context.Context) ([]byte, error) { + data, err := fifo.client.LPop(ctx, fifo.queueName).Bytes() if err != nil && err != redis.Nil { return data, err } @@ -119,13 +116,13 @@ func (fifo *RedisUniqueByteFIFO) Pop() ([]byte, error) { return data, nil } - err = fifo.client.SRem(fifo.ctx, fifo.setName, data).Err() + err = fifo.client.SRem(ctx, fifo.setName, data).Err() return data, err } // Has returns whether the fifo contains this data -func (fifo *RedisUniqueByteFIFO) Has(data []byte) (bool, error) { - return fifo.client.SIsMember(fifo.ctx, fifo.setName, data).Result() +func (fifo *RedisUniqueByteFIFO) Has(ctx context.Context, data []byte) (bool, error) { + return fifo.client.SIsMember(ctx, fifo.setName, 
data).Result() } func init() { diff --git a/modules/queue/workerpool.go b/modules/queue/workerpool.go index 0f15ccac9efd7..0176e2e0b2d20 100644 --- a/modules/queue/workerpool.go +++ b/modules/queue/workerpool.go @@ -21,7 +21,7 @@ import ( type WorkerPool struct { lock sync.Mutex baseCtx context.Context - cancel context.CancelFunc + baseCtxCancel context.CancelFunc cond *sync.Cond qid int64 maxNumberOfWorkers int @@ -52,7 +52,7 @@ func NewWorkerPool(handle HandlerFunc, config WorkerPoolConfiguration) *WorkerPo dataChan := make(chan Data, config.QueueLength) pool := &WorkerPool{ baseCtx: ctx, - cancel: cancel, + baseCtxCancel: cancel, batchLength: config.BatchLength, dataChan: dataChan, handle: handle, @@ -83,7 +83,7 @@ func (p *WorkerPool) Push(data Data) { } func (p *WorkerPool) zeroBoost() { - ctx, cancel := context.WithCancel(p.baseCtx) + ctx, cancel := context.WithTimeout(p.baseCtx, p.boostTimeout) mq := GetManager().GetManagedQueue(p.qid) boost := p.boostWorkers if (boost+p.numberOfWorkers) > p.maxNumberOfWorkers && p.maxNumberOfWorkers >= 0 { @@ -94,26 +94,14 @@ func (p *WorkerPool) zeroBoost() { start := time.Now() pid := mq.RegisterWorkers(boost, start, true, start.Add(p.boostTimeout), cancel, false) - go func() { - select { - case <-ctx.Done(): - case <-time.After(p.boostTimeout): - } + cancel = func() { mq.RemoveWorkers(pid) - cancel() - }() + } } else { log.Warn("WorkerPool: %d has zero workers - adding %d temporary workers for %s", p.qid, p.boostWorkers, p.boostTimeout) - go func() { - select { - case <-ctx.Done(): - case <-time.After(p.boostTimeout): - } - cancel() - }() } p.lock.Unlock() - p.addWorkers(ctx, boost) + p.addWorkers(ctx, cancel, boost) } func (p *WorkerPool) pushBoost(data Data) { @@ -140,7 +128,7 @@ func (p *WorkerPool) pushBoost(data Data) { return } p.blockTimeout *= 2 - ctx, cancel := context.WithCancel(p.baseCtx) + boostCtx, boostCtxCancel := context.WithCancel(p.baseCtx) mq := GetManager().GetManagedQueue(p.qid) boost := 
p.boostWorkers if (boost+p.numberOfWorkers) > p.maxNumberOfWorkers && p.maxNumberOfWorkers >= 0 { @@ -150,24 +138,24 @@ func (p *WorkerPool) pushBoost(data Data) { log.Warn("WorkerPool: %d (for %s) Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", p.qid, mq.Name, ourTimeout, boost, p.boostTimeout, p.blockTimeout) start := time.Now() - pid := mq.RegisterWorkers(boost, start, true, start.Add(p.boostTimeout), cancel, false) + pid := mq.RegisterWorkers(boost, start, true, start.Add(p.boostTimeout), boostCtxCancel, false) go func() { - <-ctx.Done() + <-boostCtx.Done() mq.RemoveWorkers(pid) - cancel() + boostCtxCancel() }() } else { log.Warn("WorkerPool: %d Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", p.qid, ourTimeout, p.boostWorkers, p.boostTimeout, p.blockTimeout) } go func() { <-time.After(p.boostTimeout) - cancel() + boostCtxCancel() p.lock.Lock() p.blockTimeout /= 2 p.lock.Unlock() }() p.lock.Unlock() - p.addWorkers(ctx, boost) + p.addWorkers(boostCtx, boostCtxCancel, boost) p.dataChan <- data } } @@ -243,28 +231,25 @@ func (p *WorkerPool) commonRegisterWorkers(number int, timeout time.Duration, is mq := GetManager().GetManagedQueue(p.qid) if mq != nil { pid := mq.RegisterWorkers(number, start, hasTimeout, end, cancel, isFlusher) - go func() { - <-ctx.Done() - mq.RemoveWorkers(pid) - cancel() - }() log.Trace("WorkerPool: %d (for %s) adding %d workers with group id: %d", p.qid, mq.Name, number, pid) - } else { - log.Trace("WorkerPool: %d adding %d workers (no group id)", p.qid, number) - + return ctx, func() { + mq.RemoveWorkers(pid) + } } + log.Trace("WorkerPool: %d adding %d workers (no group id)", p.qid, number) + return ctx, cancel } // AddWorkers adds workers to the pool - this allows the number of workers to go above the limit func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.CancelFunc { ctx, cancel := p.commonRegisterWorkers(number, timeout, false) - 
p.addWorkers(ctx, number) + p.addWorkers(ctx, cancel, number) return cancel } // addWorkers adds workers to the pool -func (p *WorkerPool) addWorkers(ctx context.Context, number int) { +func (p *WorkerPool) addWorkers(ctx context.Context, cancel context.CancelFunc, number int) { for i := 0; i < number; i++ { p.lock.Lock() if p.cond == nil { @@ -279,11 +264,13 @@ func (p *WorkerPool) addWorkers(ctx context.Context, number int) { p.numberOfWorkers-- if p.numberOfWorkers == 0 { p.cond.Broadcast() + cancel() } else if p.numberOfWorkers < 0 { // numberOfWorkers can't go negative but... log.Warn("Number of Workers < 0 for QID %d - this shouldn't happen", p.qid) p.numberOfWorkers = 0 p.cond.Broadcast() + cancel() } p.lock.Unlock() }() diff --git a/options/gitignore/AltiumDesigner b/options/gitignore/AltiumDesigner new file mode 100644 index 0000000000000..5e410492cb66e --- /dev/null +++ b/options/gitignore/AltiumDesigner @@ -0,0 +1,20 @@ +# For PCBs designed using Altium Designer +# Website: https://www.altium.com/altium-designer/ + +# Directories containing cache data +History +__Previews + +# Directories containing logs and generated outputs +Project\ Logs* +Project\ Outputs* + +# Misc files generated by altium +debug.log +Status\ Report.txt +*.PcbDoc.htm +*.SchDocPreview +*.PcbDocPreview + +# Lock files sometimes left behind +.~lock.* diff --git a/options/gitignore/Autotools b/options/gitignore/Autotools index f2c137d046a6a..d9ecd8928acc3 100644 --- a/options/gitignore/Autotools +++ b/options/gitignore/Autotools @@ -16,6 +16,7 @@ autom4te.cache /autoscan-*.log /aclocal.m4 /compile +/config.cache /config.guess /config.h.in /config.log diff --git a/options/gitignore/Coq b/options/gitignore/Coq index 829ac44a1c7b0..66596b22ed3ae 100644 --- a/options/gitignore/Coq +++ b/options/gitignore/Coq @@ -10,6 +10,7 @@ *.glob *.ml.d *.ml4.d +*.mlg.d *.mli.d *.mllib.d *.mlpack.d @@ -20,7 +21,7 @@ *.vo *.vok *.vos -.coq-native/ +.coq-native .csdp.cache .lia.cache .nia.cache @@ -31,6 
+32,7 @@ lia.cache nia.cache nlia.cache nra.cache +native_compute_profile_*.data # generated timing files *.timing.diff diff --git a/options/gitignore/Dart b/options/gitignore/Dart index 6d21af37c97b2..3a83c2f087b95 100644 --- a/options/gitignore/Dart +++ b/options/gitignore/Dart @@ -11,6 +11,9 @@ pubspec.lock # If you don't generate documentation locally you can remove this line. doc/api/ +# dotenv environment variables file +.env* + # Avoid committing generated Javascript files: *.dart.js *.info.json # Produced by the --dump-info flag. diff --git a/options/gitignore/IAR_EWARM b/options/gitignore/IAR_EWARM index 13ed9a0b19224..e456471f66acc 100644 --- a/options/gitignore/IAR_EWARM +++ b/options/gitignore/IAR_EWARM @@ -1,5 +1,5 @@ # gitignore template for the IAR EWARM -# website: https://www.iar.com/ +# website: https://www.iar.com/knowledge/support/technical-notes/ide/which-files-should-be-version-controlled/ # Some tools will put the EWARM files # under a subdirectory with the same name diff --git a/options/gitignore/JetBrains b/options/gitignore/JetBrains index 8da0824ba5497..0a16fa718cd28 100644 --- a/options/gitignore/JetBrains +++ b/options/gitignore/JetBrains @@ -8,6 +8,9 @@ .idea/**/dictionaries .idea/**/shelf +# AWS User-specific +.idea/**/aws.xml + # Generated files .idea/**/contentModel.xml diff --git a/options/gitignore/ROS2 b/options/gitignore/ROS2 new file mode 100644 index 0000000000000..6cc824d8e6d94 --- /dev/null +++ b/options/gitignore/ROS2 @@ -0,0 +1,29 @@ +install/ +log/ +build/ + +# Ignore generated docs +*.dox +*.wikidoc + +# eclipse stuff +.project +.cproject + +# qcreator stuff +CMakeLists.txt.user + +srv/_*.py +*.pcd +*.pyc +qtcreator-* +*.user + +*~ + +# Emacs +.#* + +# Colcon custom files +COLCON_IGNORE +AMENT_IGNORE diff --git a/options/gitignore/Rust b/options/gitignore/Rust index ff47c2d77d919..6985cf1bd09dd 100644 --- a/options/gitignore/Rust +++ b/options/gitignore/Rust @@ -9,3 +9,6 @@ Cargo.lock # These are backup files generated 
by rustfmt **/*.rs.bk + +# MSVC Windows builds of rustc generate these, which store debugging information +*.pdb diff --git a/options/gitignore/Scala b/options/gitignore/Scala index 9c07d4ae98846..7169cab19511e 100644 --- a/options/gitignore/Scala +++ b/options/gitignore/Scala @@ -1,2 +1,5 @@ *.class *.log + +# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml +hs_err_pid* diff --git a/options/gitignore/Strapi b/options/gitignore/Strapi new file mode 100644 index 0000000000000..70e6542590a9b --- /dev/null +++ b/options/gitignore/Strapi @@ -0,0 +1,135 @@ +############################ +# OS X +############################ + +.DS_Store +.AppleDouble +.LSOverride +Icon +.Spotlight-V100 +.Trashes +._* + + +############################ +# Linux +############################ + +*~ + + +############################ +# Windows +############################ + +Thumbs.db +ehthumbs.db +Desktop.ini +$RECYCLE.BIN/ +*.cab +*.msi +*.msm +*.msp + + +############################ +# Packages +############################ + +*.7z +*.csv +*.dat +*.dmg +*.gz +*.iso +*.jar +*.rar +*.tar +*.zip +*.com +*.class +*.dll +*.exe +*.o +*.seed +*.so +*.swo +*.swp +*.swn +*.swm +*.out +*.pid + + +############################ +# Logs and databases +############################ + +.tmp +*.log +*.sql +*.sqlite + + +############################ +# Misc. 
+############################ + +*# +.idea +nbproject +.vscode/ + + +############################ +# Node.js +############################ + +lib-cov +lcov.info +pids +logs +results +build +node_modules +.node_history +package-lock.json +**/package-lock.json +!docs/package-lock.json +*.heapsnapshot + + +############################ +# Tests +############################ + +testApp +coverage +cypress/screenshots +cypress/videos + + +############################ +# Documentation +############################ + +dist + +############################ +# Builds +############################ + +packages/strapi-generate-new/files/public/ + +############################ +# Example app +############################ + +.dev +# *.cache + +############################ +# Visual Studio Code +############################ + +front-workspace.code-workspace diff --git a/options/gitignore/TeX b/options/gitignore/TeX index 8a42ebbd98cdf..237f49ebaa20e 100644 --- a/options/gitignore/TeX +++ b/options/gitignore/TeX @@ -120,6 +120,7 @@ acs-*.bib # gregoriotex *.gaux +*.glog *.gtex # htlatex @@ -166,6 +167,9 @@ _minted* # morewrites *.mw +# newpax +*.newpax + # nomencl *.nlg *.nlo diff --git a/options/gitignore/TwinCAT3 b/options/gitignore/TwinCAT3 new file mode 100644 index 0000000000000..7bd6f87505cb5 --- /dev/null +++ b/options/gitignore/TwinCAT3 @@ -0,0 +1,25 @@ +# gitignore template for TwinCAT3 +# website: https://www.beckhoff.com/twincat3/ +# +# Recommended: VisualStudio.gitignore + +# TwinCAT files +*.tpy +*.tclrs +*.compiled-library +*.compileinfo +# Don't include the tmc-file rule if either of the following is true: +# 1. You've got TwinCAT C++ projects, as the information in the TMC-file is created manually for the C++ projects (in that case, only (manually) ignore the tmc-files for the PLC projects) +# 2. You've created a standalone PLC-project and added events to it, as these are stored in the TMC-file. 
+*.tmc +*.tmcRefac +*.library +*.project.~u +*.tsproj.bak +*.xti.bak +LineIDs.dbg +LineIDs.dbg.bak +_Boot/ +_CompileInfo/ +_Libraries/ +_ModuleInstall/ \ No newline at end of file diff --git a/options/gitignore/V b/options/gitignore/V new file mode 100644 index 0000000000000..bc0d4f3aaaa74 --- /dev/null +++ b/options/gitignore/V @@ -0,0 +1,14 @@ +* +!*/ +!*.* +*.exe +*.o +*.so +*.tmp.c +*.exp +*.ilk +*.pdb +*.dll +*.lib +*.bak +*.out diff --git a/options/gitignore/VisualStudio b/options/gitignore/VisualStudio index 1ee53850b84cd..34c8dee45388e 100644 --- a/options/gitignore/VisualStudio +++ b/options/gitignore/VisualStudio @@ -90,6 +90,7 @@ StyleCopReport.xml *.tmp_proj *_wpftmp.csproj *.log +*.tlog *.vspscc *.vssscc .builds @@ -205,6 +206,9 @@ PublishScripts/ *.nuget.props *.nuget.targets +# Nuget personal access tokens and Credentials +nuget.config + # Microsoft Azure Build Output csx/ *.build.csdef @@ -360,3 +364,25 @@ MigrationBackup/ # Fody - auto-generated XML schema FodyWeavers.xsd + +# VS Code files for those working on multiple tools +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +*.code-workspace + +# Local History for Visual Studio Code +.history/ + +# Windows Installer files from build outputs +*.cab +*.msi +*.msix +*.msm +*.msp + +# JetBrains Rider +.idea/ +*.sln.iml diff --git a/options/license/0BSD b/options/license/0BSD index 72c7baf54c2f1..0b8ae762b2405 100644 --- a/options/license/0BSD +++ b/options/license/0BSD @@ -1,4 +1,4 @@ -Copyright (C) 2006 by Rob Landley +Copyright (C) YEAR by AUTHOR EMAIL Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted. 
diff --git a/options/license/BSD-3-Clause-No-Military-License b/options/license/BSD-3-Clause-No-Military-License new file mode 100644 index 0000000000000..e06aa93b51391 --- /dev/null +++ b/options/license/BSD-3-Clause-No-Military-License @@ -0,0 +1,16 @@ +Copyright (c) year copyright holder. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. +Redistribution of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. +Redistribution in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. +Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +YOU ACKNOWLEDGE THAT THIS SOFTWARE IS NOT DESIGNED, LICENSED OR INTENDED FOR USE IN THE DESIGN, CONSTRUCTION, OPERATION OR MAINTENANCE OF ANY MILITARY FACILITY. 
diff --git a/options/license/CAL-1.0 b/options/license/CAL-1.0 index e0ccf819dc063..4cebc6d54df62 100644 --- a/options/license/CAL-1.0 +++ b/options/license/CAL-1.0 @@ -135,6 +135,11 @@ Code corresponding to the modifications in the Modified Work must be provided to the Recipient either a) under this License, or b) under a Compatible Open Source License. +A “Compatible Open Source License” means a license accepted by the Open Source +Initiative that allows object code created using both Source Code provided under +this License and Source Code provided under the other open source license to be +distributed together as a single work. + #### 4.1.3. Coordinated Disclosure of Security Vulnerabilities You may delay providing the Source Code corresponding to a particular diff --git a/options/license/CAL-1.0-Combined-Work-Exception b/options/license/CAL-1.0-Combined-Work-Exception index e0ccf819dc063..4cebc6d54df62 100644 --- a/options/license/CAL-1.0-Combined-Work-Exception +++ b/options/license/CAL-1.0-Combined-Work-Exception @@ -135,6 +135,11 @@ Code corresponding to the modifications in the Modified Work must be provided to the Recipient either a) under this License, or b) under a Compatible Open Source License. +A “Compatible Open Source License” means a license accepted by the Open Source +Initiative that allows object code created using both Source Code provided under +this License and Source Code provided under the other open source license to be +distributed together as a single work. + #### 4.1.3. Coordinated Disclosure of Security Vulnerabilities You may delay providing the Source Code corresponding to a particular diff --git a/options/license/CDL-1.0 b/options/license/CDL-1.0 new file mode 100644 index 0000000000000..e2990cde2db75 --- /dev/null +++ b/options/license/CDL-1.0 @@ -0,0 +1,53 @@ +Common Documentation License + +Version 1.0 - February 16, 2001 + +Copyright © 2001 Apple Computer, Inc. 
+ +Permission is granted to copy and distribute verbatim copies of this License, but changing or adding to it in any way is not permitted. + +Please read this License carefully before downloading or using this material. By downloading or using this material, you are agreeing to be bound by the terms of this License. If you do not or cannot agree to the terms of this License, please do not download or use this material. + +0. Preamble. The Common Documentation License (CDL) provides a very simple and consistent license that allows relatively unrestricted use and redistribution of documents while still maintaining the author's credit and intent. To preserve simplicity, the License does not specify in detail how (e.g. font size) or where (e.g. title page, etc.) the author should be credited. To preserve consistency, changes to the CDL are not allowed and all derivatives of CDL documents are required to remain under the CDL. Together, these constraints enable third parties to easily and safely reuse CDL documents, making the CDL ideal for authors who desire a wide distribution of their work. However, this means the CDL does not allow authors to restrict precisely how their work is used or represented, making it inappropriate for those desiring more finely-grained control. + +1. General; Definitions. This License applies to any documentation, manual or other work that contains a notice placed by the Copyright Holder stating that it is subject to the terms of this Common Documentation License version 1.0 (or subsequent version thereof) ("License"). As used in this License: + +1.1 "Copyright Holder" means the original author(s) of the Document or other owner(s) of the copyright in the Document. + +1.2 "Document(s)" means any documentation, manual or other work that has been identified as being subject to the terms of this License. 
+ +1.3 "Derivative Work" means a work which is based upon a pre-existing Document, such as a revision, modification, translation, abridgment, condensation, expansion, or any other form in which such pre-existing Document may be recast, transformed, or adapted. + +1.4 "You" or "Your" means an individual or a legal entity exercising rights under this License. + +2. Basic License. Subject to all the terms and conditions of this License, You may use, copy, modify, publicly display, distribute and publish the Document and your Derivative Works thereof, in any medium physical or electronic, commercially or non-commercially; provided that: (a) all copyright notices in the Document are preserved; (b) a copy of this License, or an incorporation of it by reference in proper form as indicated in Exhibit A below, is included in a conspicuous location in all copies such that it would be reasonably viewed by the recipient of the Document; and (c) You add no other terms or conditions to those of this License. + +3. Derivative Works. All Derivative Works are subject to the terms of this License. You may copy and distribute a Derivative Work of the Document under the conditions of Section 2 above, provided that You release the Derivative Work under the exact, verbatim terms of this License (i.e., the Derivative Work is licensed as a "Document" under the terms of this License). In addition, Derivative Works of Documents must meet the following requirements: + + (a) All copyright and license notices in the original Document must be preserved. + + (b) An appropriate copyright notice for your Derivative Work must be added adjacent to the other copyright notices. + + (c) A statement briefly summarizing how your Derivative Work is different from the original Document must be included in the same place as your copyright notice. 
+ + (d) If it is not reasonably evident to a recipient of your Derivative Work that the Derivative Work is subject to the terms of this License, a statement indicating such fact must be included in the same place as your copyright notice. + +4. Compilation with Independent Works. You may compile or combine a Document or its Derivative Works with other separate and independent documents or works to create a compilation work ("Compilation"). If included in a Compilation, the Document or Derivative Work thereof must still be provided under the terms of this License, and the Compilation shall contain (a) a notice specifying the inclusion of the Document and/or Derivative Work and the fact that it is subject to the terms of this License, and (b) either a copy of the License or an incorporation by reference in proper form (as indicated in Exhibit A). Mere aggregation of a Document or Derivative Work with other documents or works on the same storage or distribution medium (e.g. a CD-ROM) will not cause this License to apply to those other works. + +5. NO WARRANTY. THE DOCUMENT IS PROVIDED 'AS IS' BASIS, WITHOUT WARRANTY OF ANY KIND, AND THE COPYRIGHT HOLDER EXPRESSLY DISCLAIMS ALL WARRANTIES AND/OR CONDITIONS WITH RESPECT TO THE DOCUMENT, EITHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES AND/OR CONDITIONS OF MERCHANTABILITY, OF SATISFACTORY QUALITY, OF FITNESS FOR A PARTICULAR PURPOSE, OF ACCURACY, OF QUIET ENJOYMENT, AND OF NONINFRINGEMENT OF THIRD PARTY RIGHTS. + +6. LIMITATION OF LIABILITY. 
UNDER NO CIRCUMSTANCES SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY INCIDENTAL, SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR RELATING TO THIS LICENSE OR YOUR USE, REPRODUCTION, MODIFICATION, DISTRIBUTION AND/OR PUBLICATION OF THE DOCUMENT, OR ANY PORTION THEREOF, WHETHER UNDER A THEORY OF CONTRACT, WARRANTY, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES AND NOTWITHSTANDING THE FAILURE OF ESSENTIAL PURPOSE OF ANY REMEDY. + +7. Trademarks. This License does not grant any rights to use any names, trademarks, service marks or logos of the Copyright Holder (collectively "Marks") and no such Marks may be used to endorse or promote works or products derived from the Document without the prior written permission of the Copyright Holder. + +8. Versions of the License. Apple Computer, Inc. ("Apple") may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Once a Document has been published under a particular version of this License, You may continue to use it under the terms of that version. You may also choose to use such Document under the terms of any subsequent version of this License published by Apple. No one other than Apple has the right to modify the terms applicable to Documents created under this License. + +9. Termination. This License and the rights granted hereunder will terminate automatically if You fail to comply with any of its terms. Upon termination, You must immediately stop any further reproduction, modification, public display, distribution and publication of the Document and Derivative Works. However, all sublicenses to the Document and Derivative Works which have been properly granted prior to termination shall survive any termination of this License. 
Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive, including but not limited to Sections 5, 6, 7, 9 and 10. + +10. Waiver; Severability; Governing Law. Failure by the Copyright Holder to enforce any provision of this License will not be deemed a waiver of future enforcement of that or any other provision. If for any reason a court of competent jurisdiction finds any provision of this License, or portion thereof, to be unenforceable, that provision of the License will be enforced to the maximum extent permissible so as to effect the economic benefits and intent of the parties, and the remainder of this License will continue in full force and effect. This License shall be governed by the laws of the United States and the State of California, except that body of California law concerning conflicts of law. + +EXHIBIT A + +The proper form for an incorporation of this License by reference is as follows: + +"Copyright (c) [year] by [Copyright Holder's name]. This material has been released under and is subject to the terms of the Common Documentation License, v.1.0, the terms of which are hereby incorporated by reference. Please obtain a copy of the License at http://www.opensource.apple.com/cdl/ and read it before using this material. Your use of this material signifies your agreement to the terms of the License." 
diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini index 936677e31d333..ab7367ba7ad3f 100644 --- a/options/locale/locale_en-US.ini +++ b/options/locale/locale_en-US.ini @@ -1550,6 +1550,7 @@ settings.email_notifications.disable = Disable Email Notifications settings.email_notifications.submit = Set Email Preference settings.site = Website settings.update_settings = Update Settings +settings.branches.update_default_branch = Update Default Branch settings.advanced_settings = Advanced Settings settings.wiki_desc = Enable Repository Wiki settings.use_internal_wiki = Use Built-In Wiki @@ -2282,7 +2283,6 @@ auths.host = Host auths.port = Port auths.bind_dn = Bind DN auths.bind_password = Bind Password -auths.bind_password_helper = Warning: This password is stored in plain text. Use a read-only account if possible. auths.user_base = User Search Base auths.user_dn = User DN auths.attribute_username = Username Attribute diff --git a/options/locale/locale_es-ES.ini b/options/locale/locale_es-ES.ini index 88d5a09b887c2..24e2b7b7783d6 100644 --- a/options/locale/locale_es-ES.ini +++ b/options/locale/locale_es-ES.ini @@ -854,6 +854,7 @@ branch=Rama tree=Árbol clear_ref=`Borrar referencia actual` filter_branch_and_tag=Filtrar por rama o etiqueta +find_tag=Buscar etiqueta branches=Ramas tags=Etiquetas issues=Incidencias @@ -1158,7 +1159,7 @@ issues.label_color=Color etiqueta issues.label_count=%d etiquetas issues.label_open_issues=%d incidencias abiertas issues.label_edit=Editar -issues.label_delete=Borrar +issues.label_delete=Eliminar issues.label_modify=Editar etiqueta issues.label_deletion=Eliminar etiqueta issues.label_deletion_desc=Eliminar una etiqueta la elimina de todos las incidencias. ¿Continuar? @@ -1284,6 +1285,8 @@ issues.review.resolved_by=ha marcado esta conversación como resuelta issues.assignee.error=No todos los asignados fueron añadidos debido a un error inesperado. 
issues.reference_issue.body=Cuerpo +compare.compare_base=base +compare.compare_head=comparar pulls.desc=Activar Pull Requests y revisiones de código. pulls.new=Nuevo Pull Request @@ -1546,6 +1549,7 @@ settings.email_notifications.disable=Deshabilitar las notificaciones por correo settings.email_notifications.submit=Establecer Preferencia de correo electrónico settings.site=Sitio web settings.update_settings=Actualizar configuración +settings.branches.update_default_branch=Actualizar rama por defecto settings.advanced_settings=Ajustes avanzados settings.wiki_desc=Activar Wiki de repositorio settings.use_internal_wiki=Usar Wiki integrada @@ -1886,6 +1890,7 @@ diff.file_image_width=Anchura diff.file_image_height=Altura diff.file_byte_size=Tamaño diff.file_suppressed=La diferencia del archivo ha sido suprimido porque es demasiado grande +diff.file_suppressed_line_too_long=Las diferiencias del archivo han sido suprimidas porque una o mas lineas son muy largas diff.too_many_files=Algunos archivos no se mostraron porque demasiados archivos cambiaron en este cambio diff.comment.placeholder=Deja un comentario diff.comment.markdown_info=Es posible estilizar con markdown. @@ -1913,6 +1918,7 @@ release.new_release=Nueva Release release.draft=Borrador release.prerelease=Pre-lanzamiento release.stable=Estable +release.compare=Comparar release.edit=editar release.ahead.commits=%d commits release.ahead.target=a %s desde esta versión @@ -2130,7 +2136,7 @@ dashboard.cron.error=Error en Cron: %s: %[3]s dashboard.cron.finished=Cron: %[1]s ha finalizado dashboard.delete_inactive_accounts=Eliminar todas las cuentas inactivas dashboard.delete_inactive_accounts.started=Se ha iniciado la tarea: "Eliminar todas las cuentas inactivas". -dashboard.delete_repo_archives=Borrar todos los archivos del repositorio (ZIP, TAR.GZ, etc.) +dashboard.delete_repo_archives=Eliminar todos los archivos del repositorio (ZIP, TAR.GZ, etc.) 
dashboard.delete_repo_archives.started=Se ha iniciado la tarea: "Eliminar todos los archivos del repositorios". dashboard.delete_missing_repos=Eliminar todos los repositorios que faltan sus archivos Git dashboard.delete_missing_repos.started=Se ha iniciado la tarea: "Eliminar todos los repositorios que faltan sus archivos Git". @@ -2179,6 +2185,8 @@ dashboard.total_gc_time=Pausa Total por GC dashboard.total_gc_pause=Pausa Total por GC dashboard.last_gc_pause=Última Pausa por GC dashboard.gc_times=Ejecuciones GC +dashboard.delete_old_actions=Eliminar todas las acciones antiguas de la base de datos +dashboard.delete_old_actions.started=Eliminar todas las acciones antiguas de la base de datos inicializada. users.user_manage_panel=Gestión de cuentas de usuario users.new_account=Crear Cuenta de Usuario @@ -2305,6 +2313,7 @@ auths.allowed_domains_helper=Dejar vacío para permitir todos los dominios. Sepa auths.enable_tls=Habilitar cifrado TLS auths.skip_tls_verify=Omitir la verificación TLS auths.pam_service_name=Nombre del Servicio PAM +auths.pam_email_domain=Dominio de correo de PAM (opcional) auths.oauth2_provider=Proveedor OAuth2 auths.oauth2_icon_url=URL de icono auths.oauth2_clientID=ID de cliente (clave) @@ -2404,6 +2413,7 @@ config.db_path=Ruta config.service_config=Configuración del servicio config.register_email_confirm=Requerir confirmación de correo electrónico para registrarse config.disable_register=Deshabilitar auto-registro +config.allow_only_internal_registration=Permitir el registro solo desde Gitea config.allow_only_external_registration=Permitir el registro únicamente a través de servicios externos config.enable_openid_signup=Habilitar el auto-registro con OpenID config.enable_openid_signin=Habilitar el inicio de sesión con OpenID diff --git a/options/locale/locale_ja-JP.ini b/options/locale/locale_ja-JP.ini index c583ff7848ff6..fa8a7b0116508 100644 --- a/options/locale/locale_ja-JP.ini +++ b/options/locale/locale_ja-JP.ini @@ -1285,6 +1285,8 @@ 
issues.review.resolved_by=がこの会話を解決済みにしました issues.assignee.error=予期しないエラーにより、一部の担当者を追加できませんでした。 issues.reference_issue.body=内容 +compare.compare_base=基準 +compare.compare_head=比較 pulls.desc=プルリクエストとコードレビューの有効化。 pulls.new=新しいプルリクエスト @@ -1547,6 +1549,7 @@ settings.email_notifications.disable=メール通知無効 settings.email_notifications.submit=メール設定を保存 settings.site=Webサイト settings.update_settings=設定を更新 +settings.branches.update_default_branch=デフォルトブランチを更新 settings.advanced_settings=拡張設定 settings.wiki_desc=Wikiを有効にする settings.use_internal_wiki=ビルトインのWikiを使用する @@ -1887,6 +1890,7 @@ diff.file_image_width=幅 diff.file_image_height=高さ diff.file_byte_size=サイズ diff.file_suppressed=ファイル差分が大きすぎるため省略します +diff.file_suppressed_line_too_long=長すぎる行があるためファイル差分は表示されません diff.too_many_files=変更されたファイルが多すぎるため、一部のファイルは表示されません diff.comment.placeholder=コメントを残す diff.comment.markdown_info=Markdownによる書式設定をサポートしています。 @@ -2309,6 +2313,7 @@ auths.allowed_domains_helper=すべてのドメインを許可する場合は空 auths.enable_tls=TLS暗号化を有効にする auths.skip_tls_verify=TLS検証を省略 auths.pam_service_name=PAMサービス名 +auths.pam_email_domain=PAM メールドメイン名 (オプション) auths.oauth2_provider=OAuth2プロバイダー auths.oauth2_icon_url=アイコンのURL auths.oauth2_clientID=クライアントID (キー) @@ -2408,6 +2413,7 @@ config.db_path=パス config.service_config=サービス設定 config.register_email_confirm=登録にはメールによる確認が必要 config.disable_register=セルフ登録無効 +config.allow_only_internal_registration=Gitea上での登録のみを許可 config.allow_only_external_registration=外部サービスを使用した登録のみを許可 config.enable_openid_signup=OpenIDを使ったセルフ登録有効 config.enable_openid_signin=OpenIDを使ったサインイン有効 diff --git a/options/locale/locale_pt-BR.ini b/options/locale/locale_pt-BR.ini index 6ec799603613a..dbcf1ca5a9d42 100644 --- a/options/locale/locale_pt-BR.ini +++ b/options/locale/locale_pt-BR.ini @@ -2040,6 +2040,7 @@ auths.allowed_domains_helper=Deixe em branco para permitir todos os domínios. 
S auths.enable_tls=Habilitar Criptografia TLS auths.skip_tls_verify=Pular verificação de TLS auths.pam_service_name=Nome de Serviço PAM +auths.pam_email_domain=Domínio de e-mail do PAM (opcional) auths.oauth2_provider=Provedor OAuth2 auths.oauth2_clientID=ID do cliente (chave) auths.oauth2_clientSecret=Senha do cliente diff --git a/options/locale/locale_pt-PT.ini b/options/locale/locale_pt-PT.ini index 065338b8a4f21..3e6f50c01cec2 100644 --- a/options/locale/locale_pt-PT.ini +++ b/options/locale/locale_pt-PT.ini @@ -193,7 +193,7 @@ sqlite3_not_available=Esta versão do Gitea não suporta o SQLite3. Descarregue invalid_db_setting=As configurações da base de dados são inválidas: %v invalid_repo_path=A localização base dos repositórios é inválida: %v run_user_not_match=O nome de utilizador para 'executar como' não é o nome de utilizador corrente: %s → %s -save_config_failed=Falha ao guardar a configuração: %v +save_config_failed=Falhou ao guardar a configuração: %v invalid_admin_setting=A configuração da conta de administrador é inválida: %v install_success=Bem-vindo(a)! Obrigado por escolher o Gitea. Divirta-se e aproveite! invalid_log_root_path=A localização dos registos é inválida: %v @@ -402,7 +402,7 @@ invalid_ssh_key=Não é possível verificar sua chave SSH: %s invalid_gpg_key=Não é possível verificar sua chave GPG: %s invalid_ssh_principal=Protagonista inválido: %s unable_verify_ssh_key=Não é possível verificar a chave SSH; verifique novamente se há erros. -auth_failed=Falha na autenticação: %v +auth_failed=Falhou a autenticação: %v still_own_repo=A sua conta possui um ou mais repositórios; deve excluí-los ou transferi-los primeiro. still_has_org=A sua conta é membro de uma ou mais organizações; deixe-as primeiro. @@ -411,7 +411,7 @@ org_still_own_repo=Esta organização ainda possui repositórios; deve excluí-l target_branch_not_exist=O ramo de destino não existe. 
[user] -change_avatar=Alterar seu avatar… +change_avatar=Mude o seu avatar… join_on=Inscreveu-se em repositories=Repositórios activity=Trabalho público @@ -459,7 +459,7 @@ update_theme=Substituir tema update_profile=Modificar perfil update_language_not_found=O idioma '%s' não está disponível. update_profile_success=O seu perfil foi modificado. -change_username=Seu nome de utilizador foi alterado. +change_username=O seu nome de utilizador foi modificado. change_username_prompt=Nota: alterações do nome de utilizador também alteram o URL de sua conta. change_username_redirect_prompt=O antigo nome de utilizador, enquanto não for reivindicado, irá reencaminhar para o novo. continue=Continuar @@ -476,8 +476,8 @@ enable_custom_avatar=Usar avatar personalizado choose_new_avatar=Escolher um novo avatar update_avatar=Substituir avatar delete_current_avatar=Eliminar o avatar corrente -uploaded_avatar_not_a_image=O ficheiro enviado não é uma imagem. -uploaded_avatar_is_too_big=O ficheiro enviado excedeu o tamanho máximo. +uploaded_avatar_not_a_image=O ficheiro carregado não é uma imagem. +uploaded_avatar_is_too_big=O ficheiro carregado excedeu o tamanho máximo. update_avatar_success=O seu avatar foi substituído. change_password=Substituir a senha @@ -638,7 +638,7 @@ or_enter_secret=Ou insira o segredo: %s then_enter_passcode=E insira o código apresentado na aplicação: passcode_invalid=O código está errado. Tente de novo. twofa_enrolled=A sua conta usa autenticação em dois passos. Guarde o seu código de recuperação (%s) num lugar seguro porque é mostrado somente uma vez! -twofa_failed_get_secret=Falha ao obter o segredo. +twofa_failed_get_secret=Falhou a obtenção do segredo. u2f_desc=Chaves de segurança são dispositivos de hardware contendo chaves criptográficas. Podem ser usadas para autenticação em dois passos. As chaves de segurança têm de suportar o standard FIDO U2F. 
u2f_require_twofa=A sua conta tem que ter habilitada a autenticação em dois passos para poder usar chaves de segurança. @@ -770,7 +770,7 @@ template.issue_labels=Rótulos das questões template.one_item=Tem que escolher pelo menos um item do modelo template.invalid=Tem que escolher um repositório modelo -archive.title=Este repositório está arquivado. Pode ver ficheiros e cloná-lo, mas não pode fazer envios ou lançar questões ou pedidos de integração. +archive.title=Este repositório está arquivado. Pode ver os seus ficheiros e cloná-lo, mas não pode fazer envios para o repositório nem lançar questões ou fazer pedidos de integração. archive.issue.nocomment=Este repositório está arquivado. Não pode comentar nas questões. archive.pull.nocomment=Este repositório está arquivado. Não pode comentar nos pedidos de integração. @@ -887,24 +887,24 @@ line=linha lines=linhas editor.new_file=Novo ficheiro -editor.upload_file=Enviar ficheiro +editor.upload_file=Carregar ficheiro editor.edit_file=Editar ficheiro -editor.preview_changes=Pré-visualizar alterações +editor.preview_changes=Pré-visualizar modificações editor.cannot_edit_lfs_files=Ficheiros LFS não podem ser editados na interface web. editor.cannot_edit_non_text_files=Ficheiros binários não podem ser editados na interface da web. editor.edit_this_file=Editar ficheiro editor.this_file_locked=Ficheiro bloqueado -editor.must_be_on_a_branch=Tem que estar num ramo para fazer ou propor alterações neste ficheiro. -editor.fork_before_edit=Tem que fazer uma derivação deste repositório para fazer ou propor alterações neste ficheiro. +editor.must_be_on_a_branch=Tem que estar num ramo para fazer ou propor modificações neste ficheiro. +editor.fork_before_edit=Tem que fazer uma derivação deste repositório para fazer ou propor modificações neste ficheiro. editor.delete_this_file=Eliminar ficheiro -editor.must_have_write_access=Tem que ter permissões de escrita para fazer ou propor alterações neste ficheiro. 
+editor.must_have_write_access=Tem que ter permissões de escrita para fazer ou propor modificações neste ficheiro. editor.file_delete_success=O ficheiro '%s' foi eliminado. editor.name_your_file=Nomeie o seu ficheiro… editor.filename_help=Adicione uma pasta escrevendo o nome dessa pasta seguido de uma barra('/'). Remova uma pasta carregando na tecla de apagar ('←') no início do campo. editor.or=ou editor.cancel_lower=Cancelar -editor.commit_signed_changes=Cometer alterações assinadas -editor.commit_changes=Cometer alterações +editor.commit_signed_changes=Cometer modificações assinadas +editor.commit_changes=Cometer modificações editor.add_tmpl=Adicionar '' editor.add=Adicionar '%s' editor.update=Modificar '%s' @@ -914,7 +914,7 @@ editor.signoff_desc=Adicionar "Assinado-por" seguido do autor do cometimento no editor.commit_directly_to_this_branch=Cometer imediatamente no ramo %s. editor.create_new_branch=Crie um novo ramo para este cometimento e inicie um pedido de integração. editor.create_new_branch_np=Criar um novo ramo para este cometimento. -editor.propose_file_change=Propor alteração de ficheiro +editor.propose_file_change=Propor modificação do ficheiro editor.new_branch_name_desc=Nome do novo ramo… editor.cancel=Cancelar editor.filename_cannot_be_empty=O nome do ficheiro não pode estar em branco. @@ -930,22 +930,22 @@ editor.file_changed_while_editing=O conteúdo do ficheiro mudou desde que começ editor.file_already_exists=Já existe um ficheiro com o nome '%s' neste repositório. editor.commit_empty_file_header=Cometer um ficheiro vazio editor.commit_empty_file_text=O ficheiro que está prestes a cometer está vazio. Quer continuar? -editor.no_changes_to_show=Não existem alterações a mostrar. +editor.no_changes_to_show=Não existem modificações para mostrar. editor.fail_to_update_file=Falhou ao modificar/criar o ficheiro '%s'. 
editor.fail_to_update_file_summary=Mensagem de erro: -editor.push_rejected_no_message=A alteração foi rejeitada pelo servidor sem qualquer mensagem. Verifique os automatismos do Git. -editor.push_rejected=A alteração foi rejeitada pelo servidor. Verifique os automatismos do Git. +editor.push_rejected_no_message=A modificação foi rejeitada pelo servidor sem qualquer mensagem. Verifique os automatismos do Git. +editor.push_rejected=A modificação foi rejeitada pelo servidor. Verifique os automatismos do Git. editor.push_rejected_summary=Mensagem completa de rejeição: editor.add_subdir=Adicionar uma pasta… -editor.unable_to_upload_files=Falha ao enviar ficheiros para '%s' com erro: %v +editor.unable_to_upload_files=Falhou o carregamento de ficheiros para '%s' com o erro: %v editor.upload_file_is_locked=O ficheiro '%s' está bloqueado por %s. -editor.upload_files_to_dir=Enviar ficheiros para '%s' +editor.upload_files_to_dir=Carregar ficheiros para '%s' editor.cannot_commit_to_protected_branch=Não é possível cometer para o ramo protegido '%s'. editor.no_commit_to_branch=Não é possível cometer imediatamente para o ramo porque: editor.user_no_push_to_branch=O utilizador não pode enviar para o ramo editor.require_signed_commit=O ramo requer um cometimento assinado -commits.desc=Navegar pelo histórico de alterações no código fonte. +commits.desc=Navegar pelo histórico de modificações no código fonte. commits.commits=Cometimentos commits.no_commits=Não há cometimentos em comum. '%s' e '%s' têm históricos completamente diferentes. 
commits.search=Procurar cometimentos… @@ -993,7 +993,7 @@ projects.board.new_title=Novo nome para o quadro projects.board.new_submit=Submeter projects.board.new=Novo quadro projects.board.set_default=Definir como padrão -projects.board.set_default_desc=Definir este painel como padrão para recebimentos e questões sem categoria +projects.board.set_default_desc=Definir este painel como padrão para puxadas e questões não categorizadas projects.board.delete=Eliminar quadro projects.board.deletion_desc=Eliminar um quadro de projecto faz com que todas as questões relacionadas sejam movidas para 'Sem categoria'. Continuar? projects.open=Abrir @@ -1043,7 +1043,7 @@ issues.label_templates.title=Carregar um conjunto predefinido de rótulos issues.label_templates.info=Ainda não existem rótulos. Crie um rótulo com 'Novo rótulo' ou use um conjunto de rótulos predefinido: issues.label_templates.helper=Escolha um conjunto de rótulos issues.label_templates.use=Usar conjunto de rótulos -issues.label_templates.fail_to_load_file=Falha ao carregar o ficheiro modelo de rótulos '%s': %v +issues.label_templates.fail_to_load_file=Falhou ao carregar o ficheiro modelo de rótulos '%s': %v issues.add_label=adicionou o rótulo %s %s issues.add_labels=adicionou os rótulos %s %s issues.remove_label=removeu o rótulo %s %s @@ -1136,7 +1136,7 @@ issues.poster=Autor issues.collaborator=Colaborador(a) issues.owner=Proprietário(a) issues.re_request_review=Voltar a solicitar revisão -issues.is_stale=Houve alterações neste pedido de integração posteriormente a esta revisão +issues.is_stale=Houve modificações neste pedido de integração posteriormente a esta revisão issues.remove_request_review=Remover solicitação de revisão issues.remove_request_review_block=Não é possível remover a solicitação de revisão issues.dismiss_review=Descartar revisão @@ -1207,10 +1207,11 @@ issues.time_spent_total=Total de tempo gasto issues.time_spent_from_all_authors=`Tempo total gasto: %s` issues.due_date=Data de vencimento 
issues.invalid_due_date_format=O formato da data de vencimento tem que ser 'aaaa-mm-dd'. -issues.error_modifying_due_date=Falha ao modificar a data de vencimento. -issues.error_removing_due_date=Falha ao remover a data de vencimento. +issues.error_modifying_due_date=Falhou a modificação da data de vencimento. +issues.error_removing_due_date=Falhou a remoção da data de vencimento. issues.push_commit_1=adicionou %d cometimento %s issues.push_commits_n=adicionou %d cometimentos %s +issues.force_push_codes=`forçou o envio %[1]s de %[2]s para %[4]s %[6]s` issues.due_date_form=yyyy-mm-dd issues.due_date_form_add=Adicionar data de vencimento issues.due_date_form_edit=Editar @@ -1250,14 +1251,14 @@ issues.dependency.add_error_dep_exists=A dependência já existe. issues.dependency.add_error_cannot_create_circular=Não pode criar uma dependência onde duas questões se bloqueiam simultaneamente. issues.dependency.add_error_dep_not_same_repo=Ambas as questões têm que estar no mesmo repositório. issues.review.self.approval=Não pode aprovar o seu próprio pedido de integração. -issues.review.self.rejection=Não pode solicitar alterações sobre o seu próprio pedido de integração. -issues.review.approve=aprovou estas alterações %s +issues.review.self.rejection=Não pode solicitar modificações sobre o seu próprio pedido de integração. +issues.review.approve=aprovou estas modificações %s issues.review.comment=reviu %s issues.review.dismissed=descartou a revisão de %s %s issues.review.dismissed_label=Descartada issues.review.left_comment=deixou um comentário -issues.review.content.empty=Tem que deixar um comentário indicando a(s) alteração(ões) solicitada(s). -issues.review.reject=alterações solicitadas %s +issues.review.content.empty=Tem que deixar um comentário indicando a(s) modificação(ões) solicitada(s). 
+issues.review.reject=modificações solicitadas %s issues.review.wait=foi solicitada para revisão %s issues.review.add_review_request=solicitou revisão de %s %s issues.review.remove_review_request=removeu a solicitação de revisão para %s %s @@ -1282,7 +1283,7 @@ pulls.new=Novo pedido de integração pulls.compare_changes=Novo pedido de integração pulls.compare_changes_desc=Escolha o ramo de destino e o ramo de origem. pulls.compare_base=integrar em -pulls.compare_compare=integrar a partir de +pulls.compare_compare=puxar de pulls.filter_branch=Filtrar ramo pulls.no_results=Não foram encontrados quaisquer resultados. pulls.nothing_to_compare=Estes ramos são iguais. Não há necessidade de criar um pedido de integração. @@ -1297,7 +1298,7 @@ pulls.tab_commits=Cometimentos pulls.tab_files=Ficheiros modificados pulls.reopen_to_merge=Reabra este pedido de integração para executar uma integração. pulls.cant_reopen_deleted_branch=Este pedido de integração não pode ser reaberto porque o ramo foi eliminado. -pulls.merged=Integração executada +pulls.merged=Integrações executadas pulls.merged_as=A integração constante no pedido foi executada como %[2]s. pulls.manually_merged=Integrado manualmente pulls.manually_merged_as=A integração constante neste pedido foi executada manualmente como %[2]s. @@ -1306,18 +1307,18 @@ pulls.has_merged=A integração constante no pedido foi executada. pulls.title_wip_desc=`Inicie o título com %s para evitar que o pedido de integração seja executado acidentalmente.` pulls.cannot_merge_work_in_progress=Este pedido de integração está marcado como um trabalho em andamento. Remova o prefixo %s do título quando estiver pronto pulls.data_broken=Este pedido de integração está danificado devido à falta de informação da derivação. -pulls.files_conflicted=Este pedido de integração contém alterações que entram em conflito com o ramo de destino. +pulls.files_conflicted=Este pedido de integração contém modificações que entram em conflito com o ramo de destino. 
pulls.is_checking=Está em andamento uma verificação de conflitos na integração. Tente novamente daqui a alguns momentos. pulls.is_empty=Este ramo é igual ao ramo de destino. pulls.required_status_check_failed=Algumas das verificações obrigatórias não foram bem sucedidas. pulls.required_status_check_missing=Estão faltando algumas verificações necessárias. pulls.required_status_check_administrator=Uma vez que é administrador, ainda pode realizar a integração deste pedido. pulls.blocked_by_approvals=Este pedido de integração ainda não tem aprovações suficientes. Já foram concedidas %d de um total de%d aprovações. -pulls.blocked_by_rejection=Este pedido de integração tem alterações solicitadas por um revisor oficial. -pulls.blocked_by_official_review_requests=Este Pedido de Integração tem pedidos de revisão oficiais. +pulls.blocked_by_rejection=Este pedido de integração tem modificações solicitadas por um revisor oficial. +pulls.blocked_by_official_review_requests=Este pedido de integração tem pedidos de revisão oficiais. pulls.blocked_by_outdated_branch=Este pedido de integração foi bloqueado por ser obsoleto. -pulls.blocked_by_changed_protected_files_1=Este pedido de integração está bloqueado porque altera um ficheiro protegido: -pulls.blocked_by_changed_protected_files_n=Este pedido de integração está bloqueado porque altera ficheiros protegidos: +pulls.blocked_by_changed_protected_files_1=Este pedido de integração está bloqueado porque modifica um ficheiro protegido: +pulls.blocked_by_changed_protected_files_n=Este pedido de integração está bloqueado porque modifica ficheiros protegidos: pulls.can_auto_merge_desc=A integração constante neste pedido pode ser executada automaticamente. pulls.cannot_auto_merge_desc=A integração constante neste pedido não pode ser executada automaticamente porque existem conflitos. pulls.cannot_auto_merge_helper=Faça a integração manualmente para resolver os conflitos. 
@@ -1325,8 +1326,8 @@ pulls.num_conflicting_files_1=%d ficheiro em conflito pulls.num_conflicting_files_n=%d ficheiros em conflito pulls.approve_count_1=%d aprovação pulls.approve_count_n=%d aprovações -pulls.reject_count_1=%d pedido de alteração -pulls.reject_count_n=%d pedidos de alteração +pulls.reject_count_1=%d pedido de modificação +pulls.reject_count_n=%d pedidos de modificação pulls.waiting_count_1=%d revisão pendente pulls.waiting_count_n=%d revisões pendentes pulls.wrong_commit_id=ID do cometimento tem que ser um ID de cometimento no ramo de destino @@ -1349,11 +1350,11 @@ pulls.merge_conflict_summary=Mensagem de erro pulls.rebase_conflict=A integração falhou: Houve um conflito durante a mudança de base do cometimento %[1]s. Dica: Tente uma estratégia diferente pulls.rebase_conflict_summary=Mensagem de erro ; %[2]s
%[3]s
-pulls.unrelated_histories=Integração falhada: A cabeça da integração e a base não partilham um histórico comum. Dica: Tente uma estratégia diferente +pulls.unrelated_histories=A integração falhou: A cabeça da integração e a base não partilham um histórico comum. Dica: Tente uma estratégia diferente pulls.merge_out_of_date=Falhou a integração: Enquanto estava a gerar a integração, a base foi modificada. Dica: Tente de novo. pulls.push_rejected=A integração falhou: O envio foi rejeitado. Reveja os automatismos do Git neste repositório. pulls.push_rejected_summary=Mensagem completa de rejeição -pulls.push_rejected_no_message=Integração falhada: O envio foi rejeitado mas não houve qualquer mensagem remota.
Reveja os automatismos do git para este repositório +pulls.push_rejected_no_message=A integração falhou: O envio foi rejeitado mas não houve qualquer mensagem remota.
Reveja os automatismos do git para este repositório pulls.open_unmerged_pull_exists=`Não pode executar uma operação de reabertura porque há um pedido de integração pendente (#%d) com propriedades idênticas.` pulls.status_checking=Algumas verificações estão pendentes pulls.status_checks_success=Todas as verificações foram bem sucedidas @@ -1370,8 +1371,8 @@ pulls.closed_at=`fechou este pedido de integração pulls.reopened_at=`reabriu este pedido de integração %[2]s` pulls.merge_instruction_hint=`Também pode ver as instruções para a linha de comandos.` -pulls.merge_instruction_step1_desc=No repositório do seu projecto, crie um novo ramo e teste as alterações. -pulls.merge_instruction_step2_desc=Integre as alterações e sincronize no Gitea. +pulls.merge_instruction_step1_desc=No repositório do seu projecto, crie um novo ramo e teste as modificações. +pulls.merge_instruction_step2_desc=Integre as modificações e sincronize no Gitea. milestones.new=Nova etapa milestones.open_tab=%d abertas @@ -1483,7 +1484,7 @@ activity.new_issues_count_n=Novas questões activity.new_issue_label=Em aberto activity.title.unresolved_conv_1=%d diálogo não resolvido activity.title.unresolved_conv_n=%d diálogos não resolvidos -activity.unresolved_conv_desc=Estas questões e estes pedidos de integração que foram alterados recentemente ainda não foram resolvidos. +activity.unresolved_conv_desc=Estas questões e estes pedidos de integração que foram modificados recentemente ainda não foram resolvidos. activity.unresolved_conv_label=Em aberto activity.title.releases_1=%d lançamento activity.title.releases_n=%d Lançamentos @@ -1502,8 +1503,8 @@ activity.git_stats_push_to_all_branches=para todos os ramos. 
activity.git_stats_on_default_branch=No ramo %s, activity.git_stats_file_1=%d ficheiro activity.git_stats_file_n=%d ficheiros -activity.git_stats_files_changed_1=foi alterado -activity.git_stats_files_changed_n=foram alterados +activity.git_stats_files_changed_1=foi modificado +activity.git_stats_files_changed_n=foram modificados activity.git_stats_additions=e houve activity.git_stats_addition_1=%d adição activity.git_stats_addition_n=%d adições @@ -1641,7 +1642,7 @@ settings.add_team=Adicionar equipa settings.add_team_duplicate=A equipa já tem o repositório settings.add_team_success=A equipa agora tem acesso ao repositório. settings.search_team=Procurar equipa… -settings.change_team_permission_tip=A permissão da equipa é definida na página de configurações da equipa e não pode ter alterações específicas de cada repositório +settings.change_team_permission_tip=A permissão da equipa é definida na página de configurações da equipa e não pode ter modificações específicas de cada repositório settings.delete_team_tip=Esta equipa tem acesso a todos os repositórios e não pode ser removida settings.remove_team_success=O acesso da equipa ao repositório foi removido. settings.add_webhook=Adicionar automatismo web @@ -1687,7 +1688,7 @@ settings.event_fork_desc=Feita a derivação do repositório. settings.event_release=Lançamento settings.event_release_desc=Lançamento publicado, modificado ou eliminado num repositório. settings.event_push=Enviar -settings.event_push_desc=Envio de Git para um repositório. +settings.event_push_desc=Envio do Git para um repositório. settings.event_repository=Repositório settings.event_repository_desc=Repositório criado ou eliminado. settings.event_header_issue=Eventos da questão @@ -1717,7 +1718,7 @@ settings.event_pull_request_review_desc=Pedido de integração aprovado, rejeita settings.event_pull_request_sync=Pedido de integração sincronizado settings.event_pull_request_sync_desc=Pedido de integração sincronizado. 
settings.branch_filter=Filtro por ramo -settings.branch_filter_desc=Lista branca para eventos de envio e de criação e eliminação de ramos, especificada como um padrão glob. Se estiver em branco ou for *, serão reportados eventos para todos os ramos. Veja a documentação github.com/gobwas/glob para detalhes da sintaxe. Exemplos: master, {master,release*}. +settings.branch_filter_desc=Lista de permissões do ramo para eventos de envio e de criação e eliminação de ramos, especificada como um padrão glob. Se estiver em branco ou for *, serão reportados eventos para todos os ramos. Veja a documentação github.com/gobwas/glob para detalhes da sintaxe. Exemplos: trunk, {trunk,release*}. settings.active=Em funcionamento settings.active_helper=Informação sobre eventos despoletados será enviada para o URL deste automatismo web. settings.add_hook_success=O automatismo web foi adicionado. @@ -1738,9 +1739,9 @@ settings.add_msteams_hook_desc=Integrar Microsoft Teams no seu settings.add_feishu_hook_desc=Integrar Feishu no seu repositório. settings.deploy_keys=Chaves de instalação settings.add_deploy_key=Adicionar chave de instalação -settings.deploy_key_desc=Chaves de instalação têm acesso apenas de leitura ao repositório. +settings.deploy_key_desc=Chaves de instalação têm acesso para puxar do repositório apenas em modo de leitura. settings.is_writable=Habilitar acesso de escrita -settings.is_writable_info=Permitir que esta chave de instalação envie para o repositório. +settings.is_writable_info=Permitir a esta chave de instalação enviar para o repositório. settings.no_deploy_keys=Ainda não existem quaisquer chaves de instalação. settings.title=Título settings.deploy_key_content=Conteúdo @@ -1752,18 +1753,18 @@ settings.deploy_key_deletion_desc=Remover uma chave de instalação irá revogar settings.deploy_key_deletion_success=A chave de instalação foi removida. 
settings.branches=Ramos settings.protected_branch=Salvaguarda do ramo -settings.protected_branch_can_push=Permitir envio? +settings.protected_branch_can_push=Permitir envios? settings.protected_branch_can_push_yes=Pode enviar settings.protected_branch_can_push_no=Não pode enviar settings.branch_protection=Salvaguarda do ramo '%s' settings.protect_this_branch=Habilitar salvaguarda do ramo -settings.protect_this_branch_desc=Impede a eliminação e restringe o envio e integração do Git no ramo. -settings.protect_disable_push=Desabilitar envio +settings.protect_this_branch_desc=Impede a eliminação e restringe envios e integrações do Git no ramo. +settings.protect_disable_push=Desabilitar envios settings.protect_disable_push_desc=O envio para este ramo não será permitido. -settings.protect_enable_push=Habilitar envio +settings.protect_enable_push=Habilitar envios settings.protect_enable_push_desc=Qualquer utilizador com acesso de escrita terá permissão para enviar para este ramo (mas não poderá fazer envios forçados). -settings.protect_whitelist_committers=Lista de permissão restrita para envio -settings.protect_whitelist_committers_desc=Apenas os utilizadores ou equipas da lista terão permissão para enviar para este ramo (mas não poderão fazer envios forçados). +settings.protect_whitelist_committers=Lista de permissões para restringir os envios +settings.protect_whitelist_committers_desc=Apenas os utilizadores ou equipas constantes na lista terão permissão para enviar para este ramo (mas não poderão fazer envios forçados). settings.protect_whitelist_deploy_keys=Dar permissão às chaves de instalação para terem acesso de escrita para enviar. 
settings.protect_whitelist_users=Utilizadores com permissão para enviar: settings.protect_whitelist_search_users=Procurar utilizadores… @@ -1783,11 +1784,11 @@ settings.protect_approvals_whitelist_enabled_desc=Somente as revisões dos utili settings.protect_approvals_whitelist_users=Revisores com permissão: settings.protect_approvals_whitelist_teams=Equipas com permissão para rever: settings.dismiss_stale_approvals=Descartar aprovações obsoletas -settings.dismiss_stale_approvals_desc=Quando novos cometimentos que mudam o conteúdo do pedido de integração são enviados para o ramo, as aprovações antigas serão descartadas. +settings.dismiss_stale_approvals_desc=Quando novos cometimentos que mudam o conteúdo do pedido de integração forem enviados para o ramo, as aprovações antigas serão descartadas. settings.require_signed_commits=Exigir cometimentos assinados -settings.require_signed_commits_desc=Rejeitar envios para este ramo se não estiverem assinados ou não forem verificáveis. +settings.require_signed_commits_desc=Rejeitar envios para este ramo que não estejam assinados ou que não sejam verificáveis. settings.protect_protected_file_patterns=Padrões de ficheiros protegidos (separados com ponto e vírgula '\;'): -settings.protect_protected_file_patterns_desc=Ficheiros protegidos que não podem ser alterados, mesmo que o utilizador tenha direitos para adicionar, editar ou eliminar ficheiros neste ramo. Múltiplos padrões podem ser separados com ponto e vírgula ('\;'). Veja a documentação em github.com/gobwas/glob para ver a sintaxe. Exemplos: .drone.yml, /docs/**/*.txt. +settings.protect_protected_file_patterns_desc=Ficheiros protegidos que não podem ser modificados, mesmo que o utilizador tenha direitos para adicionar, editar ou eliminar ficheiros neste ramo. Múltiplos padrões podem ser separados com ponto e vírgula ('\;'). Veja a documentação em github.com/gobwas/glob para ver a sintaxe. Exemplos: .drone.yml, /docs/**/*.txt. 
settings.add_protected_branch=Habilitar salvaguarda settings.delete_protected_branch=Desabilitar salvaguarda settings.update_protect_branch_success=A salvaguarda do ramo '%s' foi modificada. @@ -1795,7 +1796,7 @@ settings.remove_protected_branch_success=A salvaguarda do ramo '%s' foi desabili settings.protected_branch_deletion=Desabilitar salvaguarda do ramo settings.protected_branch_deletion_desc=Desabilitar a salvaguarda do ramo irá permitir que os utilizadores que tenham permissão de escrita enviem para o ramo. Quer continuar? settings.block_rejected_reviews=Bloquear a integração quando há revisões rejeitadas -settings.block_rejected_reviews_desc=A integração não será possível quando as alterações forem pedidas pelos revisores oficiais, mesmo que haja aprovações suficientes. +settings.block_rejected_reviews_desc=A integração não será possível quando as modificações forem pedidas pelos revisores oficiais, mesmo que haja aprovações suficientes. settings.block_on_official_review_requests=Bloquear integração nos pedidos de revisão oficiais settings.block_on_official_review_requests_desc=A integração não será possível quando tiver pedidos de revisão oficiais, mesmo que haja aprovações suficientes. settings.block_outdated_branch=Bloquear integração se o pedido de integração for obsoleto @@ -1814,7 +1815,7 @@ settings.matrix.access_token=Código de acesso settings.matrix.message_type=Tipo de mensagem settings.archive.button=Arquivar repositório settings.archive.header=Arquivar este repositório -settings.archive.text=Arquivar um repositório fará com que seja inteiramente de leitura. Não estará visível no painel de controlo, não poderá receber cometimentos e não será possível criar questões ou pedidos de integração. +settings.archive.text=Arquivar um repositório fará com que fique inteira e exclusivamente de leitura. Não ficará visível no painel de controlo, não poderá receber cometimentos e não será possível criar questões ou pedidos de integração. 
settings.archive.success=O repositório foi arquivado com sucesso. settings.archive.error=Ocorreu um erro enquanto decorria o processo de arquivo do repositório. Veja os registo para obter mais detalhes. settings.archive.error_ismirror=Não pode arquivar um repositório que tenha sido espelhado. @@ -1863,12 +1864,12 @@ diff.download_diff=Descarregar ficheiro diff diff.show_split_view=Visualização em 2 colunas diff.show_unified_view=Visualização unificada diff.whitespace_button=Espaço em branco -diff.whitespace_show_everything=Mostrar todas as alterações +diff.whitespace_show_everything=Mostrar todas as modificações diff.whitespace_ignore_all_whitespace=Ignorar espaço em branco ao comparar linhas -diff.whitespace_ignore_amount_changes=Ignorar alterações na quantidade de espaço em branco -diff.whitespace_ignore_at_eol=Ignorar alterações do espaço em branco no fim das linhas -diff.stats_desc= %d ficheiros alterados com %d adições e %d eliminações -diff.stats_desc_file=%d alterações: %d adições e %d exclusões +diff.whitespace_ignore_amount_changes=Ignorar modificações na quantidade de espaço em branco +diff.whitespace_ignore_at_eol=Ignorar modificações do espaço em branco no fim das linhas +diff.stats_desc= %d ficheiros modificados com %d adições e %d eliminações +diff.stats_desc_file=%d modificações: %d adições e %d exclusões diff.bin=BIN diff.view_file=Ver ficheiro diff.file_before=Antes @@ -1877,7 +1878,7 @@ diff.file_image_width=Largura diff.file_image_height=Altura diff.file_byte_size=Tamanho diff.file_suppressed=A apresentação das diferenças no ficheiro foi suprimida por ser demasiado grande -diff.too_many_files=Alguns ficheiros não foram mostrados porque foram alterados demasiados ficheiros neste diff +diff.too_many_files=Alguns ficheiros não foram mostrados porque foram modificados demasiados ficheiros neste diff diff.comment.placeholder=Deixar um comentário diff.comment.markdown_info=A formatação com markdown é suportada. 
diff.comment.add_single_comment=Adicionar um único comentário @@ -1889,7 +1890,7 @@ diff.review.header=Submeter revisão diff.review.placeholder=Comentário da revisão diff.review.comment=Comentar diff.review.approve=Aprovar -diff.review.reject=Solicitar alterações +diff.review.reject=Solicitar modificações diff.committed_by=cometido por diff.protected=Protegido diff.image.side_by_side=Lado a Lado @@ -2052,7 +2053,7 @@ teams.read_access_helper=Os membros podem ver e clonar os repositórios da equip teams.write_access=Acesso de escrita teams.write_access_helper=Os membros podem ler e enviar para os repositórios da equipa. teams.admin_access=Acesso de administrador -teams.admin_access_helper=Os membros podem enviar para e receber dos repositórios da equipa e adicionar colaboradores a esses repositórios. +teams.admin_access_helper=Os membros podem puxar de, e enviar para os repositórios da equipa e adicionar colaboradores a esses repositórios. teams.no_desc=Esta equipa não tem descrição teams.settings=Configurações teams.owners_permission_desc=Os proprietários têm acesso total a todos os repositórios e têm acesso de administrador à organização. @@ -2064,8 +2065,8 @@ teams.delete_team_title=Eliminar equipa teams.delete_team_desc=Eliminar uma equipa revoga o acesso dos seus membros ao repositório. Quer continuar? teams.delete_team_success=A equipa foi eliminada. teams.read_permission_desc=Esta equipa atribui acesso de leitura: os seus membros podem ver e clonar os repositórios da equipa. -teams.write_permission_desc=Esta equipa atribui acesso de escrita: os seus membros podem ler de e enviar para os repositórios da equipa. -teams.admin_permission_desc=Esta equipa atribui o acesso de administração: os seus membros podem ler de, enviar para e adicionar colaboradores aos repositórios da equipa. +teams.write_permission_desc=Esta equipa atribui acesso de escrita: os seus membros podem ler de, e enviar para os repositórios da equipa. 
+teams.admin_permission_desc=Esta equipa atribui o acesso de administração: os seus membros podem ler de, enviar para, e adicionar colaboradores aos repositórios da equipa. teams.create_repo_permission_desc=Adicionalmente, esta equipa atribui a permissão de criar repositórios: os seus membros podem criar novos repositórios na organização. teams.repositories=Repositórios da equipa teams.search_repo_placeholder=Procurar repositório… @@ -2082,8 +2083,8 @@ teams.specific_repositories_helper=Os membros só terão acesso a repositórios teams.all_repositories=Todos os repositórios teams.all_repositories_helper=A equipa tem acesso a todos os repositórios. Escolher isto irá adicionar todos os repositórios existentes à equipa. teams.all_repositories_read_permission_desc=Esta equipa atribui o acesso de leitura a todos os repositórios: os seus membros podem ver e clonar os repositórios. -teams.all_repositories_write_permission_desc=Esta equipa atribui o acesso de escrita a todos os repositórios: os seus membros podem ler de e enviar para os repositórios. -teams.all_repositories_admin_permission_desc=Esta equipa atribui o acesso de administração a todos os repositórios: os seus membros podem ler de, enviar para e adicionar colaboradores aos repositórios. +teams.all_repositories_write_permission_desc=Esta equipa atribui o acesso de escrita a todos os repositórios: os seus membros podem ler de, e enviar para os repositórios. +teams.all_repositories_admin_permission_desc=Esta equipa atribui o acesso de administração a todos os repositórios: os seus membros podem ler de, enviar para, e adicionar colaboradores aos repositórios. [admin] dashboard=Painel de controlo @@ -2299,6 +2300,7 @@ auths.allowed_domains_helper=Deixe em branco para permitir todos os domínios. 
S auths.enable_tls=Habilitar encriptação TLS auths.skip_tls_verify=Ignorar verificação TLS auths.pam_service_name=Nome do Serviço PAM +auths.pam_email_domain=Domínio de email do PAM (opcional) auths.oauth2_provider=Fornecedor OAuth2 auths.oauth2_icon_url=URL do ícone auths.oauth2_clientID=ID do cliente (chave) @@ -2434,7 +2436,7 @@ config.mailer_sendmail_args=Argumentos extras para o sendmail config.mailer_sendmail_timeout=Tempo limite do Sendmail config.test_email_placeholder=Email (ex.: teste@exemplo.com) config.send_test_mail=Enviar email de teste -config.test_mail_failed=Ocorreu uma falha ao enviar um email de teste para '%s': %v +config.test_mail_failed=Falhou o envio de um email de teste para '%s': %v config.test_mail_sent=Foi enviado um email de teste para '%s'. config.oauth_config=Configuração OAuth @@ -2469,7 +2471,7 @@ config.git_gc_args=Argumentos da recolha de lixo config.git_migrate_timeout=Prazo da migração config.git_mirror_timeout=Tempo limite do espelhamento config.git_clone_timeout=Prazo da operação de clonagem -config.git_pull_timeout=Prazo da operação de receber +config.git_pull_timeout=Prazo da operação de puxar config.git_gc_timeout=Prazo da operação de recolha de lixo config.log_config=Configuração do registo @@ -2589,7 +2591,7 @@ mirror_sync_push=sincronizou cometimentos para %[3]s%[2]s para %[3]s do espelho mirror_sync_delete=sincronizou e eliminou a referência %[2]s em %[3]s do ficheiro approve_pull_request=`aprovou %s#%[2]s` -reject_pull_request=`sugeriu alterações para %s#%[2]s` +reject_pull_request=`sugeriu modificações para %s#%[2]s` publish_release=`lançou "%[4]s" à %[3]s` review_dismissed=`descartou a revisão de %[4]s para %[3]s#%[2]s` review_dismissed_reason=Motivo: @@ -2618,8 +2620,8 @@ raw_seconds=segundos raw_minutes=minutos [dropzone] -default_message=Largue os ficheiros aqui ou clique aqui para os enviar. -invalid_input_type=Não pode enviar ficheiros deste tipo. 
+default_message=Largue os ficheiros aqui ou clique aqui para os carregar. +invalid_input_type=Não pode carregar ficheiros deste tipo. file_too_big=O tamanho do ficheiro ({{filesize}} MB) excede o tamanho máximo de ({{maxFilesize}} MB). remove_file=Remover ficheiro diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index f098b3a45abb8..9102552148503 100644 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -1285,6 +1285,8 @@ issues.review.resolved_by=标记问题为已解决 issues.assignee.error=因为未知原因,并非所有的指派都成功。 issues.reference_issue.body=内容 +compare.compare_base=基准分支 +compare.compare_head=比较 pulls.desc=启用合并请求和代码评审。 pulls.new=创建合并请求 @@ -1526,7 +1528,7 @@ search.fuzzy=模糊 search.match=匹配 search.results=在 %s 中搜索 "%s" 的结果 -settings=仓库设置 +settings=设置 settings.desc=设置是你可以管理仓库设置的地方 settings.options=仓库 settings.collaboration=协作者 @@ -1535,7 +1537,7 @@ settings.collaboration.write=可写权限 settings.collaboration.read=可读权限 settings.collaboration.owner=所有者 settings.collaboration.undefined=未定义 -settings.hooks=管理 Web 钩子 +settings.hooks=Web 钩子 settings.githooks=管理 Git 钩子 settings.basic_settings=基本设置 settings.mirror_settings=镜像设置 @@ -1745,7 +1747,7 @@ settings.add_telegram_hook_desc=将 Telegram 集成到您的仓 settings.add_matrix_hook_desc=将 Matrix 集成到您的仓库中。 settings.add_msteams_hook_desc=将 Microsoft Teams 集成到您的仓库中。 settings.add_feishu_hook_desc=将 Feishu 集成到您的仓库中。 -settings.deploy_keys=管理部署密钥 +settings.deploy_keys=部署密钥 settings.add_deploy_key=添加部署密钥 settings.deploy_key_desc=部署密钥具有对仓库的只读拉取权限。 settings.is_writable=启用写权限 @@ -1759,7 +1761,7 @@ settings.add_key_success=部署密钥 '%s' 添加成功。 settings.deploy_key_deletion=删除部署密钥 settings.deploy_key_deletion_desc=删除部署密钥将吊销对此存储库的访问权限。继续? settings.deploy_key_deletion_success=部署密钥已删除。 -settings.branches=分支列表 +settings.branches=分支 settings.protected_branch=分支保护 settings.protected_branch_can_push=允许推吗? 
settings.protected_branch_can_push_yes=你可以推 @@ -1887,6 +1889,7 @@ diff.file_image_width=宽度 diff.file_image_height=高度 diff.file_byte_size=大小 diff.file_suppressed=文件差异内容过多而无法显示 +diff.file_suppressed_line_too_long=文件差异因一行或多行过长而隐藏 diff.too_many_files=部分文件因为文件数量过多而无法显示 diff.comment.placeholder=留下评论 diff.comment.markdown_info=支持使用Markdown格式。 @@ -2309,6 +2312,7 @@ auths.allowed_domains_helper=置空将允许所有域名,每个域名用逗号 auths.enable_tls=启用 TLS 加密 auths.skip_tls_verify=忽略 TLS 验证 auths.pam_service_name=PAM 服务名称 +auths.pam_email_domain=PAM 电子邮件域(可选) auths.oauth2_provider=OAuth2 提供程序 auths.oauth2_icon_url=图标 URL auths.oauth2_clientID=客户端 ID (键) @@ -2408,6 +2412,7 @@ config.db_path=数据库路径 config.service_config=服务配置 config.register_email_confirm=需要电子邮件确认注册 config.disable_register=禁止用户注册 +config.allow_only_internal_registration=只允许通过 Gitea 进行注册 config.allow_only_external_registration=仅允许通过外部服务注册 config.enable_openid_signup=启用 OpenID 自注册 config.enable_openid_signin=启用 OpenID 登录 diff --git a/options/locale/locale_zh-TW.ini b/options/locale/locale_zh-TW.ini index ff45c62c19e3d..bf58fecf14b61 100644 --- a/options/locale/locale_zh-TW.ini +++ b/options/locale/locale_zh-TW.ini @@ -2311,6 +2311,7 @@ auths.allowed_domains_helper=留白以允許所有域名。以逗號 (',') 分 auths.enable_tls=啟用 TLS 加密 auths.skip_tls_verify=忽略 TLS 驗證 auths.pam_service_name=PAM 服務名稱 +auths.pam_email_domain=PAM 電子信箱域名(非必要) auths.oauth2_provider=OAuth2 提供者 auths.oauth2_icon_url=圖示 URL auths.oauth2_clientID=客戶端 ID (金鑰) diff --git a/routers/admin/repos.go b/routers/admin/repos.go index 82b8cc1a7db82..d23f7c3d5a613 100644 --- a/routers/admin/repos.go +++ b/routers/admin/repos.go @@ -47,6 +47,10 @@ func DeleteRepo(ctx *context.Context) { return } + if ctx.Repo != nil && ctx.Repo.GitRepo != nil && ctx.Repo.Repository != nil && ctx.Repo.Repository.ID == repo.ID { + ctx.Repo.GitRepo.Close() + } + if err := repo_service.DeleteRepository(ctx.User, repo); err != nil { ctx.ServerError("DeleteRepository", err) return diff --git a/routers/api/v1/api.go 
b/routers/api/v1/api.go index 0876f3273cb50..5656730608151 100644 --- a/routers/api/v1/api.go +++ b/routers/api/v1/api.go @@ -888,7 +888,7 @@ func Routes() *web.Route { Post(reqToken(), mustNotBeArchived, bind(api.CreatePullRequestOption{}), repo.CreatePullRequest) m.Group("/{index}", func() { m.Combo("").Get(repo.GetPullRequest). - Patch(reqToken(), reqRepoWriter(models.UnitTypePullRequests), bind(api.EditPullRequestOption{}), repo.EditPullRequest) + Patch(reqToken(), bind(api.EditPullRequestOption{}), repo.EditPullRequest) m.Get(".diff", repo.DownloadPullDiff) m.Get(".patch", repo.DownloadPullPatch) m.Post("/update", reqToken(), repo.UpdatePullRequest) diff --git a/routers/api/v1/repo/repo.go b/routers/api/v1/repo/repo.go index d8600c0fcdd63..7a3160fa9937e 100644 --- a/routers/api/v1/repo/repo.go +++ b/routers/api/v1/repo/repo.go @@ -889,6 +889,10 @@ func Delete(ctx *context.APIContext) { return } + if ctx.Repo.GitRepo != nil { + ctx.Repo.GitRepo.Close() + } + if err := repo_service.DeleteRepository(ctx.User, repo); err != nil { ctx.Error(http.StatusInternalServerError, "DeleteRepository", err) return diff --git a/routers/private/internal.go b/routers/private/internal.go index 3b2fcddd8a617..9202e6721881d 100644 --- a/routers/private/internal.go +++ b/routers/private/internal.go @@ -23,7 +23,7 @@ import ( func CheckInternalToken(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { tokens := req.Header.Get("Authorization") - fields := strings.Fields(tokens) + fields := strings.SplitN(tokens, " ", 2) if len(fields) != 2 || fields[0] != "Bearer" || fields[1] != setting.InternalToken { log.Debug("Forbidden attempt to access internal url: Authorization header: %s", tokens) http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) diff --git a/routers/repo/http.go b/routers/repo/http.go index e98f528f36c96..30d382b8ef18d 100644 --- a/routers/repo/http.go +++ b/routers/repo/http.go @@ -22,15 +22,12 
@@ import ( "time" "code.gitea.io/gitea/models" - "code.gitea.io/gitea/modules/auth/sso" - "code.gitea.io/gitea/modules/base" "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/process" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/structs" - "code.gitea.io/gitea/modules/timeutil" "code.gitea.io/gitea/modules/util" repo_service "code.gitea.io/gitea/services/repository" ) @@ -153,11 +150,8 @@ func httpBase(ctx *context.Context) (h *serviceHandler) { // Only public pull don't need auth. isPublicPull := repoExist && !repo.IsPrivate && isPull var ( - askAuth = !isPublicPull || setting.Service.RequireSignInView - authUser *models.User - authUsername string - authPasswd string - environ []string + askAuth = !isPublicPull || setting.Service.RequireSignInView + environ []string ) // don't allow anonymous pulls if organization is not public @@ -172,108 +166,33 @@ func httpBase(ctx *context.Context) (h *serviceHandler) { // check access if askAuth { - authUsername = ctx.Req.Header.Get(setting.ReverseProxyAuthUser) - if setting.Service.EnableReverseProxyAuth && len(authUsername) > 0 { - authUser, err = models.GetUserByName(authUsername) - if err != nil { - ctx.HandleText(401, "reverse proxy login error, got error while running GetUserByName") - return - } - } else { - authHead := ctx.Req.Header.Get("Authorization") - if len(authHead) == 0 { - ctx.Resp.Header().Set("WWW-Authenticate", "Basic realm=\".\"") - ctx.Error(http.StatusUnauthorized) - return - } + // rely on the results of Contexter + if !ctx.IsSigned { + // TODO: support digit auth - which would be Authorization header with digit + ctx.Resp.Header().Set("WWW-Authenticate", "Basic realm=\".\"") + ctx.Error(http.StatusUnauthorized) + return + } - auths := strings.Fields(authHead) - // currently check basic auth - // TODO: support digit auth - // FIXME: middlewares/context.go did basic auth check already, - // maybe 
could use that one. - if len(auths) != 2 || auths[0] != "Basic" { - ctx.HandleText(http.StatusUnauthorized, "no basic auth and digit auth") + if ctx.IsBasicAuth && ctx.Data["IsApiToken"] != true { + _, err = models.GetTwoFactorByUID(ctx.User.ID) + if err == nil { + // TODO: This response should be changed to "invalid credentials" for security reasons once the expectation behind it (creating an app token to authenticate) is properly documented + ctx.HandleText(http.StatusUnauthorized, "Users with two-factor authentication enabled cannot perform HTTP/HTTPS operations via plain username and password. Please create and use a personal access token on the user settings page") return - } - authUsername, authPasswd, err = base.BasicAuthDecode(auths[1]) - if err != nil { - ctx.HandleText(http.StatusUnauthorized, "no basic auth and digit auth") + } else if !models.IsErrTwoFactorNotEnrolled(err) { + ctx.ServerError("IsErrTwoFactorNotEnrolled", err) return } - - // Check if username or password is a token - isUsernameToken := len(authPasswd) == 0 || authPasswd == "x-oauth-basic" - // Assume username is token - authToken := authUsername - if !isUsernameToken { - // Assume password is token - authToken = authPasswd - } - uid := sso.CheckOAuthAccessToken(authToken) - if uid != 0 { - ctx.Data["IsApiToken"] = true - - authUser, err = models.GetUserByID(uid) - if err != nil { - ctx.ServerError("GetUserByID", err) - return - } - } - // Assume password is a token. 
- token, err := models.GetAccessTokenBySHA(authToken) - if err == nil { - authUser, err = models.GetUserByID(token.UID) - if err != nil { - ctx.ServerError("GetUserByID", err) - return - } - - token.UpdatedUnix = timeutil.TimeStampNow() - if err = models.UpdateAccessToken(token); err != nil { - ctx.ServerError("UpdateAccessToken", err) - } - } else if !models.IsErrAccessTokenNotExist(err) && !models.IsErrAccessTokenEmpty(err) { - log.Error("GetAccessTokenBySha: %v", err) - } - - if authUser == nil { - // Check username and password - authUser, err = models.UserSignIn(authUsername, authPasswd) - if err != nil { - if models.IsErrUserProhibitLogin(err) { - ctx.HandleText(http.StatusForbidden, "User is not permitted to login") - return - } else if !models.IsErrUserNotExist(err) { - ctx.ServerError("UserSignIn error: %v", err) - return - } - } - - if authUser == nil { - ctx.HandleText(http.StatusUnauthorized, fmt.Sprintf("invalid credentials from %s", ctx.RemoteAddr())) - return - } - - _, err = models.GetTwoFactorByUID(authUser.ID) - if err == nil { - // TODO: This response should be changed to "invalid credentials" for security reasons once the expectation behind it (creating an app token to authenticate) is properly documented - ctx.HandleText(http.StatusUnauthorized, "Users with two-factor authentication enabled cannot perform HTTP/HTTPS operations via plain username and password. 
Please create and use a personal access token on the user settings page") - return - } else if !models.IsErrTwoFactorNotEnrolled(err) { - ctx.ServerError("IsErrTwoFactorNotEnrolled", err) - return - } - } } - if !authUser.IsActive || authUser.ProhibitLogin { + if !ctx.User.IsActive || ctx.User.ProhibitLogin { ctx.HandleText(http.StatusForbidden, "Your account is disabled.") return } if repoExist { - perm, err := models.GetUserRepoPermission(repo, authUser) + perm, err := models.GetUserRepoPermission(repo, ctx.User) if err != nil { ctx.ServerError("GetUserRepoPermission", err) return @@ -293,14 +212,14 @@ func httpBase(ctx *context.Context) (h *serviceHandler) { environ = []string{ models.EnvRepoUsername + "=" + username, models.EnvRepoName + "=" + reponame, - models.EnvPusherName + "=" + authUser.Name, - models.EnvPusherID + fmt.Sprintf("=%d", authUser.ID), + models.EnvPusherName + "=" + ctx.User.Name, + models.EnvPusherID + fmt.Sprintf("=%d", ctx.User.ID), models.EnvIsDeployKey + "=false", models.EnvAppURL + "=" + setting.AppURL, } - if !authUser.KeepEmailPrivate { - environ = append(environ, models.EnvPusherEmail+"="+authUser.Email) + if !ctx.User.KeepEmailPrivate { + environ = append(environ, models.EnvPusherEmail+"="+ctx.User.Email) } if isWiki { @@ -336,7 +255,7 @@ func httpBase(ctx *context.Context) (h *serviceHandler) { return } - repo, err = repo_service.PushCreateRepo(authUser, owner, reponame) + repo, err = repo_service.PushCreateRepo(ctx.User, owner, reponame) if err != nil { log.Error("pushCreateRepo: %v", err) ctx.Status(http.StatusNotFound) diff --git a/routers/repo/setting.go b/routers/repo/setting.go index b37ac03112cf8..51bf68b15b4b1 100644 --- a/routers/repo/setting.go +++ b/routers/repo/setting.go @@ -544,6 +544,11 @@ func SettingsPost(ctx *context.Context) { return } + // Close the gitrepository before doing this. 
+ if ctx.Repo.GitRepo != nil { + ctx.Repo.GitRepo.Close() + } + if err := repo_service.DeleteRepository(ctx.User, ctx.Repo.Repository); err != nil { ctx.ServerError("DeleteRepository", err) return diff --git a/routers/routes/base.go b/routers/routes/base.go index 743582d4a56dc..0b784508a7902 100644 --- a/routers/routes/base.go +++ b/routers/routes/base.go @@ -15,6 +15,7 @@ import ( "strings" "time" + "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/auth/sso" "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/httpcache" @@ -171,8 +172,19 @@ func Recovery() func(next http.Handler) http.Handler { }, } - // Get user from session if logged in. - user, _ := sso.SignedInUser(req, w, &store, sessionStore) + var user *models.User + if apiContext := context.GetAPIContext(req); apiContext != nil { + user = apiContext.User + } + if user == nil { + if ctx := context.GetContext(req); ctx != nil { + user = ctx.User + } + } + if user == nil { + // Get user from session if logged in - do not attempt to sign-in + user = sso.SessionUser(sessionStore) + } if user != nil { store.Data["IsSigned"] = true store.Data["SignedUser"] = user diff --git a/services/lfs/locks.go b/services/lfs/locks.go index 6bbe43d36bbb2..ad204c46e2b26 100644 --- a/services/lfs/locks.go +++ b/services/lfs/locks.go @@ -31,15 +31,6 @@ func checkIsValidRequest(ctx *context.Context) bool { writeStatus(ctx, http.StatusBadRequest) return false } - if !ctx.IsSigned { - user, _, _, err := parseToken(ctx.Req.Header.Get("Authorization")) - if err != nil { - ctx.Resp.Header().Set("WWW-Authenticate", "Basic realm=gitea-lfs") - writeStatus(ctx, http.StatusUnauthorized) - return false - } - ctx.User = user - } return true } @@ -73,19 +64,21 @@ func GetListLockHandler(ctx *context.Context) { // Status is written in checkIsValidRequest return } - ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType) rv, _ := unpack(ctx) repository, err := models.GetRepositoryByOwnerAndName(rv.User, rv.Repo) if err 
!= nil { log.Debug("Could not find repository: %s/%s - %s", rv.User, rv.Repo, err) - writeStatus(ctx, 404) + ctx.Resp.Header().Set("WWW-Authenticate", "Basic realm=gitea-lfs") + ctx.JSON(401, api.LFSLockError{ + Message: "You must have pull access to list locks", + }) return } repository.MustOwner() - authenticated := authenticate(ctx, repository, rv.Authorization, false) + authenticated := authenticate(ctx, repository, rv.Authorization, true, false) if !authenticated { ctx.Resp.Header().Set("WWW-Authenticate", "Basic realm=gitea-lfs") ctx.JSON(http.StatusUnauthorized, api.LFSLockError{ @@ -93,6 +86,7 @@ func GetListLockHandler(ctx *context.Context) { }) return } + ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType) cursor := ctx.QueryInt("cursor") if cursor < 0 { @@ -160,7 +154,6 @@ func PostLockHandler(ctx *context.Context) { // Status is written in checkIsValidRequest return } - ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType) userName := ctx.Params("username") repoName := strings.TrimSuffix(ctx.Params("reponame"), ".git") @@ -169,12 +162,15 @@ func PostLockHandler(ctx *context.Context) { repository, err := models.GetRepositoryByOwnerAndName(userName, repoName) if err != nil { log.Error("Unable to get repository: %s/%s Error: %v", userName, repoName, err) - writeStatus(ctx, 404) + ctx.Resp.Header().Set("WWW-Authenticate", "Basic realm=gitea-lfs") + ctx.JSON(401, api.LFSLockError{ + Message: "You must have push access to create locks", + }) return } repository.MustOwner() - authenticated := authenticate(ctx, repository, authorization, true) + authenticated := authenticate(ctx, repository, authorization, true, true) if !authenticated { ctx.Resp.Header().Set("WWW-Authenticate", "Basic realm=gitea-lfs") ctx.JSON(http.StatusUnauthorized, api.LFSLockError{ @@ -183,6 +179,8 @@ func PostLockHandler(ctx *context.Context) { return } + ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType) + var req api.LFSLockRequest bodyReader := ctx.Req.Body 
defer bodyReader.Close() @@ -229,7 +227,6 @@ func VerifyLockHandler(ctx *context.Context) { // Status is written in checkIsValidRequest return } - ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType) userName := ctx.Params("username") repoName := strings.TrimSuffix(ctx.Params("reponame"), ".git") @@ -238,12 +235,15 @@ func VerifyLockHandler(ctx *context.Context) { repository, err := models.GetRepositoryByOwnerAndName(userName, repoName) if err != nil { log.Error("Unable to get repository: %s/%s Error: %v", userName, repoName, err) - writeStatus(ctx, 404) + ctx.Resp.Header().Set("WWW-Authenticate", "Basic realm=gitea-lfs") + ctx.JSON(401, api.LFSLockError{ + Message: "You must have push access to verify locks", + }) return } repository.MustOwner() - authenticated := authenticate(ctx, repository, authorization, true) + authenticated := authenticate(ctx, repository, authorization, true, true) if !authenticated { ctx.Resp.Header().Set("WWW-Authenticate", "Basic realm=gitea-lfs") ctx.JSON(http.StatusUnauthorized, api.LFSLockError{ @@ -252,6 +252,8 @@ func VerifyLockHandler(ctx *context.Context) { return } + ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType) + cursor := ctx.QueryInt("cursor") if cursor < 0 { cursor = 0 @@ -296,7 +298,6 @@ func UnLockHandler(ctx *context.Context) { // Status is written in checkIsValidRequest return } - ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType) userName := ctx.Params("username") repoName := strings.TrimSuffix(ctx.Params("reponame"), ".git") @@ -305,12 +306,15 @@ func UnLockHandler(ctx *context.Context) { repository, err := models.GetRepositoryByOwnerAndName(userName, repoName) if err != nil { log.Error("Unable to get repository: %s/%s Error: %v", userName, repoName, err) - writeStatus(ctx, 404) + ctx.Resp.Header().Set("WWW-Authenticate", "Basic realm=gitea-lfs") + ctx.JSON(401, api.LFSLockError{ + Message: "You must have push access to delete locks", + }) return } repository.MustOwner() - authenticated := 
authenticate(ctx, repository, authorization, true) + authenticated := authenticate(ctx, repository, authorization, true, true) if !authenticated { ctx.Resp.Header().Set("WWW-Authenticate", "Basic realm=gitea-lfs") ctx.JSON(http.StatusUnauthorized, api.LFSLockError{ @@ -319,6 +323,8 @@ func UnLockHandler(ctx *context.Context) { return } + ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType) + var req api.LFSLockDeleteRequest bodyReader := ctx.Req.Body defer bodyReader.Close() diff --git a/services/lfs/server.go b/services/lfs/server.go index cd9a3fd7a1594..ee7d3bc79a3ee 100644 --- a/services/lfs/server.go +++ b/services/lfs/server.go @@ -94,7 +94,7 @@ func getAuthenticatedRepoAndMeta(ctx *context.Context, rc *requestContext, p lfs return nil, nil } - if !authenticate(ctx, repository, rc.Authorization, requireWrite) { + if !authenticate(ctx, repository, rc.Authorization, false, requireWrite) { requireAuth(ctx) return nil, nil } @@ -232,7 +232,7 @@ func PostHandler(ctx *context.Context) { return } - if !authenticate(ctx, repository, rc.Authorization, true) { + if !authenticate(ctx, repository, rc.Authorization, false, true) { requireAuth(ctx) return } @@ -322,7 +322,7 @@ func BatchHandler(ctx *context.Context) { requireWrite = true } - if !authenticate(ctx, repository, reqCtx.Authorization, requireWrite) { + if !authenticate(ctx, repository, reqCtx.Authorization, false, requireWrite) { requireAuth(ctx) return } @@ -561,7 +561,7 @@ func logRequest(r *http.Request, status int) { // authenticate uses the authorization string to determine whether // or not to proceed. This server assumes an HTTP Basic auth format. 
-func authenticate(ctx *context.Context, repository *models.Repository, authorization string, requireWrite bool) bool { +func authenticate(ctx *context.Context, repository *models.Repository, authorization string, requireSigned, requireWrite bool) bool { accessMode := models.AccessModeRead if requireWrite { accessMode = models.AccessModeWrite @@ -575,89 +575,72 @@ func authenticate(ctx *context.Context, repository *models.Repository, authoriza } canRead := perm.CanAccess(accessMode, models.UnitTypeCode) - if canRead { + if canRead && (!requireSigned || ctx.IsSigned) { return true } - user, repo, opStr, err := parseToken(authorization) + user, err := parseToken(authorization, repository, accessMode) if err != nil { // Most of these are Warn level - the true internal server errors are logged in parseToken already log.Warn("Authentication failure for provided token with Error: %v", err) return false } ctx.User = user - if opStr == "basic" { - perm, err = models.GetUserRepoPermission(repository, ctx.User) - if err != nil { - log.Error("Unable to GetUserRepoPermission for user %-v in repo %-v Error: %v", ctx.User, repository) - return false - } - return perm.CanAccess(accessMode, models.UnitTypeCode) + return true +} + +func handleLFSToken(tokenSHA string, target *models.Repository, mode models.AccessMode) (*models.User, error) { + if !strings.Contains(tokenSHA, ".") { + return nil, nil } - if repository.ID == repo.ID { - if requireWrite && opStr != "upload" { - return false + token, err := jwt.ParseWithClaims(tokenSHA, &Claims{}, func(t *jwt.Token) (interface{}, error) { + if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"]) } - return true + return setting.LFS.JWTSecretBytes, nil + }) + if err != nil { + return nil, nil } - return false -} -func parseToken(authorization string) (*models.User, *models.Repository, string, error) { - if authorization == "" { - return nil, nil, "unknown", 
fmt.Errorf("No token") + claims, claimsOk := token.Claims.(*Claims) + if !token.Valid || !claimsOk { + return nil, fmt.Errorf("invalid token claim") } - if strings.HasPrefix(authorization, "Bearer ") { - token, err := jwt.ParseWithClaims(authorization[7:], &Claims{}, func(t *jwt.Token) (interface{}, error) { - if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok { - return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"]) - } - return setting.LFS.JWTSecretBytes, nil - }) - if err != nil { - // The error here is WARN level because it is caused by bad authorization rather than an internal server error - return nil, nil, "unknown", err - } - claims, claimsOk := token.Claims.(*Claims) - if !token.Valid || !claimsOk { - return nil, nil, "unknown", fmt.Errorf("Token claim invalid") - } - r, err := models.GetRepositoryByID(claims.RepoID) - if err != nil { - log.Error("Unable to GetRepositoryById[%d]: Error: %v", claims.RepoID, err) - return nil, nil, claims.Op, err - } - u, err := models.GetUserByID(claims.UserID) - if err != nil { - log.Error("Unable to GetUserById[%d]: Error: %v", claims.UserID, err) - return nil, r, claims.Op, err - } - return u, r, claims.Op, nil + + if claims.RepoID != target.ID { + return nil, fmt.Errorf("invalid token claim") } - if strings.HasPrefix(authorization, "Basic ") { - c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(authorization, "Basic ")) - if err != nil { - return nil, nil, "basic", err - } - cs := string(c) - i := strings.IndexByte(cs, ':') - if i < 0 { - return nil, nil, "basic", fmt.Errorf("Basic auth invalid") - } - user, password := cs[:i], cs[i+1:] - u, err := models.GetUserByName(user) - if err != nil { - log.Error("Unable to GetUserByName[%d]: Error: %v", user, err) - return nil, nil, "basic", err - } - if !u.IsPasswordSet() || !u.ValidatePassword(password) { - return nil, nil, "basic", fmt.Errorf("Basic auth failed") - } - return u, nil, "basic", nil + if mode == models.AccessModeWrite && claims.Op 
!= "upload" { + return nil, fmt.Errorf("invalid token claim") } - return nil, nil, "unknown", fmt.Errorf("Token not found") + u, err := models.GetUserByID(claims.UserID) + if err != nil { + log.Error("Unable to GetUserById[%d]: Error: %v", claims.UserID, err) + return nil, err + } + return u, nil +} + +func parseToken(authorization string, target *models.Repository, mode models.AccessMode) (*models.User, error) { + if authorization == "" { + return nil, fmt.Errorf("no token") + } + + parts := strings.SplitN(authorization, " ", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("no token") + } + tokenSHA := parts[1] + switch strings.ToLower(parts[0]) { + case "bearer": + fallthrough + case "token": + return handleLFSToken(tokenSHA, target, mode) + } + return nil, fmt.Errorf("token not found") } func requireAuth(ctx *context.Context) { diff --git a/services/pull/check_test.go b/services/pull/check_test.go index 33a230e5ab86b..f6614ea0ad27f 100644 --- a/services/pull/check_test.go +++ b/services/pull/check_test.go @@ -6,7 +6,6 @@ package pull import ( - "context" "strconv" "testing" "time" @@ -54,9 +53,9 @@ func TestPullRequest_AddToTaskQueue(t *testing.T) { assert.True(t, has) assert.NoError(t, err) - prQueue.Run(func(_ context.Context, shutdown func()) { + prQueue.Run(func(shutdown func()) { queueShutdown = append(queueShutdown, shutdown) - }, func(_ context.Context, terminate func()) { + }, func(terminate func()) { queueTerminate = append(queueTerminate, terminate) }) diff --git a/templates/admin/auth/edit.tmpl b/templates/admin/auth/edit.tmpl index e4d7a2e1e1fb2..d825cd7d12de6 100644 --- a/templates/admin/auth/edit.tmpl +++ b/templates/admin/auth/edit.tmpl @@ -53,7 +53,6 @@
-

{{.i18n.Tr "admin.auths.bind_password_helper"}}

{{end}}
diff --git a/templates/admin/auth/source/ldap.tmpl b/templates/admin/auth/source/ldap.tmpl index 584538f53bc91..1cbcb2fd415ea 100644 --- a/templates/admin/auth/source/ldap.tmpl +++ b/templates/admin/auth/source/ldap.tmpl @@ -28,7 +28,6 @@
-

{{.i18n.Tr "admin.auths.bind_password_helper"}}

diff --git a/templates/explore/code.tmpl b/templates/explore/code.tmpl index 6332413a173a6..8cc1b71a7ad0a 100644 --- a/templates/explore/code.tmpl +++ b/templates/explore/code.tmpl @@ -50,7 +50,7 @@ {{.}} {{end}} -
    {{.FormattedLines | Safe}}
+ {{.FormattedLines | Safe}} diff --git a/templates/repo/diff/box.tmpl b/templates/repo/diff/box.tmpl index 582b66d5db6e5..d8678c95c6e31 100644 --- a/templates/repo/diff/box.tmpl +++ b/templates/repo/diff/box.tmpl @@ -117,7 +117,7 @@
-
+
{{if $file.IsBin}}
{{$.i18n.Tr "repo.diff.bin_not_shown"}}
{{else}} @@ -131,7 +131,7 @@ {{end}}
{{if or $isImage $isCsv}} -
+
{{if $isImage}} {{template "repo/diff/image_diff" dict "file" . "root" $}} diff --git a/templates/repo/diff/comments.tmpl b/templates/repo/diff/comments.tmpl index 6e39fbe854991..c55da8576e185 100644 --- a/templates/repo/diff/comments.tmpl +++ b/templates/repo/diff/comments.tmpl @@ -3,7 +3,7 @@ {{ $createdStr:= TimeSinceUnix .CreatedUnix $.root.Lang }}
{{if .OriginalAuthor }} - + {{else}} {{avatar .Poster}} diff --git a/templates/repo/issue/milestone_issues.tmpl b/templates/repo/issue/milestone_issues.tmpl index 897d297d37cb5..3f4a13570f68d 100644 --- a/templates/repo/issue/milestone_issues.tmpl +++ b/templates/repo/issue/milestone_issues.tmpl @@ -2,14 +2,9 @@
{{template "repo/header" .}}
-
+

{{.Milestone.Name}}

-
- {{.Milestone.RenderedContent|Str2html}} -
-
-
{{if not .Repository.IsArchived}}
@@ -20,6 +15,11 @@
{{end}}
+
+
+ {{.Milestone.RenderedContent|Str2html}} +
+
{{ $closedDate:= TimeSinceUnix .Milestone.ClosedDateUnix $.Lang }} diff --git a/templates/repo/issue/view_content.tmpl b/templates/repo/issue/view_content.tmpl index e353d71ee969a..35c7cdd7a2c30 100644 --- a/templates/repo/issue/view_content.tmpl +++ b/templates/repo/issue/view_content.tmpl @@ -13,7 +13,7 @@
{{if .Issue.OriginalAuthor }} - + {{else}} {{avatar .Issue.Poster}} diff --git a/templates/repo/issue/view_content/comments.tmpl b/templates/repo/issue/view_content/comments.tmpl index e02f3ddc894c4..77757207cf2e9 100644 --- a/templates/repo/issue/view_content/comments.tmpl +++ b/templates/repo/issue/view_content/comments.tmpl @@ -13,7 +13,7 @@ {{if eq .Type 0}}
{{if .OriginalAuthor }} - + {{else}} {{avatar .Poster}} diff --git a/templates/repo/issue/view_content/sidebar.tmpl b/templates/repo/issue/view_content/sidebar.tmpl index 30acb839b50ac..e3530fc45ba1f 100644 --- a/templates/repo/issue/view_content/sidebar.tmpl +++ b/templates/repo/issue/view_content/sidebar.tmpl @@ -6,12 +6,12 @@
+
    {{.FormattedLines | Safe}}
{{.FormattedLines | Safe}}
diff --git a/templates/repo/settings/branches.tmpl b/templates/repo/settings/branches.tmpl index fbe9a7463e5e2..ccf6abbb81c01 100644 --- a/templates/repo/settings/branches.tmpl +++ b/templates/repo/settings/branches.tmpl @@ -35,7 +35,7 @@ {{end}}
- +
{{end}} diff --git a/templates/repo/view_file.tmpl b/templates/repo/view_file.tmpl index e3a52a887083a..8c73f1252b85f 100644 --- a/templates/repo/view_file.tmpl +++ b/templates/repo/view_file.tmpl @@ -112,7 +112,7 @@ {{if $.Permission.CanRead $.UnitTypeIssues}} -