Merge branch 'main' into lunny/remove_repo_ref_in_commit

Lunny Xiao 2025-10-17 20:05:10 -07:00 committed by GitHub
commit 61032b8b08
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
116 changed files with 4268 additions and 1562 deletions


@ -10,8 +10,8 @@ jobs:
runs-on: ubuntu-latest
if: github.repository == 'go-gitea/gitea'
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true


@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository == 'go-gitea/gitea'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: crowdin/github-action@v1
with:
upload_sources: true


@ -34,7 +34,7 @@ jobs:
swagger: ${{ steps.changes.outputs.swagger }}
yaml: ${{ steps.changes.outputs.yaml }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: dorny/paths-filter@v3
id: changes
with:


@ -16,8 +16,8 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -31,7 +31,7 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: astral-sh/setup-uv@v6
- run: uv python install 3.12
- uses: pnpm/action-setup@v4
@ -47,7 +47,7 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: astral-sh/setup-uv@v6
- run: uv python install 3.12
- run: make deps-py
@ -58,7 +58,7 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: pnpm/action-setup@v4
- uses: actions/setup-node@v5
with:
@ -71,8 +71,8 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -83,8 +83,8 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -100,8 +100,8 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -115,8 +115,8 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -128,7 +128,7 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: pnpm/action-setup@v4
- uses: actions/setup-node@v5
with:
@ -144,8 +144,8 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -176,7 +176,7 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: pnpm/action-setup@v4
- uses: actions/setup-node@v5
with:
@ -189,8 +189,8 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true


@ -38,8 +38,8 @@ jobs:
ports:
- "9000:9000"
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -66,8 +66,8 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -124,8 +124,8 @@ jobs:
ports:
- 10000:10000
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -177,8 +177,8 @@ jobs:
- "587:587"
- "993:993"
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -217,8 +217,8 @@ jobs:
ports:
- 10000:10000
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true


@ -18,8 +18,8 @@ jobs:
needs: files-changed
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true


@ -15,6 +15,6 @@ jobs:
contents: read
pull-requests: write
steps:
- uses: actions/labeler@v5
- uses: actions/labeler@v6
with:
sync-labels: true


@ -12,11 +12,11 @@ jobs:
nightly-binary:
runs-on: namespace-profile-gitea-release-binary
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
# fetch all commits instead of only the last as some branches are long lived and could have many between versions
# fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567
- run: git fetch --unshallow --quiet --tags --force
- uses: actions/setup-go@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -61,11 +61,11 @@ jobs:
permissions:
packages: write # to publish to ghcr.io
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
# fetch all commits instead of only the last as some branches are long lived and could have many between versions
# fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567
- run: git fetch --unshallow --quiet --tags --force
- uses: actions/setup-go@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -103,11 +103,11 @@ jobs:
permissions:
packages: write # to publish to ghcr.io
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
# fetch all commits instead of only the last as some branches are long lived and could have many between versions
# fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567
- run: git fetch --unshallow --quiet --tags --force
- uses: actions/setup-go@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true


@ -13,11 +13,11 @@ jobs:
binary:
runs-on: namespace-profile-gitea-release-binary
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
# fetch all commits instead of only the last as some branches are long lived and could have many between versions
# fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567
- run: git fetch --unshallow --quiet --tags --force
- uses: actions/setup-go@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -71,7 +71,7 @@ jobs:
permissions:
packages: write # to publish to ghcr.io
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
# fetch all commits instead of only the last as some branches are long lived and could have many between versions
# fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567
- run: git fetch --unshallow --quiet --tags --force
@ -112,7 +112,7 @@ jobs:
permissions:
packages: write # to publish to ghcr.io
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
# fetch all commits instead of only the last as some branches are long lived and could have many between versions
# fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567
- run: git fetch --unshallow --quiet --tags --force


@ -17,11 +17,11 @@ jobs:
permissions:
packages: write # to publish to ghcr.io
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
# fetch all commits instead of only the last as some branches are long lived and could have many between versions
# fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567
- run: git fetch --unshallow --quiet --tags --force
- uses: actions/setup-go@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
@ -75,7 +75,7 @@ jobs:
permissions:
packages: write # to publish to ghcr.io
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
# fetch all commits instead of only the last as some branches are long lived and could have many between versions
# fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567
- run: git fetch --unshallow --quiet --tags --force
@ -118,7 +118,7 @@ jobs:
docker-rootless:
runs-on: namespace-profile-gitea-release-docker
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
# fetch all commits instead of only the last as some branches are long lived and could have many between versions
# fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567
- run: git fetch --unshallow --quiet --tags --force

File diff suppressed because one or more lines are too long


@ -52,21 +52,16 @@ export default defineConfig([
},
plugins: {
'@eslint-community/eslint-comments': comments,
// @ts-expect-error
'@stylistic': stylistic,
'@typescript-eslint': typescriptPlugin.plugin,
'array-func': arrayFunc,
// @ts-expect-error -- https://github.com/un-ts/eslint-plugin-import-x/issues/203
'import-x': importPlugin,
'no-use-extend-native': noUseExtendNative,
// @ts-expect-error
regexp,
// @ts-expect-error
sonarjs,
// @ts-expect-error
unicorn,
github,
// @ts-expect-error
wc,
},
settings: {
@ -595,6 +590,7 @@ export default defineConfig([
'no-unused-vars': [0], // handled by @typescript-eslint/no-unused-vars
'no-use-before-define': [0], // handled by @typescript-eslint/no-use-before-define
'no-use-extend-native/no-use-extend-native': [2],
'no-useless-assignment': [2],
'no-useless-backreference': [2],
'no-useless-call': [2],
'no-useless-catch': [2],
@ -900,7 +896,6 @@ export default defineConfig([
'yoda': [2, 'never'],
},
},
// @ts-expect-error
{
...playwright.configs['flat/recommended'],
files: ['tests/e2e/**'],
@ -916,7 +911,6 @@ export default defineConfig([
},
},
extends: [
// @ts-expect-error
vue.configs['flat/recommended'],
// @ts-expect-error
vueScopedCss.configs['flat/recommended'],


@ -20,11 +20,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1755186698,
"narHash": "sha256-wNO3+Ks2jZJ4nTHMuks+cxAiVBGNuEBXsT29Bz6HASo=",
"lastModified": 1760038930,
"narHash": "sha256-Oncbh0UmHjSlxO7ErQDM3KM0A5/Znfofj2BSzlHLeVw=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "fbcf476f790d8a217c3eab4e12033dc4a0f6d23c",
"rev": "0b4defa2584313f3b781240b29d61f6f9f7e0df3",
"type": "github"
},
"original": {

go.mod (34 changed lines)

@ -1,6 +1,6 @@
module code.gitea.io/gitea
go 1.25.1
go 1.25.3
// rfc5280 said: "The serial number is an integer assigned by the CA to each certificate."
// But some CAs use negative serial number, just relax the check. related:
@ -35,7 +35,7 @@ require (
github.com/bohde/codel v0.2.0
github.com/buildkite/terminal-to-html/v3 v3.16.8
github.com/caddyserver/certmagic v0.24.0
github.com/charmbracelet/git-lfs-transfer v0.2.0
github.com/charmbracelet/git-lfs-transfer v0.1.1-0.20251013092601-6327009efd21
github.com/chi-middleware/proxy v1.1.1
github.com/dimiro1/reply v0.0.0-20200315094148-d0136a4c9e21
github.com/djherbis/buffer v1.2.0
@ -56,7 +56,7 @@ require (
github.com/go-co-op/gocron v1.37.0
github.com/go-enry/go-enry/v2 v2.9.2
github.com/go-git/go-billy/v5 v5.6.2
github.com/go-git/go-git/v5 v5.16.2
github.com/go-git/go-git/v5 v5.16.3
github.com/go-ldap/ldap/v3 v3.4.11
github.com/go-redsync/redsync/v4 v4.13.0
github.com/go-sql-driver/mysql v1.9.3
@ -84,7 +84,7 @@ require (
github.com/mattn/go-isatty v0.0.20
github.com/mattn/go-sqlite3 v1.14.32
github.com/meilisearch/meilisearch-go v0.33.2
github.com/mholt/archives v0.1.3
github.com/mholt/archives v0.0.0-20251009205813-e30ac6010726
github.com/microcosm-cc/bluemonday v1.0.27
github.com/microsoft/go-mssqldb v1.9.3
github.com/minio/minio-go/v7 v7.0.95
@ -116,13 +116,13 @@ require (
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc
github.com/yuin/goldmark-meta v1.1.0
gitlab.com/gitlab-org/api/client-go v0.142.4
golang.org/x/crypto v0.41.0
golang.org/x/crypto v0.42.0
golang.org/x/image v0.30.0
golang.org/x/net v0.43.0
golang.org/x/net v0.44.0
golang.org/x/oauth2 v0.30.0
golang.org/x/sync v0.17.0
golang.org/x/sys v0.35.0
golang.org/x/text v0.29.0
golang.org/x/sys v0.37.0
golang.org/x/text v0.30.0
google.golang.org/grpc v1.75.0
google.golang.org/protobuf v1.36.8
gopkg.in/ini.v1 v1.67.0
@ -142,7 +142,7 @@ require (
github.com/DataDog/zstd v1.5.7 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/RoaringBitmap/roaring/v2 v2.10.0 // indirect
github.com/STARRY-S/zip v0.2.1 // indirect
github.com/STARRY-S/zip v0.2.3 // indirect
github.com/andybalholm/brotli v1.2.0 // indirect
github.com/andybalholm/cascadia v1.3.3 // indirect
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
@ -172,7 +172,7 @@ require (
github.com/blevesearch/zapx/v16 v16.2.4 // indirect
github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect
github.com/bodgit/plumbing v1.3.0 // indirect
github.com/bodgit/sevenzip v1.6.0 // indirect
github.com/bodgit/sevenzip v1.6.1 // indirect
github.com/bodgit/windows v1.0.1 // indirect
github.com/boombuler/barcode v1.1.0 // indirect
github.com/bradfitz/gomemcache v0.0.0-20250403215159-8d39553ac7cf // indirect
@ -233,14 +233,14 @@ require (
github.com/mikelolasagasti/xz v1.0.1 // indirect
github.com/minio/crc64nvme v1.1.1 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/minlz v1.0.0 // indirect
github.com/minio/minlz v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mrjones/oauth v0.0.0-20190623134757-126b35219450 // indirect
github.com/mschoch/smat v0.2.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nwaples/rardecode/v2 v2.1.0 // indirect
github.com/nwaples/rardecode/v2 v2.2.0 // indirect
github.com/olekukonko/cat v0.0.0-20250817074551-3280053e4e00 // indirect
github.com/olekukonko/errors v1.1.0 // indirect
github.com/olekukonko/ll v0.1.0 // indirect
@ -259,7 +259,8 @@ require (
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/skeema/knownhosts v1.3.1 // indirect
github.com/sorairolake/lzip-go v0.3.5 // indirect
github.com/sorairolake/lzip-go v0.3.8 // indirect
github.com/spf13/afero v1.15.0 // indirect
github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf // indirect
github.com/tinylib/msgp v1.4.0 // indirect
github.com/unknwon/com v1.0.1 // indirect
@ -278,9 +279,9 @@ require (
go.uber.org/zap/exp v0.3.0 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect
golang.org/x/mod v0.27.0 // indirect
golang.org/x/mod v0.28.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.36.0 // indirect
golang.org/x/tools v0.37.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250826171959-ef028d996bc1 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
@ -297,9 +298,6 @@ replace github.com/hashicorp/go-version => github.com/6543/go-version v1.3.1
replace github.com/nektos/act => gitea.com/gitea/act v0.261.7-0.20251003180512-ac6e4b751763
// TODO: the only difference is in `PutObject`: the fork doesn't use `NewVerifyingReader(r, sha256.New(), oid, expectedSize)`, need to figure out why
replace github.com/charmbracelet/git-lfs-transfer => gitea.com/gitea/git-lfs-transfer v0.2.0
replace git.sr.ht/~mariusor/go-xsd-duration => gitea.com/gitea/go-xsd-duration v0.0.0-20220703122237-02e73435a078
exclude github.com/gofrs/uuid v3.2.0+incompatible

go.sum (63 changed lines)

@ -33,8 +33,6 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
gitea.com/gitea/act v0.261.7-0.20251003180512-ac6e4b751763 h1:ohdxegvslDEllZmRNDqpKun6L4Oq81jNdEDtGgHEV2c=
gitea.com/gitea/act v0.261.7-0.20251003180512-ac6e4b751763/go.mod h1:Pg5C9kQY1CEA3QjthjhlrqOC/QOT5NyWNjOjRHw23Ok=
gitea.com/gitea/git-lfs-transfer v0.2.0 h1:baHaNoBSRaeq/xKayEXwiDQtlIjps4Ac/Ll4KqLMB40=
gitea.com/gitea/git-lfs-transfer v0.2.0/go.mod h1:UrXUCm3xLQkq15fu7qlXHUMlrhdlXHoi13KH2Dfiits=
gitea.com/gitea/go-xsd-duration v0.0.0-20220703122237-02e73435a078 h1:BAFmdZpRW7zMQZQDClaCWobRj9uL1MR3MzpCVJvc5s4=
gitea.com/gitea/go-xsd-duration v0.0.0-20220703122237-02e73435a078/go.mod h1:g/V2Hjas6Z1UHUp4yIx6bATpNzJ7DYtD0FG3+xARWxs=
gitea.com/go-chi/binding v0.0.0-20240430071103-39a851e106ed h1:EZZBtilMLSZNWtHHcgq2mt6NSGhJSZBuduAlinMEmso=
@ -93,8 +91,8 @@ github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06
github.com/RoaringBitmap/roaring v0.7.1/go.mod h1:jdT9ykXwHFNdJbEtxePexlFYH9LXucApeS0/+/g+p1I=
github.com/RoaringBitmap/roaring/v2 v2.10.0 h1:HbJ8Cs71lfCJyvmSptxeMX2PtvOC8yonlU0GQcy2Ak0=
github.com/RoaringBitmap/roaring/v2 v2.10.0/go.mod h1:FiJcsfkGje/nZBZgCu0ZxCPOKD/hVXDS2dXi7/eUFE0=
github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4=
github.com/STARRY-S/zip v0.2.3 h1:luE4dMvRPDOWQdeDdUxUoZkzUIpTccdKdhHHsQJ1fm4=
github.com/STARRY-S/zip v0.2.3/go.mod h1:lqJ9JdeRipyOQJrYSOtpNAiaesFO6zVDsE8GIGFaoSk=
github.com/SaveTheRbtz/zstd-seekable-format-go/pkg v0.8.0 h1:tgjwQrDH5m6jIYB7kac5IQZmfUzQNseac/e3H4VoCNE=
github.com/SaveTheRbtz/zstd-seekable-format-go/pkg v0.8.0/go.mod h1:1HmmMEVsr+0R1QWahSeMJkjSkq6CYAZu1aIbYSpfJ4o=
github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
@ -193,8 +191,8 @@ github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTS
github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q=
github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU=
github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs=
github.com/bodgit/sevenzip v1.6.0 h1:a4R0Wu6/P1o1pP/3VV++aEOcyeBxeO/xE2Y9NSTrr6A=
github.com/bodgit/sevenzip v1.6.0/go.mod h1:zOBh9nJUof7tcrlqJFv1koWRrhz3LbDbUNngkuZxLMc=
github.com/bodgit/sevenzip v1.6.1 h1:kikg2pUMYC9ljU7W9SaqHXhym5HyKm8/M/jd31fYan4=
github.com/bodgit/sevenzip v1.6.1/go.mod h1:GVoYQbEVbOGT8n2pfqCIMRUaRjQ8F9oSqoBEqZh5fQ8=
github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/bohde/codel v0.2.0 h1:fzF7ibgKmCfQbOzQCblmQcwzDRmV7WO7VMLm/hDvD3E=
@ -219,6 +217,8 @@ github.com/cention-sany/utf7 v0.0.0-20170124080048-26cad61bd60a h1:MISbI8sU/PSK/
github.com/cention-sany/utf7 v0.0.0-20170124080048-26cad61bd60a/go.mod h1:2GxOXOlEPAMFPfp014mK1SWq8G8BN8o7/dfYqJrVGn8=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/git-lfs-transfer v0.1.1-0.20251013092601-6327009efd21 h1:2d64+4Jek9vjYwhY93AjbleiVH+AeWvPwPmDi1mfKFQ=
github.com/charmbracelet/git-lfs-transfer v0.1.1-0.20251013092601-6327009efd21/go.mod h1:fNlYtCHWTRC8MofQERZkVUNUWaOvZeTBqHn/amSbKZI=
github.com/chi-middleware/proxy v1.1.1 h1:4HaXUp8o2+bhHr1OhVy+VjN0+L7/07JDcn6v7YrTjrQ=
github.com/chi-middleware/proxy v1.1.1/go.mod h1:jQwMEJct2tz9VmtCELxvnXoMfa+SOdikvbVJVHv/M+0=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
@ -339,8 +339,8 @@ github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UN
github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM=
github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
github.com/go-git/go-git/v5 v5.16.3 h1:Z8BtvxZ09bYm/yYNgPKCzgWtaRqDTgIKRgIRHBfU6Z8=
github.com/go-git/go-git/v5 v5.16.3/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
@ -572,8 +572,8 @@ github.com/meilisearch/meilisearch-go v0.33.2 h1:YgsQSLYhAkRN2ias6I1KNRTjdYCN5w2
github.com/meilisearch/meilisearch-go v0.33.2/go.mod h1:6eOPcQ+OAuwXvnONlfSgfgvr7TIAWM/6OdhcVHg8cF0=
github.com/mholt/acmez/v3 v3.1.2 h1:auob8J/0FhmdClQicvJvuDavgd5ezwLBfKuYmynhYzc=
github.com/mholt/acmez/v3 v3.1.2/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ=
github.com/mholt/archives v0.1.3 h1:aEAaOtNra78G+TvV5ohmXrJOAzf++dIlYeDW3N9q458=
github.com/mholt/archives v0.1.3/go.mod h1:LUCGp++/IbV/I0Xq4SzcIR6uwgeh2yjnQWamjRQfLTU=
github.com/mholt/archives v0.0.0-20251009205813-e30ac6010726 h1:narluFTg20M5KBwKxedpFiSMkdjQRRNUlpY4uAsKMwk=
github.com/mholt/archives v0.0.0-20251009205813-e30ac6010726/go.mod h1:3TPMmBLPsgszL+1As5zECTuKwKvIfj6YcwWPpeTAXF4=
github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
github.com/microsoft/go-mssqldb v1.9.3 h1:hy4p+LDC8LIGvI3JATnLVmBOLMJbmn5X400mr5j0lPs=
@ -588,8 +588,8 @@ github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.95 h1:ywOUPg+PebTMTzn9VDsoFJy32ZuARN9zhB+K3IYEvYU=
github.com/minio/minio-go/v7 v7.0.95/go.mod h1:wOOX3uxS334vImCNRVyIDdXX9OsXDm89ToynKgqUKlo=
github.com/minio/minlz v1.0.0 h1:Kj7aJZ1//LlTP1DM8Jm7lNKvvJS2m74gyyXXn3+uJWQ=
github.com/minio/minlz v1.0.0/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec=
github.com/minio/minlz v1.0.1 h1:OUZUzXcib8diiX+JYxyRLIdomyZYzHct6EShOKtQY2A=
github.com/minio/minlz v1.0.1/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@ -610,8 +610,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/niklasfasching/go-org v1.9.1 h1:/3s4uTPOF06pImGa2Yvlp24yKXZoTYM+nsIlMzfpg/0=
github.com/niklasfasching/go-org v1.9.1/go.mod h1:ZAGFFkWvUQcpazmi/8nHqwvARpr1xpb+Es67oUGX/48=
github.com/nwaples/rardecode/v2 v2.1.0 h1:JQl9ZoBPDy+nIZGb1mx8+anfHp/LV3NE2MjMiv0ct/U=
github.com/nwaples/rardecode/v2 v2.1.0/go.mod h1:7uz379lSxPe6j9nvzxUZ+n7mnJNgjsRNb6IbvGVHRmw=
github.com/nwaples/rardecode/v2 v2.2.0 h1:4ufPGHiNe1rYJxYfehALLjup4Ls3ck42CWwjKiOqu0A=
github.com/nwaples/rardecode/v2 v2.2.0/go.mod h1:7uz379lSxPe6j9nvzxUZ+n7mnJNgjsRNb6IbvGVHRmw=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
@ -714,9 +714,11 @@ github.com/smartystreets/assertions v1.1.1/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYl
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/sorairolake/lzip-go v0.3.5 h1:ms5Xri9o1JBIWvOFAorYtUNik6HI3HgBTkISiqu0Cwg=
github.com/sorairolake/lzip-go v0.3.5/go.mod h1:N0KYq5iWrMXI0ZEXKXaS9hCyOjZUQdBDEIbXfoUwbdk=
github.com/sorairolake/lzip-go v0.3.8 h1:j5Q2313INdTA80ureWYRhX+1K78mUXfMoPZCw/ivWik=
github.com/sorairolake/lzip-go v0.3.8/go.mod h1:JcBqGMV0frlxwrsE9sMWXDjqn3EeVf0/54YPsw66qkU=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
@ -729,6 +731,7 @@ github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@ -837,8 +840,8 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -875,8 +878,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -905,8 +908,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -972,8 +975,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -984,8 +987,8 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -999,8 +1002,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
@ -1036,8 +1039,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=


@ -16,13 +16,13 @@ import (
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
api "code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
webhook_module "code.gitea.io/gitea/modules/webhook"
"github.com/nektos/act/pkg/jobparser"
"xorm.io/builder"
)
@ -30,7 +30,7 @@ import (
type ActionRun struct {
ID int64
Title string
RepoID int64 `xorm:"index unique(repo_index)"`
RepoID int64 `xorm:"unique(repo_index) index(repo_concurrency)"`
Repo *repo_model.Repository `xorm:"-"`
OwnerID int64 `xorm:"index"`
WorkflowID string `xorm:"index"` // the name of workflow file
@ -49,6 +49,9 @@ type ActionRun struct {
TriggerEvent string // the trigger event defined in the `on` configuration of the triggered workflow
Status Status `xorm:"index"`
Version int `xorm:"version default 0"` // Status could be updated concomitantly, so an optimistic lock is needed
RawConcurrency string // raw concurrency
ConcurrencyGroup string `xorm:"index(repo_concurrency) NOT NULL DEFAULT ''"`
ConcurrencyCancel bool `xorm:"NOT NULL DEFAULT FALSE"`
// Started and Stopped is used for recording last run time, if rerun happened, they will be reset to 0
Started timeutil.TimeStamp
Stopped timeutil.TimeStamp
@ -102,6 +105,15 @@ func (run *ActionRun) PrettyRef() string {
return refName.ShortName()
}
// RefTooltip returns a tooltip for the run's ref. For a pull request, it's the title of the PR; otherwise it's the ShortName.
func (run *ActionRun) RefTooltip() string {
payload, err := run.GetPullRequestEventPayload()
if err == nil && payload != nil && payload.PullRequest != nil {
return payload.PullRequest.Title
}
return git.RefName(run.Ref).ShortName()
}
// LoadAttributes load Repo TriggerUser if not loaded
func (run *ActionRun) LoadAttributes(ctx context.Context) error {
if run == nil {
@ -181,7 +193,7 @@ func (run *ActionRun) IsSchedule() bool {
return run.ScheduleID > 0
}
func updateRepoRunsNumbers(ctx context.Context, repo *repo_model.Repository) error {
func UpdateRepoRunsNumbers(ctx context.Context, repo *repo_model.Repository) error {
_, err := db.GetEngine(ctx).ID(repo.ID).
NoAutoTime().
SetExpr("num_action_runs",
@ -238,116 +250,62 @@ func CancelPreviousJobs(ctx context.Context, repoID int64, ref, workflowID strin
return cancelledJobs, err
}
// Iterate over each job and attempt to cancel it.
for _, job := range jobs {
// Skip jobs that are already in a terminal state (completed, cancelled, etc.).
status := job.Status
if status.IsDone() {
continue
}
// If the job has no associated task (probably an error), set its status to 'Cancelled' and stop it.
if job.TaskID == 0 {
job.Status = StatusCancelled
job.Stopped = timeutil.TimeStampNow()
// Update the job's status and stopped time in the database.
n, err := UpdateRunJob(ctx, job, builder.Eq{"task_id": 0}, "status", "stopped")
if err != nil {
return cancelledJobs, err
}
// If the update affected 0 rows, it means the job has changed in the meantime, so we need to try again.
if n == 0 {
return cancelledJobs, errors.New("job has changed, try again")
}
cancelledJobs = append(cancelledJobs, job)
// Continue with the next job.
continue
}
// If the job has an associated task, try to stop the task, effectively cancelling the job.
if err := StopTask(ctx, job.TaskID, StatusCancelled); err != nil {
return cancelledJobs, err
}
cancelledJobs = append(cancelledJobs, job)
cjs, err := CancelJobs(ctx, jobs)
if err != nil {
return cancelledJobs, err
}
cancelledJobs = append(cancelledJobs, cjs...)
}
// Return nil to indicate successful cancellation of all running and waiting jobs.
return cancelledJobs, nil
}
// InsertRun inserts a run
// The title will be cut off at 255 characters if it's longer than 255 characters.
func InsertRun(ctx context.Context, run *ActionRun, jobs []*jobparser.SingleWorkflow) error {
return db.WithTx(ctx, func(ctx context.Context) error {
index, err := db.GetNextResourceIndex(ctx, "action_run_index", run.RepoID)
if err != nil {
return err
}
run.Index = index
run.Title = util.EllipsisDisplayString(run.Title, 255)
if err := db.Insert(ctx, run); err != nil {
return err
func CancelJobs(ctx context.Context, jobs []*ActionRunJob) ([]*ActionRunJob, error) {
cancelledJobs := make([]*ActionRunJob, 0, len(jobs))
// Iterate over each job and attempt to cancel it.
for _, job := range jobs {
// Skip jobs that are already in a terminal state (completed, cancelled, etc.).
status := job.Status
if status.IsDone() {
continue
}
if run.Repo == nil {
repo, err := repo_model.GetRepositoryByID(ctx, run.RepoID)
// If the job has no associated task (probably an error), set its status to 'Cancelled' and stop it.
if job.TaskID == 0 {
job.Status = StatusCancelled
job.Stopped = timeutil.TimeStampNow()
// Update the job's status and stopped time in the database.
n, err := UpdateRunJob(ctx, job, builder.Eq{"task_id": 0}, "status", "stopped")
if err != nil {
return err
return cancelledJobs, err
}
run.Repo = repo
// If the update affected 0 rows, it means the job has changed in the meantime
if n == 0 {
log.Error("Failed to cancel job %d because it has changed", job.ID)
continue
}
cancelledJobs = append(cancelledJobs, job)
// Continue with the next job.
continue
}
if err := updateRepoRunsNumbers(ctx, run.Repo); err != nil {
return err
// If the job has an associated task, try to stop the task, effectively cancelling the job.
if err := StopTask(ctx, job.TaskID, StatusCancelled); err != nil {
return cancelledJobs, err
}
updatedJob, err := GetRunJobByID(ctx, job.ID)
if err != nil {
return cancelledJobs, fmt.Errorf("get job: %w", err)
}
cancelledJobs = append(cancelledJobs, updatedJob)
}
runJobs := make([]*ActionRunJob, 0, len(jobs))
var hasWaiting bool
for _, v := range jobs {
id, job := v.Job()
needs := job.Needs()
if err := v.SetJob(id, job.EraseNeeds()); err != nil {
return err
}
payload, _ := v.Marshal()
status := StatusWaiting
if len(needs) > 0 || run.NeedApproval {
status = StatusBlocked
} else {
hasWaiting = true
}
job.Name = util.EllipsisDisplayString(job.Name, 255)
runJobs = append(runJobs, &ActionRunJob{
RunID: run.ID,
RepoID: run.RepoID,
OwnerID: run.OwnerID,
CommitSHA: run.CommitSHA,
IsForkPullRequest: run.IsForkPullRequest,
Name: job.Name,
WorkflowPayload: payload,
JobID: id,
Needs: needs,
RunsOn: job.RunsOn(),
Status: status,
})
}
if err := db.Insert(ctx, runJobs); err != nil {
return err
}
// if there is a job in the waiting status, increase tasks version.
if hasWaiting {
if err := IncreaseTaskVersion(ctx, run.OwnerID, run.RepoID); err != nil {
return err
}
}
return nil
})
// Return nil to indicate successful cancellation of all running and waiting jobs.
return cancelledJobs, nil
}
func GetRunByRepoAndID(ctx context.Context, repoID, runID int64) (*ActionRun, error) {
@ -432,7 +390,7 @@ func UpdateRun(ctx context.Context, run *ActionRun, cols ...string) error {
if err = run.LoadRepo(ctx); err != nil {
return err
}
if err := updateRepoRunsNumbers(ctx, run.Repo); err != nil {
if err := UpdateRepoRunsNumbers(ctx, run.Repo); err != nil {
return err
}
}
@ -441,3 +399,59 @@ func UpdateRun(ctx context.Context, run *ActionRun, cols ...string) error {
}
type ActionRunIndex db.ResourceIndex
func GetConcurrentRunsAndJobs(ctx context.Context, repoID int64, concurrencyGroup string, status []Status) ([]*ActionRun, []*ActionRunJob, error) {
runs, err := db.Find[ActionRun](ctx, &FindRunOptions{
RepoID: repoID,
ConcurrencyGroup: concurrencyGroup,
Status: status,
})
if err != nil {
return nil, nil, fmt.Errorf("find runs: %w", err)
}
jobs, err := db.Find[ActionRunJob](ctx, &FindRunJobOptions{
RepoID: repoID,
ConcurrencyGroup: concurrencyGroup,
Statuses: status,
})
if err != nil {
return nil, nil, fmt.Errorf("find jobs: %w", err)
}
return runs, jobs, nil
}
func CancelPreviousJobsByRunConcurrency(ctx context.Context, actionRun *ActionRun) ([]*ActionRunJob, error) {
if actionRun.ConcurrencyGroup == "" {
return nil, nil
}
var jobsToCancel []*ActionRunJob
statusFindOption := []Status{StatusWaiting, StatusBlocked}
if actionRun.ConcurrencyCancel {
statusFindOption = append(statusFindOption, StatusRunning)
}
runs, jobs, err := GetConcurrentRunsAndJobs(ctx, actionRun.RepoID, actionRun.ConcurrencyGroup, statusFindOption)
if err != nil {
return nil, fmt.Errorf("find concurrent runs and jobs: %w", err)
}
jobsToCancel = append(jobsToCancel, jobs...)
// cancel runs in the same concurrency group
for _, run := range runs {
if run.ID == actionRun.ID {
continue
}
jobs, err := db.Find[ActionRunJob](ctx, FindRunJobOptions{
RunID: run.ID,
})
if err != nil {
return nil, fmt.Errorf("find run %d jobs: %w", run.ID, err)
}
jobsToCancel = append(jobsToCancel, jobs...)
}
return CancelJobs(ctx, jobsToCancel)
}
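
For context, a minimal caller-side sketch of the new run-level helper (hypothetical code, not part of this commit; it assumes the run's concurrency group has already been evaluated and persisted):

	// Hypothetical caller in the actions service layer, for illustration only.
	if run.ConcurrencyGroup != "" {
		cancelledJobs, err := actions_model.CancelPreviousJobsByRunConcurrency(ctx, run)
		if err != nil {
			return fmt.Errorf("cancel previous runs in concurrency group %q: %w", run.ConcurrencyGroup, err)
		}
		log.Trace("cancelled %d jobs in concurrency group %q of repo %d", len(cancelledJobs), run.ConcurrencyGroup, run.RepoID)
	}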


@ -14,6 +14,7 @@ import (
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"github.com/nektos/act/pkg/jobparser"
"xorm.io/builder"
)
@ -22,23 +23,38 @@ type ActionRunJob struct {
ID int64
RunID int64 `xorm:"index"`
Run *ActionRun `xorm:"-"`
RepoID int64 `xorm:"index"`
RepoID int64 `xorm:"index(repo_concurrency)"`
Repo *repo_model.Repository `xorm:"-"`
OwnerID int64 `xorm:"index"`
CommitSHA string `xorm:"index"`
IsForkPullRequest bool
Name string `xorm:"VARCHAR(255)"`
Attempt int64
WorkflowPayload []byte
JobID string `xorm:"VARCHAR(255)"` // job id in workflow, not job's id
Needs []string `xorm:"JSON TEXT"`
RunsOn []string `xorm:"JSON TEXT"`
TaskID int64 // the latest task of the job
Status Status `xorm:"index"`
Started timeutil.TimeStamp
Stopped timeutil.TimeStamp
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated index"`
// WorkflowPayload is act/jobparser.SingleWorkflow for act/jobparser.Parse
// it should contain exactly one job with global workflow fields for this model
WorkflowPayload []byte
JobID string `xorm:"VARCHAR(255)"` // job id in workflow, not job's id
Needs []string `xorm:"JSON TEXT"`
RunsOn []string `xorm:"JSON TEXT"`
TaskID int64 // the latest task of the job
Status Status `xorm:"index"`
RawConcurrency string // raw concurrency from job YAML's "concurrency" section
// IsConcurrencyEvaluated is only valid/needed when this job's RawConcurrency is not empty.
// If RawConcurrency can't be evaluated (e.g. it depends on another job's outputs or contains errors), this field will be false.
// If RawConcurrency has been successfully evaluated, this field will be true and ConcurrencyGroup and ConcurrencyCancel are also set.
IsConcurrencyEvaluated bool
ConcurrencyGroup string `xorm:"index(repo_concurrency) NOT NULL DEFAULT ''"` // evaluated concurrency.group
ConcurrencyCancel bool `xorm:"NOT NULL DEFAULT FALSE"` // evaluated concurrency.cancel-in-progress
Started timeutil.TimeStamp
Stopped timeutil.TimeStamp
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated index"`
}
func init() {
@ -84,6 +100,24 @@ func (job *ActionRunJob) LoadAttributes(ctx context.Context) error {
return job.Run.LoadAttributes(ctx)
}
// ParseJob parses the job structure from the ActionRunJob.WorkflowPayload
func (job *ActionRunJob) ParseJob() (*jobparser.Job, error) {
// job.WorkflowPayload is a SingleWorkflow created from an ActionRun's workflow, which exactly contains this job's YAML definition.
// Ideally it shouldn't be called "Workflow", it is just a job with global workflow fields + trigger
parsedWorkflows, err := jobparser.Parse(job.WorkflowPayload)
if err != nil {
return nil, fmt.Errorf("job %d single workflow: unable to parse: %w", job.ID, err)
} else if len(parsedWorkflows) != 1 {
return nil, fmt.Errorf("job %d single workflow: not single workflow", job.ID)
}
_, workflowJob := parsedWorkflows[0].Job()
if workflowJob == nil {
// it shouldn't happen; since the callers don't check for nil, return an error instead of nil
return nil, util.ErrorWrap(util.ErrNotExist, "job %d single workflow: payload doesn't contain a job", job.ID)
}
return workflowJob, nil
}
func GetRunJobByID(ctx context.Context, id int64) (*ActionRunJob, error) {
var job ActionRunJob
has, err := db.GetEngine(ctx).Where("id=?", id).Get(&job)
@ -125,7 +159,7 @@ func UpdateRunJob(ctx context.Context, job *ActionRunJob, cond builder.Cond, col
return affected, nil
}
if affected != 0 && slices.Contains(cols, "status") && job.Status.IsWaiting() {
if slices.Contains(cols, "status") && job.Status.IsWaiting() {
// if the status of job changes to waiting again, increase tasks version.
if err := IncreaseTaskVersion(ctx, job.OwnerID, job.RepoID); err != nil {
return 0, err
@ -197,3 +231,39 @@ func AggregateJobStatus(jobs []*ActionRunJob) Status {
return StatusUnknown // it shouldn't happen
}
}
func CancelPreviousJobsByJobConcurrency(ctx context.Context, job *ActionRunJob) (jobsToCancel []*ActionRunJob, _ error) {
if job.RawConcurrency == "" {
return nil, nil
}
if !job.IsConcurrencyEvaluated {
return nil, nil
}
if job.ConcurrencyGroup == "" {
return nil, nil
}
statusFindOption := []Status{StatusWaiting, StatusBlocked}
if job.ConcurrencyCancel {
statusFindOption = append(statusFindOption, StatusRunning)
}
runs, jobs, err := GetConcurrentRunsAndJobs(ctx, job.RepoID, job.ConcurrencyGroup, statusFindOption)
if err != nil {
return nil, fmt.Errorf("find concurrent runs and jobs: %w", err)
}
jobs = slices.DeleteFunc(jobs, func(j *ActionRunJob) bool { return j.ID == job.ID })
jobsToCancel = append(jobsToCancel, jobs...)
// cancel runs in the same concurrency group
for _, run := range runs {
jobs, err := db.Find[ActionRunJob](ctx, FindRunJobOptions{
RunID: run.ID,
})
if err != nil {
return nil, fmt.Errorf("find run %d jobs: %w", run.ID, err)
}
jobsToCancel = append(jobsToCancel, jobs...)
}
return CancelJobs(ctx, jobsToCancel)
}
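
And a job-level sketch (hypothetical, not part of this commit) of how a scheduler might store an evaluated concurrency expression and then cancel the older jobs that share the group; evaluatedGroup and evaluatedCancel are assumed to come from interpolating the job's raw "concurrency" section:

	// Illustration only; the update column names assume xorm's default snake_case mapping.
	job.ConcurrencyGroup = evaluatedGroup
	job.ConcurrencyCancel = evaluatedCancel
	job.IsConcurrencyEvaluated = true
	if _, err := actions_model.UpdateRunJob(ctx, job, nil, "concurrency_group", "concurrency_cancel", "is_concurrency_evaluated"); err != nil {
		return err
	}
	if _, err := actions_model.CancelPreviousJobsByJobConcurrency(ctx, job); err != nil {
		return err
	}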


@ -69,12 +69,13 @@ func (jobs ActionJobList) LoadAttributes(ctx context.Context, withRepo bool) err
type FindRunJobOptions struct {
db.ListOptions
RunID int64
RepoID int64
OwnerID int64
CommitSHA string
Statuses []Status
UpdatedBefore timeutil.TimeStamp
RunID int64
RepoID int64
OwnerID int64
CommitSHA string
Statuses []Status
UpdatedBefore timeutil.TimeStamp
ConcurrencyGroup string
}
func (opts FindRunJobOptions) ToConds() builder.Cond {
@ -94,6 +95,12 @@ func (opts FindRunJobOptions) ToConds() builder.Cond {
if opts.UpdatedBefore > 0 {
cond = cond.And(builder.Lt{"`action_run_job`.updated": opts.UpdatedBefore})
}
if opts.ConcurrencyGroup != "" {
if opts.RepoID == 0 {
panic("Invalid FindRunJobOptions: repo_id is required")
}
cond = cond.And(builder.Eq{"`action_run_job`.concurrency_group": opts.ConcurrencyGroup})
}
return cond
}
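
The new filter is only meaningful together with a repository scope (hence the panic above); a minimal usage sketch (hypothetical, not part of this commit):

	// Illustration only: querying one concurrency group always requires RepoID.
	jobs, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{
		RepoID:           repoID,
		ConcurrencyGroup: group,
		Statuses:         []actions_model.Status{actions_model.StatusWaiting, actions_model.StatusBlocked},
	})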


@ -64,15 +64,16 @@ func (runs RunList) LoadRepos(ctx context.Context) error {
type FindRunOptions struct {
db.ListOptions
RepoID int64
OwnerID int64
WorkflowID string
Ref string // the commit/tag/… that caused this workflow
TriggerUserID int64
TriggerEvent webhook_module.HookEventType
Approved bool // not util.OptionalBool, it works only when it's true
Status []Status
CommitSHA string
RepoID int64
OwnerID int64
WorkflowID string
Ref string // the commit/tag/… that caused this workflow
TriggerUserID int64
TriggerEvent webhook_module.HookEventType
Approved bool // not util.OptionalBool, it works only when it's true
Status []Status
ConcurrencyGroup string
CommitSHA string
}
func (opts FindRunOptions) ToConds() builder.Cond {
@ -101,6 +102,12 @@ func (opts FindRunOptions) ToConds() builder.Cond {
if opts.CommitSHA != "" {
cond = cond.And(builder.Eq{"`action_run`.commit_sha": opts.CommitSHA})
}
if len(opts.ConcurrencyGroup) > 0 {
if opts.RepoID == 0 {
panic("Invalid FindRunOptions: repo_id is required")
}
cond = cond.And(builder.Eq{"`action_run`.concurrency_group": opts.ConcurrencyGroup})
}
return cond
}


@ -21,7 +21,6 @@ import (
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/nektos/act/pkg/jobparser"
"google.golang.org/protobuf/types/known/timestamppb"
"xorm.io/builder"
)
@ -278,13 +277,10 @@ func CreateTaskForRunner(ctx context.Context, runner *ActionRunner) (*ActionTask
return nil, false, err
}
parsedWorkflows, err := jobparser.Parse(job.WorkflowPayload)
workflowJob, err := job.ParseJob()
if err != nil {
return nil, false, fmt.Errorf("parse workflow of job %d: %w", job.ID, err)
} else if len(parsedWorkflows) != 1 {
return nil, false, fmt.Errorf("workflow of job %d: not single workflow", job.ID)
return nil, false, fmt.Errorf("load job %d: %w", job.ID, err)
}
_, workflowJob := parsedWorkflows[0].Job()
if _, err := e.Insert(task); err != nil {
return nil, false, err


@ -173,7 +173,7 @@ func GetReviewsByIssueID(ctx context.Context, issueID int64) (latestReviews, mig
reviewersMap := make(map[int64][]*Review) // key is reviewer id
originalReviewersMap := make(map[int64][]*Review) // key is original author id
reviewTeamsMap := make(map[int64][]*Review) // key is reviewer team id
countedReivewTypes := []ReviewType{ReviewTypeApprove, ReviewTypeReject, ReviewTypeRequest}
countedReivewTypes := []ReviewType{ReviewTypeApprove, ReviewTypeReject, ReviewTypeRequest, ReviewTypeComment}
for _, review := range reviews {
if review.ReviewerTeamID == 0 && slices.Contains(countedReivewTypes, review.Type) && !review.Dismissed {
if review.OriginalAuthorID != 0 {


@ -122,6 +122,7 @@ func TestGetReviewersByIssueID(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
issue := unittest.AssertExistsAndLoadBean(t, &issues_model.Issue{ID: 3})
user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
org3 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
user4 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 4})
@ -129,6 +130,12 @@ func TestGetReviewersByIssueID(t *testing.T) {
expectedReviews := []*issues_model.Review{}
expectedReviews = append(expectedReviews,
&issues_model.Review{
ID: 5,
Reviewer: user1,
Type: issues_model.ReviewTypeComment,
UpdatedUnix: 946684810,
},
&issues_model.Review{
ID: 7,
Reviewer: org3,
@ -167,8 +174,9 @@ func TestGetReviewersByIssueID(t *testing.T) {
for _, review := range allReviews {
assert.NoError(t, review.LoadReviewer(t.Context()))
}
if assert.Len(t, allReviews, 5) {
if assert.Len(t, allReviews, 6) {
for i, review := range allReviews {
assert.Equal(t, expectedReviews[i].ID, review.ID)
assert.Equal(t, expectedReviews[i].Reviewer, review.Reviewer)
assert.Equal(t, expectedReviews[i].Type, review.Type)
assert.Equal(t, expectedReviews[i].UpdatedUnix, review.UpdatedUnix)


@ -394,6 +394,7 @@ func prepareMigrationTasks() []*migration {
// Gitea 1.24.0 ends at database version 321
newMigration(321, "Use LONGTEXT for some columns and fix review_state.updated_files column", v1_25.UseLongTextInSomeColumnsAndFixBugs),
newMigration(322, "Extend comment tree_path length limit", v1_25.ExtendCommentTreePathLength),
newMigration(323, "Add support for actions concurrency", v1_25.AddActionsConcurrency),
}
return preparedMigrations
}


@ -0,0 +1,43 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package v1_25
import (
"xorm.io/xorm"
)
func AddActionsConcurrency(x *xorm.Engine) error {
type ActionRun struct {
RepoID int64 `xorm:"index(repo_concurrency)"`
RawConcurrency string
ConcurrencyGroup string `xorm:"index(repo_concurrency) NOT NULL DEFAULT ''"`
ConcurrencyCancel bool `xorm:"NOT NULL DEFAULT FALSE"`
}
if _, err := x.SyncWithOptions(xorm.SyncOptions{
IgnoreDropIndices: true,
}, new(ActionRun)); err != nil {
return err
}
if err := x.Sync(new(ActionRun)); err != nil {
return err
}
type ActionRunJob struct {
RepoID int64 `xorm:"index(repo_concurrency)"`
RawConcurrency string
IsConcurrencyEvaluated bool
ConcurrencyGroup string `xorm:"index(repo_concurrency) NOT NULL DEFAULT ''"`
ConcurrencyCancel bool `xorm:"NOT NULL DEFAULT FALSE"`
}
if _, err := x.SyncWithOptions(xorm.SyncOptions{
IgnoreDropIndices: true,
}, new(ActionRunJob)); err != nil {
return err
}
return nil
}


@ -229,10 +229,6 @@ func RelativePath(ownerName, repoName string) string {
return strings.ToLower(ownerName) + "/" + strings.ToLower(repoName) + ".git"
}
func RelativeWikiPath(ownerName, repoName string) string {
return strings.ToLower(ownerName) + "/" + strings.ToLower(repoName) + ".wiki.git"
}
// RelativePath should be an unix style path like username/reponame.git
func (repo *Repository) RelativePath() string {
return RelativePath(repo.OwnerName, repo.Name)
@ -245,12 +241,6 @@ func (sr StorageRepo) RelativePath() string {
return string(sr)
}
// WikiStorageRepo returns the storage repo for the wiki
// The wiki repository should have the same object format as the code repository
func (repo *Repository) WikiStorageRepo() StorageRepo {
return StorageRepo(RelativeWikiPath(repo.OwnerName, repo.Name))
}
// SanitizedOriginalURL returns a sanitized OriginalURL
func (repo *Repository) SanitizedOriginalURL() string {
if repo.OriginalURL == "" {


@ -7,7 +7,6 @@ package repo
import (
"context"
"fmt"
"path/filepath"
"strings"
user_model "code.gitea.io/gitea/models/user"
@ -76,12 +75,12 @@ func (repo *Repository) WikiCloneLink(ctx context.Context, doer *user_model.User
return repo.cloneLink(ctx, doer, repo.Name+".wiki")
}
// WikiPath returns wiki data path by given user and repository name.
func WikiPath(userName, repoName string) string {
return filepath.Join(user_model.UserPath(userName), strings.ToLower(repoName)+".wiki.git")
func RelativeWikiPath(ownerName, repoName string) string {
return strings.ToLower(ownerName) + "/" + strings.ToLower(repoName) + ".wiki.git"
}
// WikiPath returns wiki data path for given repository.
func (repo *Repository) WikiPath() string {
return WikiPath(repo.OwnerName, repo.Name)
// WikiStorageRepo returns the storage repo for the wiki
// The wiki repository should have the same object format as the code repository
func (repo *Repository) WikiStorageRepo() StorageRepo {
return StorageRepo(RelativeWikiPath(repo.OwnerName, repo.Name))
}


@ -4,12 +4,10 @@
package repo_test
import (
"path/filepath"
"testing"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unittest"
"code.gitea.io/gitea/modules/setting"
"github.com/stretchr/testify/assert"
)
@ -23,15 +21,10 @@ func TestRepository_WikiCloneLink(t *testing.T) {
assert.Equal(t, "https://try.gitea.io/user2/repo1.wiki.git", cloneLink.HTTPS)
}
func TestWikiPath(t *testing.T) {
func TestRepository_RelativeWikiPath(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
expected := filepath.Join(setting.RepoRootPath, "user2/repo1.wiki.git")
assert.Equal(t, expected, repo_model.WikiPath("user2", "repo1"))
}
func TestRepository_WikiPath(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
expected := filepath.Join(setting.RepoRootPath, "user2/repo1.wiki.git")
assert.Equal(t, expected, repo.WikiPath())
assert.Equal(t, "user2/repo1.wiki.git", repo_model.RelativeWikiPath(repo.OwnerName, repo.Name))
assert.Equal(t, "user2/repo1.wiki.git", repo.WikiStorageRepo().RelativePath())
}


@ -3,7 +3,13 @@
package git
import "code.gitea.io/gitea/modules/setting"
import (
"context"
"strings"
"code.gitea.io/gitea/modules/git/gitcmd"
"code.gitea.io/gitea/modules/setting"
)
// Based on https://git-scm.com/docs/git-config#Documentation/git-config.txt-gpgformat
const (
@ -24,3 +30,48 @@ func (s *SigningKey) String() string {
setting.PanicInDevOrTesting("don't call SigningKey.String() - it exposes the KeyID which might be a local file path")
return "SigningKey:" + s.Format
}
// GetSigningKey returns the KeyID and git Signature for the repo
func GetSigningKey(ctx context.Context, repoPath string) (*SigningKey, *Signature) {
if setting.Repository.Signing.SigningKey == "none" {
return nil, nil
}
if setting.Repository.Signing.SigningKey == "default" || setting.Repository.Signing.SigningKey == "" {
// Can ignore the error here as it means that commit.gpgsign is not set
value, _, _ := gitcmd.NewCommand("config", "--get", "commit.gpgsign").WithDir(repoPath).RunStdString(ctx)
sign, valid := ParseBool(strings.TrimSpace(value))
if !sign || !valid {
return nil, nil
}
format, _, _ := gitcmd.NewCommand("config", "--default", SigningKeyFormatOpenPGP, "--get", "gpg.format").WithDir(repoPath).RunStdString(ctx)
signingKey, _, _ := gitcmd.NewCommand("config", "--get", "user.signingkey").WithDir(repoPath).RunStdString(ctx)
signingName, _, _ := gitcmd.NewCommand("config", "--get", "user.name").WithDir(repoPath).RunStdString(ctx)
signingEmail, _, _ := gitcmd.NewCommand("config", "--get", "user.email").WithDir(repoPath).RunStdString(ctx)
if strings.TrimSpace(signingKey) == "" {
return nil, nil
}
return &SigningKey{
KeyID: strings.TrimSpace(signingKey),
Format: strings.TrimSpace(format),
}, &Signature{
Name: strings.TrimSpace(signingName),
Email: strings.TrimSpace(signingEmail),
}
}
if setting.Repository.Signing.SigningKey == "" {
return nil, nil
}
return &SigningKey{
KeyID: setting.Repository.Signing.SigningKey,
Format: setting.Repository.Signing.SigningFormat,
}, &Signature{
Name: setting.Repository.Signing.SigningName,
Email: setting.Repository.Signing.SigningEmail,
}
}
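
A minimal calling sketch, assuming repoPath points at a bare repository on disk; with the "default" setting the key is resolved from the repository's own commit.gpgsign, gpg.format, user.signingkey, user.name and user.email configuration. The helper name is illustrative.

package example

import (
	"context"
	"fmt"

	"code.gitea.io/gitea/modules/git"
)

// describeSigning is a hypothetical wrapper around GetSigningKey.
func describeSigning(ctx context.Context, repoPath string) string {
	key, sig := git.GetSigningKey(ctx, repoPath)
	if key == nil {
		return "commit signing disabled or no usable key configured"
	}
	// deliberately not printing key.KeyID: it may be a local file path
	return fmt.Sprintf("signing as %s <%s> using a %s key", sig.Name, sig.Email, key.Format)
}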

View File

@ -34,12 +34,12 @@ func TestParseGitURLs(t *testing.T) {
},
},
{
kase: "git@[fe80:14fc:cec5:c174:d88%2510]:go-gitea/gitea.git",
kase: "git@[fe80::14fc:cec5:c174:d88%2510]:go-gitea/gitea.git",
expected: &GitURL{
URL: &url.URL{
Scheme: "ssh",
User: url.User("git"),
Host: "[fe80:14fc:cec5:c174:d88%10]",
Host: "[fe80::14fc:cec5:c174:d88%10]",
Path: "go-gitea/gitea.git",
},
extraMark: 1,
@ -137,11 +137,11 @@ func TestParseGitURLs(t *testing.T) {
},
},
{
kase: "https://[fe80:14fc:cec5:c174:d88%2510]:20/go-gitea/gitea.git",
kase: "https://[fe80::14fc:cec5:c174:d88%2510]:20/go-gitea/gitea.git",
expected: &GitURL{
URL: &url.URL{
Scheme: "https",
Host: "[fe80:14fc:cec5:c174:d88%10]:20",
Host: "[fe80::14fc:cec5:c174:d88%10]:20",
Path: "/go-gitea/gitea.git",
},
extraMark: 0,

modules/gitrepo/clone.go Normal file
View File

@ -0,0 +1,20 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package gitrepo
import (
"context"
"code.gitea.io/gitea/modules/git"
)
// CloneExternalRepo clones an external repository to the managed repository.
func CloneExternalRepo(ctx context.Context, fromRemoteURL string, toRepo Repository, opts git.CloneRepoOptions) error {
return git.Clone(ctx, fromRemoteURL, repoPath(toRepo), opts)
}
// CloneRepoToLocal clones a managed repository to a local path.
func CloneRepoToLocal(ctx context.Context, fromRepo Repository, toLocalPath string, opts git.CloneRepoOptions) error {
return git.Clone(ctx, repoPath(fromRepo), toLocalPath, opts)
}
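
A rough sketch of the intended call sites, e.g. during a migration: clone an external remote into managed storage, then check it out to a temporary local path. The surrounding names (remoteURL, tmpDir) and the empty CloneRepoOptions are placeholders.

package example

import (
	"context"

	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/gitrepo"
)

// migrateAndCheckout is a hypothetical flow built on the two new helpers.
func migrateAndCheckout(ctx context.Context, remoteURL string, repo gitrepo.Repository, tmpDir string) error {
	// bring the external repository into managed storage
	if err := gitrepo.CloneExternalRepo(ctx, remoteURL, repo, git.CloneRepoOptions{}); err != nil {
		return err
	}
	// produce a local working copy of the managed repository
	return gitrepo.CloneRepoToLocal(ctx, repo, tmpDir, git.CloneRepoOptions{})
}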

View File

@ -0,0 +1,14 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package gitrepo
import (
"context"
"code.gitea.io/gitea/modules/git"
)
func WriteCommitGraph(ctx context.Context, repo Repository) error {
return git.WriteCommitGraph(ctx, repoPath(repo))
}

View File

@ -7,9 +7,12 @@ import (
"context"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/git/gitcmd"
"code.gitea.io/gitea/modules/reqctx"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
@ -86,3 +89,12 @@ func RenameRepository(ctx context.Context, repo, newRepo Repository) error {
func InitRepository(ctx context.Context, repo Repository, objectFormatName string) error {
return git.InitRepository(ctx, repoPath(repo), true, objectFormatName)
}
func UpdateServerInfo(ctx context.Context, repo Repository) error {
_, _, err := RunCmdBytes(ctx, repo, gitcmd.NewCommand("update-server-info"))
return err
}
func GetRepoFS(repo Repository) fs.FS {
return os.DirFS(repoPath(repo))
}
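
GetRepoFS exposes the repository directory as an fs.FS, so handlers can serve dumb-HTTP files without building absolute paths themselves. A hedged sketch follows; the handler shape is illustrative, not the actual route handler.

package example

import (
	"log"
	"net/http"

	"code.gitea.io/gitea/modules/gitrepo"
)

// serveInfoRefs is a hypothetical dumb-HTTP handler fragment.
func serveInfoRefs(w http.ResponseWriter, r *http.Request, repo gitrepo.Repository) {
	if err := gitrepo.UpdateServerInfo(r.Context(), repo); err != nil {
		log.Printf("update-server-info failed: %v", err)
	}
	http.ServeFileFS(w, r, gitrepo.GetRepoFS(repo), "info/refs")
}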

modules/gitrepo/push.go Normal file
View File

@ -0,0 +1,14 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package gitrepo
import (
"context"
"code.gitea.io/gitea/modules/git"
)
func Push(ctx context.Context, repo Repository, opts git.PushOptions) error {
return git.Push(ctx, repoPath(repo), opts)
}

View File

@ -0,0 +1,14 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package gitrepo
import (
"context"
"code.gitea.io/gitea/modules/git"
)
func GetSigningKey(ctx context.Context, repo Repository) (*git.SigningKey, *git.Signature) {
return git.GetSigningKey(ctx, repoPath(repo))
}

View File

@ -4,7 +4,10 @@
package hcaptcha
import (
"errors"
"io"
"net/http"
"net/url"
"os"
"strings"
"testing"
@ -21,6 +24,33 @@ func TestMain(m *testing.M) {
os.Exit(m.Run())
}
type mockTransport struct{}
func (mockTransport) RoundTrip(req *http.Request) (*http.Response, error) {
if req.URL.String() != verifyURL {
return nil, errors.New("unsupported url")
}
body, err := io.ReadAll(req.Body)
if err != nil {
return nil, err
}
bodyValues, err := url.ParseQuery(string(body))
if err != nil {
return nil, err
}
var responseText string
if bodyValues.Get("response") == dummyToken {
responseText = `{"success":true,"credit":false,"hostname":"dummy-key-pass","challenge_ts":"2025-10-08T16:02:56.136Z"}`
} else {
responseText = `{"success":false,"error-codes":["invalid-input-response"]}`
}
return &http.Response{Request: req, Body: io.NopCloser(strings.NewReader(responseText))}, nil
}
func TestCaptcha(t *testing.T) {
tt := []struct {
Name string
@ -54,7 +84,8 @@ func TestCaptcha(t *testing.T) {
for _, tc := range tt {
t.Run(tc.Name, func(t *testing.T) {
client, err := New(tc.Secret, WithHTTP(&http.Client{
Timeout: time.Second * 5,
Timeout: time.Second * 5,
Transport: mockTransport{},
}))
if err != nil {
// The only error that can be returned from creating a client

View File

@ -7,54 +7,53 @@ package httplib
import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"strings"
"sync"
"time"
)
var defaultSetting = Settings{"GiteaServer", 60 * time.Second, 60 * time.Second, nil, nil}
// newRequest returns *Request with specific method
func newRequest(url, method string) *Request {
var resp http.Response
req := http.Request{
Method: method,
Header: make(http.Header),
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
var defaultTransport = sync.OnceValue(func() http.RoundTripper {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: DialContextWithTimeout(10 * time.Second), // it is good enough in modern days
}
})
func DialContextWithTimeout(timeout time.Duration) func(ctx context.Context, network, address string) (net.Conn, error) {
return func(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{Timeout: timeout}).DialContext(ctx, network, address)
}
return &Request{url, &req, map[string]string{}, defaultSetting, &resp, nil}
}
// NewRequest returns *Request with specific method
func NewRequest(url, method string) *Request {
return newRequest(url, method)
return &Request{
url: url,
req: &http.Request{
Method: method,
Header: make(http.Header),
Proto: "HTTP/1.1", // FIXME: from legacy httplib, it shouldn't be hardcoded
ProtoMajor: 1,
ProtoMinor: 1,
},
params: map[string]string{},
// ATTENTION: kept from legacy httplib; callers must pay attention to this default, it causes annoying bugs when the response takes a long time
readWriteTimeout: 60 * time.Second,
}
}
// Settings is the default settings for http client
type Settings struct {
UserAgent string
ConnectTimeout time.Duration
ReadWriteTimeout time.Duration
TLSClientConfig *tls.Config
Transport http.RoundTripper
}
// Request provides more useful methods for requesting one url than http.Request.
type Request struct {
url string
req *http.Request
params map[string]string
setting Settings
resp *http.Response
body []byte
url string
req *http.Request
params map[string]string
readWriteTimeout time.Duration
transport http.RoundTripper
}
// SetContext sets the request's Context
@ -63,36 +62,24 @@ func (r *Request) SetContext(ctx context.Context) *Request {
return r
}
// SetTimeout sets connect time out and read-write time out for BeegoRequest.
func (r *Request) SetTimeout(connectTimeout, readWriteTimeout time.Duration) *Request {
r.setting.ConnectTimeout = connectTimeout
r.setting.ReadWriteTimeout = readWriteTimeout
// SetTransport sets the request transport; if not set, httplib's default transport with environment proxy support is used.
// ATTENTION: an http.Transport keeps a connection pool, so it should be reused as much as possible; do not create a lot of transports.
func (r *Request) SetTransport(transport http.RoundTripper) *Request {
r.transport = transport
return r
}
func (r *Request) SetReadWriteTimeout(readWriteTimeout time.Duration) *Request {
r.setting.ReadWriteTimeout = readWriteTimeout
r.readWriteTimeout = readWriteTimeout
return r
}
// SetTLSClientConfig sets tls connection configurations if visiting https url.
func (r *Request) SetTLSClientConfig(config *tls.Config) *Request {
r.setting.TLSClientConfig = config
return r
}
// Header add header item string in request.
// Header set header item string in request.
func (r *Request) Header(key, value string) *Request {
r.req.Header.Set(key, value)
return r
}
// SetTransport sets transport to
func (r *Request) SetTransport(transport http.RoundTripper) *Request {
r.setting.Transport = transport
return r
}
// Param adds query param in to request.
// params build query string as ?key1=value1&key2=value2...
func (r *Request) Param(key, value string) *Request {
@ -125,11 +112,9 @@ func (r *Request) Body(data any) *Request {
return r
}
func (r *Request) getResponse() (*http.Response, error) {
if r.resp.StatusCode != 0 {
return r.resp, nil
}
// Response executes request client and returns the response.
// Caller MUST close the response body if no error occurs.
func (r *Request) Response() (*http.Response, error) {
var paramBody string
if len(r.params) > 0 {
var buf bytes.Buffer
@ -160,59 +145,19 @@ func (r *Request) getResponse() (*http.Response, error) {
return nil, err
}
trans := r.setting.Transport
if trans == nil {
// create default transport
trans = &http.Transport{
TLSClientConfig: r.setting.TLSClientConfig,
Proxy: http.ProxyFromEnvironment,
DialContext: TimeoutDialer(r.setting.ConnectTimeout),
}
} else if t, ok := trans.(*http.Transport); ok {
if t.TLSClientConfig == nil {
t.TLSClientConfig = r.setting.TLSClientConfig
}
if t.DialContext == nil {
t.DialContext = TimeoutDialer(r.setting.ConnectTimeout)
}
}
client := &http.Client{
Transport: trans,
Timeout: r.setting.ReadWriteTimeout,
Transport: r.transport,
Timeout: r.readWriteTimeout,
}
if client.Transport == nil {
client.Transport = defaultTransport()
}
if len(r.setting.UserAgent) > 0 && len(r.req.Header.Get("User-Agent")) == 0 {
r.req.Header.Set("User-Agent", r.setting.UserAgent)
if r.req.Header.Get("User-Agent") == "" {
r.req.Header.Set("User-Agent", "GiteaHttpLib")
}
resp, err := client.Do(r.req)
if err != nil {
return nil, err
}
r.resp = resp
return resp, nil
}
// Response executes request client gets response manually.
// Caller MUST close the response body if no error occurs
func (r *Request) Response() (*http.Response, error) {
if r == nil {
return nil, errors.New("invalid request")
}
return r.getResponse()
}
// TimeoutDialer returns functions of connection dialer with timeout settings for http.Transport Dial field.
func TimeoutDialer(cTimeout time.Duration) func(ctx context.Context, net, addr string) (c net.Conn, err error) {
return func(ctx context.Context, netw, addr string) (net.Conn, error) {
d := net.Dialer{Timeout: cTimeout}
conn, err := d.DialContext(ctx, netw, addr)
if err != nil {
return nil, err
}
return conn, nil
}
return client.Do(r.req)
}
func (r *Request) GoString() string {
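
A minimal sketch of the reworked request flow, assuming the default transport is acceptable: the caller only sets a read/write timeout and is responsible for closing the response body. The function and URL are placeholders.

package example

import (
	"context"
	"net/http"
	"time"

	"code.gitea.io/gitea/modules/httplib"
)

// fetchStatus is a hypothetical caller of the slimmed-down Request API.
func fetchStatus(ctx context.Context, url string) (int, error) {
	resp, err := httplib.NewRequest(url, http.MethodGet).
		SetContext(ctx).
		SetReadWriteTimeout(30 * time.Second).
		Response()
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	return resp.StatusCode, nil
}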

View File

@ -157,7 +157,7 @@ func (g *GiteaBackend) Batch(_ string, pointers []transfer.BatchItem, args trans
}
// Download implements transfer.Backend. The returned reader must be closed by the caller.
func (g *GiteaBackend) Download(oid string, args transfer.Args) (io.ReadCloser, int64, error) {
func (g *GiteaBackend) Download(oid string, args transfer.Args) (_ io.ReadCloser, _ int64, retErr error) {
idMapStr, exists := args[argID]
if !exists {
return nil, 0, ErrMissingID
@ -188,7 +188,15 @@ func (g *GiteaBackend) Download(oid string, args transfer.Args) (io.ReadCloser,
if err != nil {
return nil, 0, fmt.Errorf("failed to get response: %w", err)
}
// no need to close the body here with "defer resp.Body.Close()", see below
// We must return the ReadCloser itself rather than "ReadAll" the content, to avoid OOM.
// "transfer.Backend" checks for the io.Closer interface and will close the Body reader,
// so only close the Body here when an error occurs
defer func() {
if retErr != nil {
_ = resp.Body.Close()
}
}()
if resp.StatusCode != http.StatusOK {
return nil, 0, statusCodeToErr(resp.StatusCode)
}
@ -197,7 +205,6 @@ func (g *GiteaBackend) Download(oid string, args transfer.Args) (io.ReadCloser,
if err != nil {
return nil, 0, fmt.Errorf("failed to parse content length: %w", err)
}
// transfer.Backend will check io.Closer interface and close this Body reader
return resp.Body, respSize, nil
}
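
The close-only-on-error handling above hinges on the named retErr return. The same idiom in isolation, independent of the LFS types (a sketch, not part of the backend):

package example

import (
	"fmt"
	"io"
	"net/http"
)

// openBody returns the response body for the caller to consume and close;
// the body is closed here only when an error is returned.
func openBody(client *http.Client, req *http.Request) (_ io.ReadCloser, retErr error) {
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer func() {
		if retErr != nil {
			_ = resp.Body.Close()
		}
	}()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return resp.Body, nil
}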

View File

@ -10,6 +10,7 @@ import (
"net/http"
"os"
"strings"
"sync"
"time"
"code.gitea.io/gitea/modules/httplib"
@ -33,6 +34,35 @@ func getClientIP() string {
return strings.Fields(sshConnEnv)[0]
}
func dialContextInternalAPI(ctx context.Context, network, address string) (conn net.Conn, err error) {
d := net.Dialer{Timeout: 10 * time.Second}
if setting.Protocol == setting.HTTPUnix {
conn, err = d.DialContext(ctx, "unix", setting.HTTPAddr)
} else {
conn, err = d.DialContext(ctx, network, address)
}
if err != nil {
return nil, err
}
if setting.LocalUseProxyProtocol {
if err = proxyprotocol.WriteLocalHeader(conn); err != nil {
_ = conn.Close()
return nil, err
}
}
return conn, nil
}
var internalAPITransport = sync.OnceValue(func() http.RoundTripper {
return &http.Transport{
DialContext: dialContextInternalAPI,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
ServerName: setting.Domain,
},
}
})
func NewInternalRequest(ctx context.Context, url, method string) *httplib.Request {
if setting.InternalToken == "" {
log.Fatal(`The INTERNAL_TOKEN setting is missing from the configuration file: %q.
@ -43,49 +73,11 @@ Ensure you are running in the correct environment or set the correct configurati
log.Fatal("Invalid internal request URL: %q", url)
}
req := httplib.NewRequest(url, method).
return httplib.NewRequest(url, method).
SetContext(ctx).
SetTransport(internalAPITransport()).
Header("X-Real-IP", getClientIP()).
Header("X-Gitea-Internal-Auth", "Bearer "+setting.InternalToken).
SetTLSClientConfig(&tls.Config{
InsecureSkipVerify: true,
ServerName: setting.Domain,
})
if setting.Protocol == setting.HTTPUnix {
req.SetTransport(&http.Transport{
DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
var d net.Dialer
conn, err := d.DialContext(ctx, "unix", setting.HTTPAddr)
if err != nil {
return conn, err
}
if setting.LocalUseProxyProtocol {
if err = proxyprotocol.WriteLocalHeader(conn); err != nil {
_ = conn.Close()
return nil, err
}
}
return conn, err
},
})
} else if setting.LocalUseProxyProtocol {
req.SetTransport(&http.Transport{
DialContext: func(ctx context.Context, network, address string) (net.Conn, error) {
var d net.Dialer
conn, err := d.DialContext(ctx, network, address)
if err != nil {
return conn, err
}
if err = proxyprotocol.WriteLocalHeader(conn); err != nil {
_ = conn.Close()
return nil, err
}
return conn, err
},
})
}
return req
Header("X-Gitea-Internal-Auth", "Bearer "+setting.InternalToken)
}
func newInternalRequestAPI(ctx context.Context, url, method string, body ...any) *httplib.Request {
@ -98,6 +90,6 @@ func newInternalRequestAPI(ctx context.Context, url, method string, body ...any)
log.Fatal("Too many arguments for newInternalRequestAPI")
}
req.SetTimeout(10*time.Second, 60*time.Second)
req.SetReadWriteTimeout(60 * time.Second)
return req
}
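
The sync.OnceValue wrapper builds the transport lazily and exactly once, so every internal request reuses one connection pool instead of creating a transport per call. The same pattern in a generic form (names are illustrative):

package example

import (
	"net"
	"net/http"
	"sync"
	"time"
)

// sharedTransport is created on first use and shared by all callers afterwards.
var sharedTransport = sync.OnceValue(func() http.RoundTripper {
	return &http.Transport{
		DialContext: (&net.Dialer{Timeout: 10 * time.Second}).DialContext,
	}
})

func newClient() *http.Client {
	return &http.Client{Transport: sharedTransport()}
}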

View File

@ -6,7 +6,6 @@ package private
import (
"context"
"fmt"
"time"
"code.gitea.io/gitea/modules/setting"
)
@ -31,6 +30,6 @@ func RestoreRepo(ctx context.Context, repoDir, ownerName, repoName string, units
Units: units,
Validation: validation,
})
req.SetTimeout(3*time.Second, 0) // since the request will spend much time, don't timeout
req.SetReadWriteTimeout(0) // since the request will spend much time, don't timeout
return requestJSONClientMsg(req, fmt.Sprintf("Restore repo %s/%s successfully", ownerName, repoName))
}

View File

@ -109,6 +109,7 @@ copy_path=パスをコピー
copy_success=コピーされました!
copy_error=コピーに失敗しました
copy_type_unsupported=このファイルタイプはコピーできません
copy_filename=ファイル名をコピー
write=書き込み
preview=プレビュー
@ -2433,6 +2434,9 @@ settings.event_workflow_job_desc=Gitea Actions のワークフロージョブが
settings.event_package=パッケージ
settings.event_package_desc=リポジトリにパッケージが作成または削除されたとき。
settings.branch_filter=ブランチ フィルター
settings.branch_filter_desc_1=プッシュ、ブランチ作成、ブランチ削除イベントに対するブランチ(およびref名)の許可リストで、globパターンで指定します。 空または<code>*</code>の場合、すべてのブランチとタグのイベントが報告されます。
settings.branch_filter_desc_2=完全なref名にマッチさせるには、 <code>refs/heads/</code> または <code>refs/tags/</code> を前に付けてください。
settings.branch_filter_desc_doc=書き方についてはドキュメント <a href="%[1]s">%[2]s</a> を参照してください。
settings.authorization_header=Authorizationヘッダー
settings.authorization_header_desc=入力した場合、リクエストにAuthorizationヘッダーとして付加します。 例: %s
settings.active=有効

View File

@ -2075,6 +2075,8 @@ settings=Configurações
settings.desc=Configurações é onde você pode gerenciar as opções para o repositório.
settings.options=Repositório
settings.public_access=Acesso Público
settings.public_access_desc=Configurar permissões de acesso do visitante público para substituir os padrões deste repositório.
settings.public_access.docs.not_set=Não definido: nenhuma permissão extra de acesso público. A permissão do visitante segue a visibilidade e as permissões de membro do repositório.
settings.collaboration=Colaboradores
settings.collaboration.admin=Administrador
settings.collaboration.write=Escrita
@ -2760,6 +2762,11 @@ view_as_role=Ver como: %s
view_as_public_hint=Você está vendo o README como um usuário público.
view_as_member_hint=Você está vendo o README como um membro desta organização.
worktime.date_range_start=Data de início
worktime.date_range_end=Data de término
worktime.by_repositories=Por repositórios
worktime.by_milestones=Por marcos
worktime.by_members=Por membros
[admin]
maintenance=Manutenção
@ -3371,6 +3378,7 @@ versions=Versões
versions.view_all=Ver todas
dependency.id=ID
dependency.version=Versão
search_in_external_registry=Pesquisar em %s
alpine.registry=Configure este registro adicionando o URL no arquivo <code>/etc/apk/repositories</code>:
alpine.registry.key=Baixe a chave RSA pública do registro para a pasta <code>/etc/apk/keys/</code> para verificar a assinatura do índice:
alpine.registry.info=Escolha o $branch e $repository da lista abaixo.
@ -3398,6 +3406,7 @@ conda.install=Para instalar o pacote usando o Conda, execute o seguinte comando:
container.details.type=Tipo de Imagem
container.details.platform=Plataforma
container.pull=Puxe a imagem pela linha de comando:
container.images=Imagens
container.digest=Digest
container.multi_arch=S.O. / Arquitetura
container.layers=Camadas da Imagem
@ -3506,6 +3515,8 @@ creation.name_placeholder=apenas caracteres alfanuméricos ou underline (_), nã
creation.value_placeholder=Insira qualquer conteúdo. Espaços em branco no início e no fim serão omitidos.
add_secret=Adicionar segredo
edit_secret=Editar segredo
deletion=Excluir segredo
deletion.description=A exclusão de um segredo é permanente e não pode ser desfeita. Continuar?
deletion.success=O segredo foi excluído.
@ -3605,9 +3616,11 @@ variables.update.success=A variável foi editada.
[projects]
deleted.display_name=Excluir Projeto
type-1.display_name=Projeto Individual
type-2.display_name=Projeto do Repositório
type-3.display_name=Projeto da Organização
enter_fullscreen=Tela cheia
exit_fullscreen=Sair da Tela Cheia
[git.filemode]

View File

@ -109,6 +109,7 @@ copy_path=复制路径
copy_success=复制成功!
copy_error=复制失败
copy_type_unsupported=无法复制此类型的文件内容
copy_filename=复制文件名
write=撰写
preview=预览
@ -2434,6 +2435,9 @@ settings.event_workflow_job_desc=Gitea 工作流队列中、等待中、正在
settings.event_package=软件包
settings.event_package_desc=软件包在仓库中已创建或删除。
settings.branch_filter=分支过滤
settings.branch_filter_desc_1=推送、分支创建和分支删除事件的分支(和引用名称)白名单,以全局模式指定。如果为空或为 <code>*</code>,则报告所有分支和标签的事件。
settings.branch_filter_desc_2=使用 <code>refs/heads/</code> 或 <code>refs/tags/</code> 前缀来匹配完整的引用名称。
settings.branch_filter_desc_doc=请参阅 <a href="%[1]s">%[2]s</a> 文档了解语法。
settings.authorization_header=授权标头
settings.authorization_header_desc=当存在时将被作为授权标头包含在内。例如: %s。
settings.active=激活

View File

@ -15,13 +15,13 @@
"@github/relative-time-element": "4.4.8",
"@github/text-expander-element": "2.9.2",
"@mcaptcha/vanilla-glue": "0.1.0-alpha-3",
"@primer/octicons": "19.18.0",
"@primer/octicons": "19.19.0",
"@resvg/resvg-wasm": "2.6.2",
"@silverwind/vue3-calendar-heatmap": "2.0.6",
"@techknowlogick/license-checker-webpack-plugin": "0.3.0",
"add-asset-webpack-plugin": "3.1.1",
"ansi_up": "6.0.6",
"asciinema-player": "3.10.0",
"asciinema-player": "3.12.0",
"chart.js": "4.5.0",
"chartjs-adapter-dayjs-4": "1.0.4",
"chartjs-plugin-zoom": "2.2.0",
@ -31,22 +31,22 @@
"dayjs": "1.11.18",
"dropzone": "6.0.0-beta.2",
"easymde": "2.20.0",
"esbuild-loader": "4.3.0",
"esbuild-loader": "4.4.0",
"htmx.org": "2.0.7",
"idiomorph": "0.7.4",
"jquery": "3.7.1",
"katex": "0.16.22",
"katex": "0.16.23",
"mermaid": "11.12.0",
"mini-css-extract-plugin": "2.9.4",
"monaco-editor": "0.53.0",
"monaco-editor-webpack-plugin": "7.1.0",
"monaco-editor": "0.54.0",
"monaco-editor-webpack-plugin": "7.1.1",
"online-3d-viewer": "0.16.0",
"pdfobject": "2.3.1",
"perfect-debounce": "2.0.0",
"postcss": "8.5.6",
"postcss-loader": "8.2.0",
"sortablejs": "1.15.6",
"swagger-ui-dist": "5.29.1",
"swagger-ui-dist": "5.29.4",
"tailwindcss": "3.4.17",
"throttle-debounce": "5.0.2",
"tinycolor2": "1.6.0",
@ -60,13 +60,13 @@
"vue-bar-graph": "2.2.0",
"vue-chartjs": "5.3.2",
"vue-loader": "17.4.2",
"webpack": "5.102.0",
"webpack": "5.102.1",
"webpack-cli": "6.0.1",
"wrap-ansi": "9.0.2"
},
"devDependencies": {
"@eslint-community/eslint-plugin-eslint-comments": "4.5.0",
"@playwright/test": "1.55.1",
"@playwright/test": "1.56.0",
"@stylistic/eslint-plugin": "5.4.0",
"@stylistic/stylelint-plugin": "4.0.0",
"@types/codemirror": "5.60.16",
@ -79,10 +79,10 @@
"@types/throttle-debounce": "5.0.2",
"@types/tinycolor2": "1.4.6",
"@types/toastify-js": "1.12.4",
"@typescript-eslint/parser": "8.45.0",
"@typescript-eslint/parser": "8.46.0",
"@vitejs/plugin-vue": "6.0.1",
"@vitest/eslint-plugin": "1.3.13",
"eslint": "9.36.0",
"@vitest/eslint-plugin": "1.3.16",
"eslint": "9.37.0",
"eslint-import-resolver-typescript": "4.4.4",
"eslint-plugin-array-func": "5.1.0",
"eslint-plugin-github": "6.0.0",
@ -96,23 +96,23 @@
"eslint-plugin-vue-scoped-css": "2.12.0",
"eslint-plugin-wc": "3.0.2",
"globals": "16.4.0",
"happy-dom": "19.0.2",
"happy-dom": "20.0.2",
"markdownlint-cli": "0.45.0",
"material-icon-theme": "5.27.0",
"nolyfill": "1.0.44",
"postcss-html": "1.8.0",
"spectral-cli-bundle": "1.0.3",
"stylelint": "16.24.0",
"stylelint": "16.25.0",
"stylelint-config-recommended": "17.0.0",
"stylelint-declaration-block-no-ignored-properties": "2.8.0",
"stylelint-declaration-strict-value": "1.10.11",
"stylelint-value-no-unknown-custom-properties": "6.0.1",
"svgo": "4.0.0",
"typescript-eslint": "8.45.0",
"updates": "16.7.4",
"typescript-eslint": "8.46.0",
"updates": "16.8.0",
"vite-string-plugin": "1.4.6",
"vitest": "3.2.4",
"vue-tsc": "3.1.0"
"vue-tsc": "3.1.1"
},
"browserslist": [
"defaults"

File diff suppressed because it is too large

public/assets/img/svg/gitea-running.svg generated Normal file
View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" class="svg gitea-running" width="16" height="16" aria-hidden="true"><path fill="none" stroke="currentColor" stroke-width="2" d="M3.05 3.05a7 7 0 1 1 9.9 9.9 7 7 0 0 1-9.9-9.9Z" opacity=".5"/><path fill="currentColor" fill-rule="evenodd" d="M8 4a4 4 0 1 0 0 8 4 4 0 0 0 0-8" clip-rule="evenodd"/><path fill="currentColor" d="M14 8a6 6 0 0 0-6-6V0a8 8 0 0 1 8 8z"/></svg>


View File

@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="-1 -1 34 34" class="svg gitea-vscode" width="16" height="16" aria-hidden="true"><path d="M30.9 3.4 24.3.3a2 2 0 0 0-2.3.4L9.4 12.2 3.9 8c-.5-.4-1.2-.4-1.7 0L.4 9.8c-.5.5-.5 1.4 0 2L5.2 16 .4 20.3c-.5.6-.5 1.5 0 2L2.2 24c.5.5 1.2.5 1.7 0l5.5-4L22 31.2a2 2 0 0 0 2.3.4l6.6-3.2a2 2 0 0 0 1.1-1.8V5.2a2 2 0 0 0-1.1-1.8M24 23.3 14.4 16 24 8.7z"/></svg>


View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" class="svg octicon-comment-ai" width="16" height="16" aria-hidden="true"><path d="M7.75 1a.75.75 0 0 1 0 1.5h-5a.25.25 0 0 0-.25.25v7.5c0 .138.112.25.25.25h2c.199 0 .39.079.53.22.141.14.22.331.22.53v2.19l2.72-2.72a.75.75 0 0 1 .53-.22h4.5a.25.25 0 0 0 .25-.25v-2a.75.75 0 0 1 1.5 0v2c0 .464-.184.909-.513 1.237A1.75 1.75 0 0 1 13.25 12H9.06l-2.573 2.573A1.457 1.457 0 0 1 4 13.543V12H2.75A1.75 1.75 0 0 1 1 10.25v-7.5C1 1.784 1.784 1 2.75 1zm4.519-.837a.248.248 0 0 1 .466 0l.238.648a3.73 3.73 0 0 0 2.218 2.219l.649.238a.249.249 0 0 1 0 .467l-.649.238a3.73 3.73 0 0 0-2.218 2.218l-.238.649a.248.248 0 0 1-.466 0l-.239-.649a3.73 3.73 0 0 0-2.218-2.218l-.649-.238a.249.249 0 0 1 0-.467l.649-.238A3.73 3.73 0 0 0 12.03.811z"/></svg>


View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" class="svg octicon-vscode" width="16" height="16" aria-hidden="true"><path d="M12.515.537c1.169-1.215 3.48-.226 3.418 1.534a593 593 0 0 1 .062 11.538c.089 1.938-2.439 3.149-3.827 1.851A643 643 0 0 1 1.312 5.996a.93.93 0 0 1-.308-.609.92.92 0 0 1 .194-.655.87.87 0 0 1 1.232-.136l1.493 1.18a641 641 0 0 1 9.708 7.85c.008.011.036-.018.019-.017a606 606 0 0 1 .057-11.226c-1.308 1.157-2.63 2.275-3.926 3.411-.477.416-.948.831-1.424 1.253a.87.87 0 0 1-1.237-.061.9.9 0 0 1-.231-.641.94.94 0 0 1 .27-.628c.452-.456.902-.905 1.36-1.354 1.324-1.302 2.677-2.558 3.996-3.826M2.986 9.734a.8.8 0 0 1 1.184.06.95.95 0 0 1-.057 1.272l-1.228 1.2a.8.8 0 0 1-1.183-.06.95.95 0 0 1 .055-1.272z"/></svg>


View File

@ -217,19 +217,19 @@ func (s *Service) UpdateTask(
return nil, status.Errorf(codes.Internal, "load run: %v", err)
}
// don't create commit status for cron job
if task.Job.Run.ScheduleID == 0 {
actions_service.CreateCommitStatus(ctx, task.Job)
}
actions_service.CreateCommitStatusForRunJobs(ctx, task.Job.Run, task.Job)
if task.Status.IsDone() {
notify_service.WorkflowJobStatusUpdate(ctx, task.Job.Run.Repo, task.Job.Run.TriggerUser, task.Job, task)
}
if req.Msg.State.Result != runnerv1.Result_RESULT_UNSPECIFIED {
if err := actions_service.EmitJobsIfReady(task.Job.RunID); err != nil {
if err := actions_service.EmitJobsIfReadyByRun(task.Job.RunID); err != nil {
log.Error("Emit ready jobs of run %d: %v", task.Job.RunID, err)
}
if task.Job.Run.Status.IsDone() {
actions_service.NotifyWorkflowRunStatusUpdateWithReload(ctx, task.Job)
}
}
return connect.NewResponse(&runnerv1.UpdateTaskResponse{

View File

@ -1423,6 +1423,7 @@ func Routes() *web.Router {
m.Get("/tags/{sha}", repo.GetAnnotatedTag)
m.Get("/notes/{sha}", repo.GetNote)
}, context.ReferencesGitRepo(true), reqRepoReader(unit.TypeCode))
m.Post("/diffpatch", mustEnableEditor, reqToken(), bind(api.ApplyDiffPatchFileOptions{}), repo.ReqChangeRepoFileOptionsAndCheck, repo.ApplyDiffPatch)
m.Group("/contents", func() {
m.Get("", repo.GetContentsList)
m.Get("/*", repo.GetContents)
@ -1434,7 +1435,6 @@ func Routes() *web.Router {
m.Put("", bind(api.UpdateFileOptions{}), repo.ReqChangeRepoFileOptionsAndCheck, repo.UpdateFile)
m.Delete("", bind(api.DeleteFileOptions{}), repo.ReqChangeRepoFileOptionsAndCheck, repo.DeleteFile)
})
m.Post("/diffpatch", bind(api.ApplyDiffPatchFileOptions{}), repo.ReqChangeRepoFileOptionsAndCheck, repo.ApplyDiffPatch)
}, mustEnableEditor, reqToken())
}, reqRepoReader(unit.TypeCode), context.ReferencesGitRepo())
m.Group("/contents-ext", func() {

View File

@ -36,7 +36,7 @@ func ApplyDiffPatch(ctx *context.APIContext) {
// in: body
// required: true
// schema:
// "$ref": "#/definitions/UpdateFileOptions"
// "$ref": "#/definitions/ApplyDiffPatchFileOptions"
// responses:
// "200":
// "$ref": "#/responses/FileResponse"

View File

@ -121,6 +121,9 @@ type swaggerParameterBodies struct {
// in:body
GetFilesOptions api.GetFilesOptions
// in:body
ApplyDiffPatchFileOptions api.ApplyDiffPatchFileOptions
// in:body
ChangeFilesOptions api.ChangeFilesOptions

View File

@ -636,6 +636,7 @@ func handleAuthorizationCode(ctx *context.Context, form forms.AccessTokenForm, s
ErrorCode: oauth2_provider.AccessTokenErrorCodeInvalidRequest,
ErrorDescription: "cannot proceed your request",
})
return
}
resp, tokenErr := oauth2_provider.NewAccessTokenResponse(ctx, authorizationCode.Grant, serverKey, clientKey)
if tokenErr != nil {

View File

@ -27,7 +27,6 @@ import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/templates"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/modules/web"
"code.gitea.io/gitea/routers/common"
@ -36,6 +35,7 @@ import (
notify_service "code.gitea.io/gitea/services/notify"
"github.com/nektos/act/pkg/model"
"gopkg.in/yaml.v3"
"xorm.io/builder"
)
@ -420,12 +420,45 @@ func Rerun(ctx *context_module.Context) {
return
}
// check run (workflow-level) concurrency
job, jobs := getRunJobs(ctx, runIndex, jobIndex)
if ctx.Written() {
return
}
// reset run's start and stop time when it is done
if run.Status.IsDone() {
run.PreviousDuration = run.Duration()
run.Started = 0
run.Stopped = 0
if err := actions_model.UpdateRun(ctx, run, "started", "stopped", "previous_duration"); err != nil {
vars, err := actions_model.GetVariablesOfRun(ctx, run)
if err != nil {
ctx.ServerError("GetVariablesOfRun", fmt.Errorf("get run %d variables: %w", run.ID, err))
return
}
if run.RawConcurrency != "" {
var rawConcurrency model.RawConcurrency
if err := yaml.Unmarshal([]byte(run.RawConcurrency), &rawConcurrency); err != nil {
ctx.ServerError("UnmarshalRawConcurrency", fmt.Errorf("unmarshal raw concurrency: %w", err))
return
}
err = actions_service.EvaluateRunConcurrencyFillModel(ctx, run, &rawConcurrency, vars)
if err != nil {
ctx.ServerError("EvaluateRunConcurrencyFillModel", err)
return
}
run.Status, err = actions_service.PrepareToStartRunWithConcurrency(ctx, run)
if err != nil {
ctx.ServerError("PrepareToStartRunWithConcurrency", err)
return
}
}
if err := actions_model.UpdateRun(ctx, run, "started", "stopped", "previous_duration", "status", "concurrency_group", "concurrency_cancel"); err != nil {
ctx.ServerError("UpdateRun", err)
return
}
@ -437,16 +470,12 @@ func Rerun(ctx *context_module.Context) {
notify_service.WorkflowRunStatusUpdate(ctx, run.Repo, run.TriggerUser, run)
}
job, jobs := getRunJobs(ctx, runIndex, jobIndex)
if ctx.Written() {
return
}
isRunBlocked := run.Status == actions_model.StatusBlocked
if jobIndexStr == "" { // rerun all jobs
for _, j := range jobs {
// if the job has needs, it should be set to "blocked" status to wait for other jobs
shouldBlock := len(j.Needs) > 0
if err := rerunJob(ctx, j, shouldBlock); err != nil {
shouldBlockJob := len(j.Needs) > 0 || isRunBlocked
if err := rerunJob(ctx, j, shouldBlockJob); err != nil {
ctx.ServerError("RerunJob", err)
return
}
@ -459,8 +488,8 @@ func Rerun(ctx *context_module.Context) {
for _, j := range rerunJobs {
// jobs other than the specified one should be set to "blocked" status
shouldBlock := j.JobID != job.JobID
if err := rerunJob(ctx, j, shouldBlock); err != nil {
shouldBlockJob := j.JobID != job.JobID || isRunBlocked
if err := rerunJob(ctx, j, shouldBlockJob); err != nil {
ctx.ServerError("RerunJob", err)
return
}
@ -476,21 +505,43 @@ func rerunJob(ctx *context_module.Context, job *actions_model.ActionRunJob, shou
}
job.TaskID = 0
job.Status = actions_model.StatusWaiting
if shouldBlock {
job.Status = actions_model.StatusBlocked
}
job.Status = util.Iif(shouldBlock, actions_model.StatusBlocked, actions_model.StatusWaiting)
job.Started = 0
job.Stopped = 0
job.ConcurrencyGroup = ""
job.ConcurrencyCancel = false
job.IsConcurrencyEvaluated = false
if err := job.LoadRun(ctx); err != nil {
return err
}
vars, err := actions_model.GetVariablesOfRun(ctx, job.Run)
if err != nil {
return fmt.Errorf("get run %d variables: %w", job.Run.ID, err)
}
if job.RawConcurrency != "" && !shouldBlock {
err = actions_service.EvaluateJobConcurrencyFillModel(ctx, job.Run, job, vars)
if err != nil {
return fmt.Errorf("evaluate job concurrency: %w", err)
}
job.Status, err = actions_service.PrepareToStartJobWithConcurrency(ctx, job)
if err != nil {
return err
}
}
if err := db.WithTx(ctx, func(ctx context.Context) error {
_, err := actions_model.UpdateRunJob(ctx, job, builder.Eq{"status": status}, "task_id", "status", "started", "stopped")
updateCols := []string{"task_id", "status", "started", "stopped", "concurrency_group", "concurrency_cancel", "is_concurrency_evaluated"}
_, err := actions_model.UpdateRunJob(ctx, job, builder.Eq{"status": status}, updateCols...)
return err
}); err != nil {
return err
}
actions_service.CreateCommitStatus(ctx, job)
actions_service.CreateCommitStatusForRunJobs(ctx, job.Run, job)
notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil)
return nil
@ -518,52 +569,34 @@ func Logs(ctx *context_module.Context) {
func Cancel(ctx *context_module.Context) {
runIndex := getRunIndex(ctx)
_, jobs := getRunJobs(ctx, runIndex, -1)
firstJob, jobs := getRunJobs(ctx, runIndex, -1)
if ctx.Written() {
return
}
var updatedjobs []*actions_model.ActionRunJob
var updatedJobs []*actions_model.ActionRunJob
if err := db.WithTx(ctx, func(ctx context.Context) error {
for _, job := range jobs {
status := job.Status
if status.IsDone() {
continue
}
if job.TaskID == 0 {
job.Status = actions_model.StatusCancelled
job.Stopped = timeutil.TimeStampNow()
n, err := actions_model.UpdateRunJob(ctx, job, builder.Eq{"task_id": 0}, "status", "stopped")
if err != nil {
return err
}
if n == 0 {
return errors.New("job has changed, try again")
}
if n > 0 {
updatedjobs = append(updatedjobs, job)
}
continue
}
if err := actions_model.StopTask(ctx, job.TaskID, actions_model.StatusCancelled); err != nil {
return err
}
cancelledJobs, err := actions_model.CancelJobs(ctx, jobs)
if err != nil {
return fmt.Errorf("cancel jobs: %w", err)
}
updatedJobs = append(updatedJobs, cancelledJobs...)
return nil
}); err != nil {
ctx.ServerError("StopTask", err)
return
}
actions_service.CreateCommitStatus(ctx, jobs...)
actions_service.CreateCommitStatusForRunJobs(ctx, firstJob.Run, jobs...)
actions_service.EmitJobsIfReadyByJobs(updatedJobs)
for _, job := range updatedjobs {
for _, job := range updatedJobs {
_ = job.LoadAttributes(ctx)
notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil)
}
if len(updatedjobs) > 0 {
job := updatedjobs[0]
if len(updatedJobs) > 0 {
job := updatedJobs[0]
actions_service.NotifyWorkflowRunStatusUpdateWithReload(ctx, job)
}
ctx.JSONOK()
@ -579,40 +612,44 @@ func Approve(ctx *context_module.Context) {
run := current.Run
doer := ctx.Doer
var updatedjobs []*actions_model.ActionRunJob
var updatedJobs []*actions_model.ActionRunJob
if err := db.WithTx(ctx, func(ctx context.Context) error {
err := db.WithTx(ctx, func(ctx context.Context) (err error) {
run.NeedApproval = false
run.ApprovedBy = doer.ID
if err := actions_model.UpdateRun(ctx, run, "need_approval", "approved_by"); err != nil {
return err
}
for _, job := range jobs {
if len(job.Needs) == 0 && job.Status.IsBlocked() {
job.Status = actions_model.StatusWaiting
job.Status, err = actions_service.PrepareToStartJobWithConcurrency(ctx, job)
if err != nil {
return err
}
if job.Status == actions_model.StatusWaiting {
n, err := actions_model.UpdateRunJob(ctx, job, nil, "status")
if err != nil {
return err
}
if n > 0 {
updatedjobs = append(updatedjobs, job)
updatedJobs = append(updatedJobs, job)
}
}
}
return nil
}); err != nil {
})
if err != nil {
ctx.ServerError("UpdateRunJob", err)
return
}
actions_service.CreateCommitStatus(ctx, jobs...)
actions_service.CreateCommitStatusForRunJobs(ctx, current.Run, jobs...)
if len(updatedjobs) > 0 {
job := updatedjobs[0]
if len(updatedJobs) > 0 {
job := updatedJobs[0]
actions_service.NotifyWorkflowRunStatusUpdateWithReload(ctx, job)
}
for _, job := range updatedjobs {
for _, job := range updatedJobs {
_ = job.LoadAttributes(ctx)
notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil)
}
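
Condensed, the approval path now asks the concurrency layer for the next status instead of unconditionally flipping blocked jobs to waiting. A sketch of the per-job step, assuming the surrounding transaction and notifications are handled elsewhere; the helper name is hypothetical.

package example

import (
	"context"

	actions_model "code.gitea.io/gitea/models/actions"
	actions_service "code.gitea.io/gitea/services/actions"
)

// approveJob is a hypothetical helper mirroring the loop body above.
func approveJob(ctx context.Context, job *actions_model.ActionRunJob) (updated bool, err error) {
	if len(job.Needs) != 0 || !job.Status.IsBlocked() {
		return false, nil
	}
	// StatusWaiting if the concurrency group is free, StatusBlocked otherwise
	job.Status, err = actions_service.PrepareToStartJobWithConcurrency(ctx, job)
	if err != nil {
		return false, err
	}
	if job.Status != actions_model.StatusWaiting {
		return false, nil
	}
	n, err := actions_model.UpdateRunJob(ctx, job, nil, "status")
	return n > 0, err
}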

View File

@ -296,14 +296,14 @@ func EditFile(ctx *context.Context) {
}
defer dataRc.Close()
ctx.Data["FileSize"] = fInfo.fileSize
ctx.Data["FileSize"] = fInfo.blobOrLfsSize
// Only some file types are editable online as text.
if fInfo.isLFSFile() {
ctx.Data["NotEditableReason"] = ctx.Tr("repo.editor.cannot_edit_lfs_files")
} else if !fInfo.st.IsRepresentableAsText() {
ctx.Data["NotEditableReason"] = ctx.Tr("repo.editor.cannot_edit_non_text_files")
} else if fInfo.fileSize >= setting.UI.MaxDisplayFileSize {
} else if fInfo.blobOrLfsSize >= setting.UI.MaxDisplayFileSize {
ctx.Data["NotEditableReason"] = ctx.Tr("repo.editor.cannot_edit_too_large_file")
}

View File

@ -7,11 +7,10 @@ package repo
import (
"bytes"
"compress/gzip"
gocontext "context"
"fmt"
"net/http"
"os"
"path/filepath"
"path"
"regexp"
"slices"
"strconv"
@ -27,6 +26,7 @@ import (
"code.gitea.io/gitea/models/unit"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/git/gitcmd"
"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/log"
repo_module "code.gitea.io/gitea/modules/repository"
"code.gitea.io/gitea/modules/setting"
@ -342,11 +342,11 @@ type serviceHandler struct {
environ []string
}
func (h *serviceHandler) getRepoDir() string {
func (h *serviceHandler) getStorageRepo() gitrepo.Repository {
if h.isWiki {
return h.repo.WikiPath()
return h.repo.WikiStorageRepo()
}
return h.repo.RepoPath()
return h.repo
}
func setHeaderNoCache(ctx *context.Context) {
@ -378,19 +378,10 @@ func (h *serviceHandler) sendFile(ctx *context.Context, contentType, file string
ctx.Resp.WriteHeader(http.StatusBadRequest)
return
}
reqFile := filepath.Join(h.getRepoDir(), filepath.Clean(file))
fi, err := os.Stat(reqFile)
if os.IsNotExist(err) {
ctx.Resp.WriteHeader(http.StatusNotFound)
return
}
fs := gitrepo.GetRepoFS(h.getStorageRepo())
ctx.Resp.Header().Set("Content-Type", contentType)
ctx.Resp.Header().Set("Content-Length", strconv.FormatInt(fi.Size(), 10))
// http.TimeFormat required a UTC time, refer to https://pkg.go.dev/net/http#TimeFormat
ctx.Resp.Header().Set("Last-Modified", fi.ModTime().UTC().Format(http.TimeFormat))
http.ServeFile(ctx.Resp, ctx.Req, reqFile)
http.ServeFileFS(ctx.Resp, ctx.Req, fs, path.Clean(file))
}
// one or more key=value pairs separated by colons
@ -416,6 +407,7 @@ func serviceRPC(ctx *context.Context, h *serviceHandler, service string) {
expectedContentType := fmt.Sprintf("application/x-git-%s-request", service)
if ctx.Req.Header.Get("Content-Type") != expectedContentType {
log.Error("Content-Type (%q) doesn't match expected: %q", ctx.Req.Header.Get("Content-Type"), expectedContentType)
// FIXME: why is it 401 if the content type is unexpected?
ctx.Resp.WriteHeader(http.StatusUnauthorized)
return
}
@ -423,6 +415,7 @@ func serviceRPC(ctx *context.Context, h *serviceHandler, service string) {
cmd, err := prepareGitCmdWithAllowedService(service)
if err != nil {
log.Error("Failed to prepareGitCmdWithService: %v", err)
// FIXME: why is it 401 if the service type isn't supported?
ctx.Resp.WriteHeader(http.StatusUnauthorized)
return
}
@ -449,17 +442,14 @@ func serviceRPC(ctx *context.Context, h *serviceHandler, service string) {
}
var stderr bytes.Buffer
if err := cmd.AddArguments("--stateless-rpc").
AddDynamicArguments(h.getRepoDir()).
WithDir(h.getRepoDir()).
if err := gitrepo.RunCmd(ctx, h.getStorageRepo(), cmd.AddArguments("--stateless-rpc", ".").
WithEnv(append(os.Environ(), h.environ...)).
WithStderr(&stderr).
WithStdin(reqBody).
WithStdout(ctx.Resp).
WithUseContextTimeout(true).
Run(ctx); err != nil {
WithUseContextTimeout(true)); err != nil {
if !git.IsErrCanceledOrKilled(err) {
log.Error("Fail to serve RPC(%s) in %s: %v - %s", service, h.getRepoDir(), err, stderr.String())
log.Error("Fail to serve RPC(%s) in %s: %v - %s", service, h.getStorageRepo().RelativePath(), err, stderr.String())
}
return
}
@ -496,14 +486,6 @@ func getServiceType(ctx *context.Context) string {
return ""
}
func updateServerInfo(ctx gocontext.Context, dir string) []byte {
out, _, err := gitcmd.NewCommand("update-server-info").WithDir(dir).RunStdBytes(ctx)
if err != nil {
log.Error(fmt.Sprintf("%v - %s", err, string(out)))
}
return out
}
func packetWrite(str string) []byte {
s := strconv.FormatInt(int64(len(str)+4), 16)
if len(s)%4 != 0 {
@ -527,10 +509,8 @@ func GetInfoRefs(ctx *context.Context) {
}
h.environ = append(os.Environ(), h.environ...)
refs, _, err := cmd.AddArguments("--stateless-rpc", "--advertise-refs", ".").
WithEnv(h.environ).
WithDir(h.getRepoDir()).
RunStdBytes(ctx)
refs, _, err := gitrepo.RunCmdBytes(ctx, h.getStorageRepo(), cmd.AddArguments("--stateless-rpc", "--advertise-refs", ".").
WithEnv(h.environ))
if err != nil {
log.Error(fmt.Sprintf("%v - %s", err, string(refs)))
}
@ -541,7 +521,9 @@ func GetInfoRefs(ctx *context.Context) {
_, _ = ctx.Resp.Write([]byte("0000"))
_, _ = ctx.Resp.Write(refs)
} else {
updateServerInfo(ctx, h.getRepoDir())
if err := gitrepo.UpdateServerInfo(ctx, h.getStorageRepo()); err != nil {
log.Error("Failed to update server info: %v", err)
}
h.sendFile(ctx, "text/plain; charset=utf-8", "info/refs")
}
}
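
The packetWrite helper above frames the service announcement in git's pkt-line format: a 4-digit, zero-padded hexadecimal length prefix that counts itself plus the payload. Worked through for the smart-HTTP announcement (a standalone illustration of the format, not code from the handler):

package main

import "fmt"

func main() {
	payload := "# service=git-upload-pack\n" // 26 bytes
	// pkt-line prefix = len(payload) + 4 (the prefix counts itself), in 4 hex digits
	prefix := fmt.Sprintf("%04x", len(payload)+4) // "001e"
	fmt.Printf("%q\n", prefix+payload)            // "001e# service=git-upload-pack\n"
}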

View File

@ -270,8 +270,7 @@ func LFSFileGet(ctx *context.Context) {
// FIXME: there is no IsPlainText set, but template uses it
ctx.Data["IsTextFile"] = st.IsText()
ctx.Data["FileSize"] = meta.Size
// FIXME: the last field is the URL-base64-encoded filename, it should not be "direct"
ctx.Data["RawFileLink"] = fmt.Sprintf("%s%s/%s.git/info/lfs/objects/%s/%s", setting.AppURL, url.PathEscape(ctx.Repo.Repository.OwnerName), url.PathEscape(ctx.Repo.Repository.Name), url.PathEscape(meta.Oid), "direct")
ctx.Data["RawFileLink"] = fmt.Sprintf("%s/%s/%s.git/info/lfs/objects/%s", setting.AppSubURL, url.PathEscape(ctx.Repo.Repository.OwnerName), url.PathEscape(ctx.Repo.Repository.Name), url.PathEscape(meta.Oid))
switch {
case st.IsRepresentableAsText():
if meta.Size >= setting.UI.MaxDisplayFileSize {

View File

@ -29,7 +29,6 @@ import (
"code.gitea.io/gitea/modules/validation"
"code.gitea.io/gitea/modules/web"
actions_service "code.gitea.io/gitea/services/actions"
asymkey_service "code.gitea.io/gitea/services/asymkey"
"code.gitea.io/gitea/services/context"
"code.gitea.io/gitea/services/forms"
"code.gitea.io/gitea/services/migrations"
@ -62,7 +61,7 @@ func SettingsCtxData(ctx *context.Context) {
ctx.Data["MinimumMirrorInterval"] = setting.Mirror.MinInterval
ctx.Data["CanConvertFork"] = ctx.Repo.Repository.IsFork && ctx.Doer.CanCreateRepoIn(ctx.Repo.Repository.Owner)
signing, _ := asymkey_service.SigningKey(ctx, ctx.Repo.Repository.RepoPath())
signing, _ := gitrepo.GetSigningKey(ctx, ctx.Repo.Repository)
ctx.Data["SigningKeyAvailable"] = signing != nil
ctx.Data["SigningSettings"] = setting.Repository.Signing
ctx.Data["IsRepoIndexerEnabled"] = setting.Indexer.RepoIndexerEnabled
@ -105,7 +104,7 @@ func SettingsPost(ctx *context.Context) {
ctx.Data["DefaultMirrorInterval"] = setting.Mirror.DefaultInterval
ctx.Data["MinimumMirrorInterval"] = setting.Mirror.MinInterval
signing, _ := asymkey_service.SigningKey(ctx, ctx.Repo.Repository.RepoPath())
signing, _ := gitrepo.GetSigningKey(ctx, ctx.Repo.Repository)
ctx.Data["SigningKeyAvailable"] = signing != nil
ctx.Data["SigningSettings"] = setting.Repository.Signing
ctx.Data["IsRepoIndexerEnabled"] = setting.Indexer.RepoIndexerEnabled

View File

@ -60,9 +60,9 @@ const (
)
type fileInfo struct {
fileSize int64
lfsMeta *lfs.Pointer
st typesniffer.SniffedType
blobOrLfsSize int64
lfsMeta *lfs.Pointer
st typesniffer.SniffedType
}
func (fi *fileInfo) isLFSFile() bool {
@ -81,7 +81,7 @@ func getFileReader(ctx gocontext.Context, repoID int64, blob *git.Blob) (buf []b
n, _ := util.ReadAtMost(dataRc, buf)
buf = buf[:n]
fi = &fileInfo{fileSize: blob.Size(), st: typesniffer.DetectContentType(buf)}
fi = &fileInfo{blobOrLfsSize: blob.Size(), st: typesniffer.DetectContentType(buf)}
// FIXME: what happens when README file is an image?
if !fi.st.IsText() || !setting.LFS.StartServer {
@ -114,7 +114,7 @@ func getFileReader(ctx gocontext.Context, repoID int64, blob *git.Blob) (buf []b
}
buf = buf[:n]
fi.st = typesniffer.DetectContentType(buf)
fi.fileSize = blob.Size()
fi.blobOrLfsSize = meta.Pointer.Size
fi.lfsMeta = &meta.Pointer
return buf, dataRc, fi, nil
}

View File

@ -227,7 +227,7 @@ func prepareFileView(ctx *context.Context, entry *git.TreeEntry) {
}
ctx.Data["IsLFSFile"] = fInfo.isLFSFile()
ctx.Data["FileSize"] = fInfo.fileSize
ctx.Data["FileSize"] = fInfo.blobOrLfsSize
ctx.Data["IsRepresentableAsText"] = fInfo.st.IsRepresentableAsText()
ctx.Data["IsExecutable"] = entry.IsExecutable()
ctx.Data["CanCopyContent"] = fInfo.st.IsRepresentableAsText() || fInfo.st.IsImage()
@ -244,7 +244,7 @@ func prepareFileView(ctx *context.Context, entry *git.TreeEntry) {
utf8Reader := charset.ToUTF8WithFallbackReader(io.MultiReader(bytes.NewReader(buf), dataRc), charset.ConvertOpts{})
switch {
case fInfo.fileSize >= setting.UI.MaxDisplayFileSize:
case fInfo.blobOrLfsSize >= setting.UI.MaxDisplayFileSize:
ctx.Data["IsFileTooLarge"] = true
case handleFileViewRenderMarkup(ctx, entry.Name(), fInfo.st, buf, utf8Reader):
// it also sets ctx.Data["FileContent"] and more

View File

@ -6,7 +6,6 @@ package repo
import (
"errors"
"fmt"
"html/template"
"net/http"
"path"
"strconv"
@ -76,16 +75,24 @@ func prepareOpenWithEditorApps(ctx *context.Context) {
}
for _, app := range apps {
schema, _, _ := strings.Cut(app.OpenURL, ":")
var iconHTML template.HTML
if schema == "vscode" || schema == "vscodium" || schema == "jetbrains" {
iconHTML = svg.RenderHTML("gitea-"+schema, 16)
} else {
iconHTML = svg.RenderHTML("gitea-git", 16) // TODO: it could support user's customized icon in the future
var iconName string
switch schema {
case "vscode":
iconName = "octicon-vscode"
case "vscodium":
iconName = "gitea-vscodium"
case "jetbrains":
iconName = "gitea-jetbrains"
default:
// TODO: it could support user's customized icon in the future
iconName = "gitea-git"
}
tmplApps = append(tmplApps, map[string]any{
"DisplayName": app.DisplayName,
"OpenURL": app.OpenURL,
"IconHTML": iconHTML,
"IconHTML": svg.RenderHTML(iconName, 16),
})
}
ctx.Data["OpenWithEditorApps"] = tmplApps

View File

@ -171,7 +171,7 @@ func prepareToRenderReadmeFile(ctx *context.Context, subfolder string, readmeFil
ctx.Data["FileIsText"] = fInfo.st.IsText()
ctx.Data["FileTreePath"] = readmeFullPath
ctx.Data["FileSize"] = fInfo.fileSize
ctx.Data["FileSize"] = fInfo.blobOrLfsSize
ctx.Data["IsLFSFile"] = fInfo.isLFSFile()
if fInfo.isLFSFile() {
@ -183,7 +183,7 @@ func prepareToRenderReadmeFile(ctx *context.Context, subfolder string, readmeFil
return
}
if fInfo.fileSize >= setting.UI.MaxDisplayFileSize {
if fInfo.blobOrLfsSize >= setting.UI.MaxDisplayFileSize {
// Pretend that this is a normal text file to display 'This file is too large to be shown'
ctx.Data["IsFileTooLarge"] = true
return

View File

@ -15,6 +15,7 @@ import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
webhook_module "code.gitea.io/gitea/modules/webhook"
notify_service "code.gitea.io/gitea/services/notify"
)
@ -36,13 +37,19 @@ func StopEndlessTasks(ctx context.Context) error {
}
func notifyWorkflowJobStatusUpdate(ctx context.Context, jobs []*actions_model.ActionRunJob) {
if len(jobs) > 0 {
CreateCommitStatus(ctx, jobs...)
for _, job := range jobs {
_ = job.LoadAttributes(ctx)
notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil)
if len(jobs) == 0 {
return
}
for _, job := range jobs {
if err := job.LoadAttributes(ctx); err != nil {
log.Error("Failed to load job attributes: %v", err)
continue
}
job := jobs[0]
CreateCommitStatusForRunJobs(ctx, job.Run, job)
notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil)
}
if job := jobs[0]; job.Run != nil && job.Run.Repo != nil {
notify_service.WorkflowRunStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job.Run)
}
}
@ -50,15 +57,84 @@ func notifyWorkflowJobStatusUpdate(ctx context.Context, jobs []*actions_model.Ac
func CancelPreviousJobs(ctx context.Context, repoID int64, ref, workflowID string, event webhook_module.HookEventType) error {
jobs, err := actions_model.CancelPreviousJobs(ctx, repoID, ref, workflowID, event)
notifyWorkflowJobStatusUpdate(ctx, jobs)
EmitJobsIfReadyByJobs(jobs)
return err
}
func CleanRepoScheduleTasks(ctx context.Context, repo *repo_model.Repository) error {
jobs, err := actions_model.CleanRepoScheduleTasks(ctx, repo)
notifyWorkflowJobStatusUpdate(ctx, jobs)
EmitJobsIfReadyByJobs(jobs)
return err
}
func shouldBlockJobByConcurrency(ctx context.Context, job *actions_model.ActionRunJob) (bool, error) {
if job.RawConcurrency != "" && !job.IsConcurrencyEvaluated {
// when the job depends on other jobs, we cannot evaluate its concurrency, so it should be blocked and will be evaluated again when its dependencies are done
return true, nil
}
if job.ConcurrencyGroup == "" || job.ConcurrencyCancel {
return false, nil
}
runs, jobs, err := actions_model.GetConcurrentRunsAndJobs(ctx, job.RepoID, job.ConcurrencyGroup, []actions_model.Status{actions_model.StatusRunning})
if err != nil {
return false, fmt.Errorf("GetConcurrentRunsAndJobs: %w", err)
}
return len(runs) > 0 || len(jobs) > 0, nil
}
// PrepareToStartJobWithConcurrency prepares a job to start based on its evaluated concurrency group, cancelling previous jobs in that group if necessary.
// It returns the new status of the job (either StatusBlocked or StatusWaiting) and any error encountered during the process.
func PrepareToStartJobWithConcurrency(ctx context.Context, job *actions_model.ActionRunJob) (actions_model.Status, error) {
shouldBlock, err := shouldBlockJobByConcurrency(ctx, job)
if err != nil {
return actions_model.StatusBlocked, err
}
// even if the current job is blocked, we still need to cancel previous "waiting/blocked" jobs in the same concurrency group
jobs, err := actions_model.CancelPreviousJobsByJobConcurrency(ctx, job)
if err != nil {
return actions_model.StatusBlocked, fmt.Errorf("CancelPreviousJobsByJobConcurrency: %w", err)
}
notifyWorkflowJobStatusUpdate(ctx, jobs)
return util.Iif(shouldBlock, actions_model.StatusBlocked, actions_model.StatusWaiting), nil
}
func shouldBlockRunByConcurrency(ctx context.Context, actionRun *actions_model.ActionRun) (bool, error) {
if actionRun.ConcurrencyGroup == "" || actionRun.ConcurrencyCancel {
return false, nil
}
runs, jobs, err := actions_model.GetConcurrentRunsAndJobs(ctx, actionRun.RepoID, actionRun.ConcurrencyGroup, []actions_model.Status{actions_model.StatusRunning})
if err != nil {
return false, fmt.Errorf("find concurrent runs and jobs: %w", err)
}
return len(runs) > 0 || len(jobs) > 0, nil
}
// PrepareToStartRunWithConcurrency prepares a run to start based on its evaluated concurrency group, cancelling previous jobs in that group if necessary.
// It returns the new status of the run (either StatusBlocked or StatusWaiting) and any error encountered during the process.
func PrepareToStartRunWithConcurrency(ctx context.Context, run *actions_model.ActionRun) (actions_model.Status, error) {
shouldBlock, err := shouldBlockRunByConcurrency(ctx, run)
if err != nil {
return actions_model.StatusBlocked, err
}
// even if the current run is blocked, we still need to cancel previous "waiting/blocked" jobs in the same concurrency group
jobs, err := actions_model.CancelPreviousJobsByRunConcurrency(ctx, run)
if err != nil {
return actions_model.StatusBlocked, fmt.Errorf("CancelPreviousJobsByRunConcurrency: %w", err)
}
notifyWorkflowJobStatusUpdate(ctx, jobs)
return util.Iif(shouldBlock, actions_model.StatusBlocked, actions_model.StatusWaiting), nil
}
func stopTasks(ctx context.Context, opts actions_model.FindTaskOptions) error {
tasks, err := db.Find[actions_model.ActionTask](ctx, opts)
if err != nil {
@ -95,6 +171,7 @@ func stopTasks(ctx context.Context, opts actions_model.FindTaskOptions) error {
}
notifyWorkflowJobStatusUpdate(ctx, jobs)
EmitJobsIfReadyByJobs(jobs)
return nil
}
@ -103,7 +180,7 @@ func stopTasks(ctx context.Context, opts actions_model.FindTaskOptions) error {
func CancelAbandonedJobs(ctx context.Context) error {
jobs, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{
Statuses: []actions_model.Status{actions_model.StatusWaiting, actions_model.StatusBlocked},
UpdatedBefore: timeutil.TimeStamp(time.Now().Add(-setting.Actions.AbandonedJobTimeout).Unix()),
UpdatedBefore: timeutil.TimeStampNow().AddDuration(-setting.Actions.AbandonedJobTimeout),
})
if err != nil {
log.Warn("find abandoned tasks: %v", err)
@ -114,6 +191,7 @@ func CancelAbandonedJobs(ctx context.Context) error {
// Collect one job per run to send workflow run status update
updatedRuns := map[int64]*actions_model.ActionRunJob{}
updatedJobs := []*actions_model.ActionRunJob{}
for _, job := range jobs {
job.Status = actions_model.StatusCancelled
@ -136,8 +214,12 @@ func CancelAbandonedJobs(ctx context.Context) error {
log.Warn("cancel abandoned job %v: %v", job.ID, err)
// go on
}
CreateCommitStatus(ctx, job)
if job.Run == nil || job.Run.Repo == nil {
continue // attributes failed to load; the following code depends on "Run.Repo" and would fail, so skip this job
}
CreateCommitStatusForRunJobs(ctx, job.Run, job)
if updated {
updatedJobs = append(updatedJobs, job)
notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil)
}
}
@ -145,6 +227,7 @@ func CancelAbandonedJobs(ctx context.Context) error {
for _, job := range updatedRuns {
notify_service.WorkflowRunStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job.Run)
}
EmitJobsIfReadyByJobs(updatedJobs)
return nil
}
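
A minimal sketch of how a caller might gate a freshly created run on its concurrency group, assuming the run's concurrency fields have already been evaluated and filled; the helper name is hypothetical.

package example

import (
	"context"

	actions_model "code.gitea.io/gitea/models/actions"
	actions_service "code.gitea.io/gitea/services/actions"
)

// startRunIfAllowed is a hypothetical caller of PrepareToStartRunWithConcurrency.
func startRunIfAllowed(ctx context.Context, run *actions_model.ActionRun) error {
	status, err := actions_service.PrepareToStartRunWithConcurrency(ctx, run)
	if err != nil {
		return err
	}
	run.Status = status // StatusWaiting, or StatusBlocked while the group is busy
	return actions_model.UpdateRun(ctx, run, "status", "concurrency_group", "concurrency_cancel")
}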

View File

@ -8,14 +8,15 @@ import (
"errors"
"fmt"
"path"
"strconv"
actions_model "code.gitea.io/gitea/models/actions"
"code.gitea.io/gitea/models/db"
git_model "code.gitea.io/gitea/models/git"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
actions_module "code.gitea.io/gitea/modules/actions"
"code.gitea.io/gitea/modules/commitstatus"
git "code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
webhook_module "code.gitea.io/gitea/modules/webhook"
commitstatus_service "code.gitea.io/gitea/services/repository/commitstatus"
@ -23,38 +24,46 @@ import (
"github.com/nektos/act/pkg/jobparser"
)
// CreateCommitStatus creates a commit status for the given job.
// CreateCommitStatusForRunJobs creates a commit status for the given jobs if the run has a supported event and a related commit.
// It won't return an error when it fails, but will log it, because it's not critical.
func CreateCommitStatus(ctx context.Context, jobs ...*actions_model.ActionRunJob) {
func CreateCommitStatusForRunJobs(ctx context.Context, run *actions_model.ActionRun, jobs ...*actions_model.ActionRunJob) {
// don't create commit status for cron job
if run.ScheduleID != 0 {
return
}
event, commitID, err := getCommitStatusEventNameAndCommitID(run)
if err != nil {
log.Error("GetCommitStatusEventNameAndSHA: %v", err)
}
if event == "" || commitID == "" {
return // unsupported event, or no commit id, or error occurs, do nothing
}
if err = run.LoadAttributes(ctx); err != nil {
log.Error("run.LoadAttributes: %v", err)
return
}
for _, job := range jobs {
if err := createCommitStatus(ctx, job); err != nil {
if err = createCommitStatus(ctx, run.Repo, event, commitID, run, job); err != nil {
log.Error("Failed to create commit status for job %d: %v", job.ID, err)
}
}
}
func createCommitStatus(ctx context.Context, job *actions_model.ActionRunJob) error {
if err := job.LoadAttributes(ctx); err != nil {
return fmt.Errorf("load run: %w", err)
}
run := job.Run
var (
sha string
event string
)
func getCommitStatusEventNameAndCommitID(run *actions_model.ActionRun) (event, commitID string, _ error) {
switch run.Event {
case webhook_module.HookEventPush:
event = "push"
payload, err := run.GetPushEventPayload()
if err != nil {
return fmt.Errorf("GetPushEventPayload: %w", err)
return "", "", fmt.Errorf("GetPushEventPayload: %w", err)
}
if payload.HeadCommit == nil {
return errors.New("head commit is missing in event payload")
return "", "", errors.New("head commit is missing in event payload")
}
sha = payload.HeadCommit.ID
commitID = payload.HeadCommit.ID
case // pull_request
webhook_module.HookEventPullRequest,
webhook_module.HookEventPullRequestSync,
@ -69,32 +78,33 @@ func createCommitStatus(ctx context.Context, job *actions_model.ActionRunJob) er
}
payload, err := run.GetPullRequestEventPayload()
if err != nil {
return fmt.Errorf("GetPullRequestEventPayload: %w", err)
return "", "", fmt.Errorf("GetPullRequestEventPayload: %w", err)
}
if payload.PullRequest == nil {
return errors.New("pull request is missing in event payload")
return "", "", errors.New("pull request is missing in event payload")
} else if payload.PullRequest.Head == nil {
return errors.New("head of pull request is missing in event payload")
return "", "", errors.New("head of pull request is missing in event payload")
}
sha = payload.PullRequest.Head.Sha
commitID = payload.PullRequest.Head.Sha
case webhook_module.HookEventRelease:
event = string(run.Event)
sha = run.CommitSHA
default:
return nil
commitID = run.CommitSHA
default: // do nothing, return empty
}
return event, commitID, nil
}
repo := run.Repo
func createCommitStatus(ctx context.Context, repo *repo_model.Repository, event, commitID string, run *actions_model.ActionRun, job *actions_model.ActionRunJob) error {
// TODO: store workflow name as a field in ActionRun to avoid parsing
runName := path.Base(run.WorkflowID)
if wfs, err := jobparser.Parse(job.WorkflowPayload); err == nil && len(wfs) > 0 {
runName = wfs[0].Name
}
ctxname := fmt.Sprintf("%s / %s (%s)", runName, job.Name, event)
ctxName := fmt.Sprintf("%s / %s (%s)", runName, job.Name, event)
state := toCommitStatus(job.Status)
if statuses, err := git_model.GetLatestCommitStatus(ctx, repo.ID, sha, db.ListOptionsAll); err == nil {
if statuses, err := git_model.GetLatestCommitStatus(ctx, repo.ID, commitID, db.ListOptionsAll); err == nil {
for _, v := range statuses {
if v.Context == ctxname {
if v.Context == ctxName {
if v.State == state {
// no need to update
return nil
@ -106,7 +116,7 @@ func createCommitStatus(ctx context.Context, job *actions_model.ActionRunJob) er
return fmt.Errorf("GetLatestCommitStatus: %w", err)
}
description := ""
var description string
switch job.Status {
// TODO: if we want to support descriptions in different languages, we need to support i18n placeholders in them
case actions_model.StatusSuccess:
@ -123,6 +133,8 @@ func createCommitStatus(ctx context.Context, job *actions_model.ActionRunJob) er
description = "Waiting to run"
case actions_model.StatusBlocked:
description = "Blocked by required conditions"
default:
description = "Unknown status: " + strconv.Itoa(int(job.Status))
}
index, err := getIndexOfJob(ctx, job)
@ -131,20 +143,16 @@ func createCommitStatus(ctx context.Context, job *actions_model.ActionRunJob) er
}
creator := user_model.NewActionsUser()
commitID, err := git.NewIDFromString(sha)
if err != nil {
return fmt.Errorf("HashTypeInterfaceFromHashString: %w", err)
}
status := git_model.CommitStatus{
SHA: sha,
SHA: commitID,
TargetURL: fmt.Sprintf("%s/jobs/%d", run.Link(), index),
Description: description,
Context: ctxname,
Context: ctxName,
CreatorID: creator.ID,
State: state,
}
return commitstatus_service.CreateCommitStatus(ctx, repo, creator, commitID.String(), &status)
return commitstatus_service.CreateCommitStatus(ctx, repo, creator, commitID, &status)
}
func toCommitStatus(status actions_model.Status) commitstatus.CommitStatusState {

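createCommitStatus above derives a stable context name of the form "workflow / job (event)" and skips the write when the latest status for that context already has the same state. A minimal standalone sketch of that dedup rule (simplified stand-in types, not the git_model ones):

package main

import "fmt"

// commitStatus is a simplified stand-in for a stored commit status.
type commitStatus struct {
	Context string
	State   string
}

// needsUpdate reports whether a new status should be written for ctxName:
// only when there is no status for that context yet, or its state changed.
func needsUpdate(latest []commitStatus, ctxName, state string) bool {
	for _, s := range latest {
		if s.Context == ctxName {
			return s.State != state
		}
	}
	return true
}

func main() {
	ctxName := fmt.Sprintf("%s / %s (%s)", "CI", "build", "push")
	latest := []commitStatus{{Context: ctxName, State: "pending"}}

	fmt.Println(needsUpdate(latest, ctxName, "pending")) // false: nothing to do
	fmt.Println(needsUpdate(latest, ctxName, "success")) // true: the job finished, update it
}
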
View File

@ -0,0 +1,115 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"context"
"fmt"
actions_model "code.gitea.io/gitea/models/actions"
"code.gitea.io/gitea/modules/json"
api "code.gitea.io/gitea/modules/structs"
"github.com/nektos/act/pkg/jobparser"
act_model "github.com/nektos/act/pkg/model"
"gopkg.in/yaml.v3"
)
// EvaluateRunConcurrencyFillModel evaluates the expressions in a run-level (workflow) concurrency,
// and fills the run's model fields with `concurrency.group` and `concurrency.cancel-in-progress`.
// Workflow-level concurrency doesn't depend on the job outputs, so it can always be evaluated if there is no syntax error.
// See https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#concurrency
func EvaluateRunConcurrencyFillModel(ctx context.Context, run *actions_model.ActionRun, wfRawConcurrency *act_model.RawConcurrency, vars map[string]string) error {
if err := run.LoadAttributes(ctx); err != nil {
return fmt.Errorf("run LoadAttributes: %w", err)
}
actionsRunCtx := GenerateGiteaContext(run, nil)
jobResults := map[string]*jobparser.JobResult{"": {}}
inputs, err := getInputsFromRun(run)
if err != nil {
return fmt.Errorf("get inputs: %w", err)
}
rawConcurrency, err := yaml.Marshal(wfRawConcurrency)
if err != nil {
return fmt.Errorf("marshal raw concurrency: %w", err)
}
run.RawConcurrency = string(rawConcurrency)
run.ConcurrencyGroup, run.ConcurrencyCancel, err = jobparser.EvaluateConcurrency(wfRawConcurrency, "", nil, actionsRunCtx, jobResults, vars, inputs)
if err != nil {
return fmt.Errorf("evaluate concurrency: %w", err)
}
return nil
}
func findJobNeedsAndFillJobResults(ctx context.Context, job *actions_model.ActionRunJob) (map[string]*jobparser.JobResult, error) {
taskNeeds, err := FindTaskNeeds(ctx, job)
if err != nil {
return nil, fmt.Errorf("find task needs: %w", err)
}
jobResults := make(map[string]*jobparser.JobResult, len(taskNeeds))
for jobID, taskNeed := range taskNeeds {
jobResult := &jobparser.JobResult{
Result: taskNeed.Result.String(),
Outputs: taskNeed.Outputs,
}
jobResults[jobID] = jobResult
}
jobResults[job.JobID] = &jobparser.JobResult{
Needs: job.Needs,
}
return jobResults, nil
}
// EvaluateJobConcurrencyFillModel evaluates the expressions in a job-level concurrency,
// and fills the job's model fields with `concurrency.group` and `concurrency.cancel-in-progress`.
// Job-level concurrency may depend on other jobs' outputs (via `needs`): `concurrency.group: my-group-${{ needs.job1.outputs.out1 }}`
// If the needed jobs haven't been executed yet, this evaluation will also fail.
// See https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#jobsjob_idconcurrency
func EvaluateJobConcurrencyFillModel(ctx context.Context, run *actions_model.ActionRun, actionRunJob *actions_model.ActionRunJob, vars map[string]string) error {
if err := actionRunJob.LoadAttributes(ctx); err != nil {
return fmt.Errorf("job LoadAttributes: %w", err)
}
var rawConcurrency act_model.RawConcurrency
if err := yaml.Unmarshal([]byte(actionRunJob.RawConcurrency), &rawConcurrency); err != nil {
return fmt.Errorf("unmarshal raw concurrency: %w", err)
}
actionsJobCtx := GenerateGiteaContext(run, actionRunJob)
jobResults, err := findJobNeedsAndFillJobResults(ctx, actionRunJob)
if err != nil {
return fmt.Errorf("find job needs and fill job results: %w", err)
}
inputs, err := getInputsFromRun(run)
if err != nil {
return fmt.Errorf("get inputs: %w", err)
}
workflowJob, err := actionRunJob.ParseJob()
if err != nil {
return fmt.Errorf("load job %d: %w", actionRunJob.ID, err)
}
actionRunJob.ConcurrencyGroup, actionRunJob.ConcurrencyCancel, err = jobparser.EvaluateConcurrency(&rawConcurrency, actionRunJob.JobID, workflowJob, actionsJobCtx, jobResults, vars, inputs)
if err != nil {
return fmt.Errorf("evaluate concurrency: %w", err)
}
actionRunJob.IsConcurrencyEvaluated = true
return nil
}
func getInputsFromRun(run *actions_model.ActionRun) (map[string]any, error) {
if run.Event != "workflow_dispatch" {
return map[string]any{}, nil
}
var payload api.WorkflowDispatchPayload
if err := json.Unmarshal([]byte(run.EventPayload), &payload); err != nil {
return nil, err
}
return payload.Inputs, nil
}
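
Job-level concurrency groups such as my-group-${{ needs.job1.outputs.out1 }} can only be resolved after the needed jobs have produced their outputs, which is why the evaluation above is deferred for jobs with needs. A minimal standalone sketch of that idea, using a toy substitution rather than the real jobparser evaluator (not part of this change):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// needsOutputs maps jobID -> output name -> value, as produced by finished jobs.
type needsOutputs map[string]map[string]string

var needsExpr = regexp.MustCompile(`\$\{\{\s*needs\.([\w-]+)\.outputs\.([\w-]+)\s*\}\}`)

// resolveGroup substitutes needs.<job>.outputs.<name> references in a
// concurrency group template; it fails if a referenced job hasn't finished yet.
func resolveGroup(template string, outputs needsOutputs) (string, error) {
	var missing []string
	group := needsExpr.ReplaceAllStringFunc(template, func(m string) string {
		parts := needsExpr.FindStringSubmatch(m)
		if vals, ok := outputs[parts[1]]; ok {
			return vals[parts[2]] // missing output names simply resolve to ""
		}
		missing = append(missing, parts[1])
		return m
	})
	if len(missing) > 0 {
		return "", fmt.Errorf("needed jobs not finished: %s", strings.Join(missing, ", "))
	}
	return group, nil
}

func main() {
	tmpl := "deploy-${{ needs.build.outputs.target }}"

	// Before the needed job finishes, the group cannot be evaluated yet.
	if _, err := resolveGroup(tmpl, needsOutputs{}); err != nil {
		fmt.Println("still blocked:", err)
	}

	// After "build" finishes, the group resolves and can be used for blocking/cancelling.
	group, _ := resolveGroup(tmpl, needsOutputs{"build": {"target": "prod"}})
	fmt.Println("concurrency group:", group) // deploy-prod
}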

View File

@ -10,12 +10,14 @@ import (
actions_model "code.gitea.io/gitea/models/actions"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/graceful"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/queue"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
notify_service "code.gitea.io/gitea/services/notify"
"github.com/nektos/act/pkg/jobparser"
"xorm.io/builder"
)
@ -25,7 +27,7 @@ type jobUpdate struct {
RunID int64
}
func EmitJobsIfReady(runID int64) error {
func EmitJobsIfReadyByRun(runID int64) error {
err := jobEmitterQueue.Push(&jobUpdate{
RunID: runID,
})
@ -35,53 +37,77 @@ func EmitJobsIfReady(runID int64) error {
return err
}
func EmitJobsIfReadyByJobs(jobs []*actions_model.ActionRunJob) {
checkedRuns := make(container.Set[int64])
for _, job := range jobs {
if !job.Status.IsDone() || checkedRuns.Contains(job.RunID) {
continue
}
if err := EmitJobsIfReadyByRun(job.RunID); err != nil {
log.Error("Check jobs of run %d: %v", job.RunID, err)
}
checkedRuns.Add(job.RunID)
}
}
func jobEmitterQueueHandler(items ...*jobUpdate) []*jobUpdate {
ctx := graceful.GetManager().ShutdownContext()
var ret []*jobUpdate
for _, update := range items {
if err := checkJobsOfRun(ctx, update.RunID); err != nil {
if err := checkJobsByRunID(ctx, update.RunID); err != nil {
log.Error("check run %d: %v", update.RunID, err)
ret = append(ret, update)
}
}
return ret
}
func checkJobsOfRun(ctx context.Context, runID int64) error {
jobs, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: runID})
if err != nil {
return err
func checkJobsByRunID(ctx context.Context, runID int64) error {
run, exist, err := db.GetByID[actions_model.ActionRun](ctx, runID)
if !exist {
return fmt.Errorf("run %d does not exist", runID)
}
var updatedjobs []*actions_model.ActionRunJob
if err != nil {
return fmt.Errorf("get action run: %w", err)
}
var jobs, updatedJobs []*actions_model.ActionRunJob
if err := db.WithTx(ctx, func(ctx context.Context) error {
idToJobs := make(map[string][]*actions_model.ActionRunJob, len(jobs))
for _, job := range jobs {
idToJobs[job.JobID] = append(idToJobs[job.JobID], job)
// check jobs of the current run
if js, ujs, err := checkJobsOfRun(ctx, run); err != nil {
return err
} else {
jobs = append(jobs, js...)
updatedJobs = append(updatedJobs, ujs...)
}
updates := newJobStatusResolver(jobs).Resolve()
for _, job := range jobs {
if status, ok := updates[job.ID]; ok {
job.Status = status
if n, err := actions_model.UpdateRunJob(ctx, job, builder.Eq{"status": actions_model.StatusBlocked}, "status"); err != nil {
return err
} else if n != 1 {
return fmt.Errorf("no affected for updating blocked job %v", job.ID)
}
updatedjobs = append(updatedjobs, job)
}
if js, ujs, err := checkRunConcurrency(ctx, run); err != nil {
return err
} else {
jobs = append(jobs, js...)
updatedJobs = append(updatedJobs, ujs...)
}
return nil
}); err != nil {
return err
}
CreateCommitStatus(ctx, jobs...)
for _, job := range updatedjobs {
CreateCommitStatusForRunJobs(ctx, run, jobs...)
for _, job := range updatedJobs {
_ = job.LoadAttributes(ctx)
notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil)
}
if len(jobs) > 0 {
runJobs := make(map[int64][]*actions_model.ActionRunJob)
for _, job := range jobs {
runJobs[job.RunID] = append(runJobs[job.RunID], job)
}
runUpdatedJobs := make(map[int64][]*actions_model.ActionRunJob)
for _, uj := range updatedJobs {
runUpdatedJobs[uj.RunID] = append(runUpdatedJobs[uj.RunID], uj)
}
for runID, js := range runJobs {
if len(runUpdatedJobs[runID]) == 0 {
continue
}
runUpdated := true
for _, job := range jobs {
for _, job := range js {
if !job.Status.IsDone() {
runUpdated = false
break
@ -94,6 +120,118 @@ func checkJobsOfRun(ctx context.Context, runID int64) error {
return nil
}
// findBlockedRunByConcurrency finds the blocked concurrent run in a repo and returns `nil, nil` when there is no blocked run.
func findBlockedRunByConcurrency(ctx context.Context, repoID int64, concurrencyGroup string) (*actions_model.ActionRun, error) {
if concurrencyGroup == "" {
return nil, nil
}
cRuns, cJobs, err := actions_model.GetConcurrentRunsAndJobs(ctx, repoID, concurrencyGroup, []actions_model.Status{actions_model.StatusBlocked})
if err != nil {
return nil, fmt.Errorf("find concurrent runs and jobs: %w", err)
}
// There can be at most one blocked run or job
var concurrentRun *actions_model.ActionRun
if len(cRuns) > 0 {
concurrentRun = cRuns[0]
} else if len(cJobs) > 0 {
jobRun, exist, err := db.GetByID[actions_model.ActionRun](ctx, cJobs[0].RunID)
if !exist {
return nil, fmt.Errorf("run %d does not exist", cJobs[0].RunID)
}
if err != nil {
return nil, fmt.Errorf("get run by job %d: %w", cJobs[0].ID, err)
}
concurrentRun = jobRun
}
return concurrentRun, nil
}
func checkRunConcurrency(ctx context.Context, run *actions_model.ActionRun) (jobs, updatedJobs []*actions_model.ActionRunJob, err error) {
checkedConcurrencyGroup := make(container.Set[string])
// check run (workflow-level) concurrency
if run.ConcurrencyGroup != "" {
concurrentRun, err := findBlockedRunByConcurrency(ctx, run.RepoID, run.ConcurrencyGroup)
if err != nil {
return nil, nil, fmt.Errorf("find blocked run by concurrency: %w", err)
}
if concurrentRun != nil && !concurrentRun.NeedApproval {
js, ujs, err := checkJobsOfRun(ctx, concurrentRun)
if err != nil {
return nil, nil, err
}
jobs = append(jobs, js...)
updatedJobs = append(updatedJobs, ujs...)
}
checkedConcurrencyGroup.Add(run.ConcurrencyGroup)
}
// check job concurrency
runJobs, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: run.ID})
if err != nil {
return nil, nil, fmt.Errorf("find run %d jobs: %w", run.ID, err)
}
for _, job := range runJobs {
if !job.Status.IsDone() {
continue
}
if job.ConcurrencyGroup == "" && checkedConcurrencyGroup.Contains(job.ConcurrencyGroup) {
continue
}
concurrentRun, err := findBlockedRunByConcurrency(ctx, job.RepoID, job.ConcurrencyGroup)
if err != nil {
return nil, nil, fmt.Errorf("find blocked run by concurrency: %w", err)
}
if concurrentRun != nil && !concurrentRun.NeedApproval {
js, ujs, err := checkJobsOfRun(ctx, concurrentRun)
if err != nil {
return nil, nil, err
}
jobs = append(jobs, js...)
updatedJobs = append(updatedJobs, ujs...)
}
checkedConcurrencyGroup.Add(job.ConcurrencyGroup)
}
return jobs, updatedJobs, nil
}
func checkJobsOfRun(ctx context.Context, run *actions_model.ActionRun) (jobs, updatedJobs []*actions_model.ActionRunJob, err error) {
jobs, err = db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: run.ID})
if err != nil {
return nil, nil, err
}
vars, err := actions_model.GetVariablesOfRun(ctx, run)
if err != nil {
return nil, nil, err
}
if err = db.WithTx(ctx, func(ctx context.Context) error {
for _, job := range jobs {
job.Run = run
}
updates := newJobStatusResolver(jobs, vars).Resolve(ctx)
for _, job := range jobs {
if status, ok := updates[job.ID]; ok {
job.Status = status
if n, err := actions_model.UpdateRunJob(ctx, job, builder.Eq{"status": actions_model.StatusBlocked}, "status"); err != nil {
return err
} else if n != 1 {
return fmt.Errorf("no affected for updating blocked job %v", job.ID)
}
updatedJobs = append(updatedJobs, job)
}
}
return nil
}); err != nil {
return nil, nil, err
}
return jobs, updatedJobs, nil
}
func NotifyWorkflowRunStatusUpdateWithReload(ctx context.Context, job *actions_model.ActionRunJob) {
job.Run = nil
if err := job.LoadAttributes(ctx); err != nil {
@ -107,9 +245,10 @@ type jobStatusResolver struct {
statuses map[int64]actions_model.Status
needs map[int64][]int64
jobMap map[int64]*actions_model.ActionRunJob
vars map[string]string
}
func newJobStatusResolver(jobs actions_model.ActionJobList) *jobStatusResolver {
func newJobStatusResolver(jobs actions_model.ActionJobList, vars map[string]string) *jobStatusResolver {
idToJobs := make(map[string][]*actions_model.ActionRunJob, len(jobs))
jobMap := make(map[int64]*actions_model.ActionRunJob)
for _, job := range jobs {
@ -131,13 +270,14 @@ func newJobStatusResolver(jobs actions_model.ActionJobList) *jobStatusResolver {
statuses: statuses,
needs: needs,
jobMap: jobMap,
vars: vars,
}
}
func (r *jobStatusResolver) Resolve() map[int64]actions_model.Status {
func (r *jobStatusResolver) Resolve(ctx context.Context) map[int64]actions_model.Status {
ret := map[int64]actions_model.Status{}
for i := 0; i < len(r.statuses); i++ {
updated := r.resolve()
updated := r.resolve(ctx)
if len(updated) == 0 {
return ret
}
@ -149,43 +289,86 @@ func (r *jobStatusResolver) Resolve() map[int64]actions_model.Status {
return ret
}
func (r *jobStatusResolver) resolve() map[int64]actions_model.Status {
func (r *jobStatusResolver) resolveCheckNeeds(id int64) (allDone, allSucceed bool) {
allDone, allSucceed = true, true
for _, need := range r.needs[id] {
needStatus := r.statuses[need]
if !needStatus.IsDone() {
allDone = false
}
if needStatus.In(actions_model.StatusFailure, actions_model.StatusCancelled, actions_model.StatusSkipped) {
allSucceed = false
}
}
return allDone, allSucceed
}
func (r *jobStatusResolver) resolveJobHasIfCondition(actionRunJob *actions_model.ActionRunJob) (hasIf bool) {
// FIXME evaluate this on the server side
if job, err := actionRunJob.ParseJob(); err == nil {
return len(job.If.Value) > 0
}
return hasIf
}
func (r *jobStatusResolver) resolve(ctx context.Context) map[int64]actions_model.Status {
ret := map[int64]actions_model.Status{}
for id, status := range r.statuses {
actionRunJob := r.jobMap[id]
if status != actions_model.StatusBlocked {
continue
}
allDone, allSucceed := true, true
for _, need := range r.needs[id] {
needStatus := r.statuses[need]
if !needStatus.IsDone() {
allDone = false
}
if needStatus.In(actions_model.StatusFailure, actions_model.StatusCancelled, actions_model.StatusSkipped) {
allSucceed = false
allDone, allSucceed := r.resolveCheckNeeds(id)
if !allDone {
continue
}
// update concurrency and check whether the job can run now
err := updateConcurrencyEvaluationForJobWithNeeds(ctx, actionRunJob, r.vars)
if err != nil {
// The error can have several causes: a database error, a syntax error, or needed jobs that haven't completed yet.
// At the moment there is no way to distinguish them.
// In most cases the cause is a syntax error or needed jobs that haven't completed (possibly skipped).
// TODO: if the workflow or concurrency expression has a syntax error, a user-facing error message should be shown to end users
log.Debug("updateConcurrencyEvaluationForJobWithNeeds failed, this job will stay blocked: job: %d, err: %v", id, err)
continue
}
shouldStartJob := true
if !allSucceed {
// Not all dependent jobs completed successfully:
// * if the job has an "if" condition, it can be started; the act_runner will then evaluate the "if" condition.
// * otherwise, the job should be skipped.
shouldStartJob = r.resolveJobHasIfCondition(actionRunJob)
}
newStatus := util.Iif(shouldStartJob, actions_model.StatusWaiting, actions_model.StatusSkipped)
if newStatus == actions_model.StatusWaiting {
newStatus, err = PrepareToStartJobWithConcurrency(ctx, actionRunJob)
if err != nil {
log.Error("ShouldBlockJobByConcurrency failed, this job will stay blocked: job: %d, err: %v", id, err)
}
}
if allDone {
if allSucceed {
ret[id] = actions_model.StatusWaiting
} else {
// Check if the job has an "if" condition
hasIf := false
if wfJobs, _ := jobparser.Parse(r.jobMap[id].WorkflowPayload); len(wfJobs) == 1 {
_, wfJob := wfJobs[0].Job()
hasIf = len(wfJob.If.Value) > 0
}
if hasIf {
// act_runner will check the "if" condition
ret[id] = actions_model.StatusWaiting
} else {
// If the "if" condition is empty and not all dependent jobs completed successfully,
// the job should be skipped.
ret[id] = actions_model.StatusSkipped
}
}
if newStatus != actions_model.StatusBlocked {
ret[id] = newStatus
}
}
return ret
}
func updateConcurrencyEvaluationForJobWithNeeds(ctx context.Context, actionRunJob *actions_model.ActionRunJob, vars map[string]string) error {
if setting.IsInTesting && actionRunJob.RepoID == 0 {
return nil // for testing purposes only: no repo, no evaluation
}
err := EvaluateJobConcurrencyFillModel(ctx, actionRunJob.Run, actionRunJob, vars)
if err != nil {
return fmt.Errorf("evaluate job concurrency: %w", err)
}
if _, err := actions_model.UpdateRunJob(ctx, actionRunJob, nil, "concurrency_group", "concurrency_cancel", "is_concurrency_evaluated"); err != nil {
return fmt.Errorf("update run job: %w", err)
}
return nil
}
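
The resolver above promotes a blocked job only when all of its needed jobs are done, and either they all succeeded or the job carries an "if" condition for the runner to evaluate; otherwise the job is skipped. A minimal standalone sketch of that decision (the concurrency checks are left out and the status set is simplified; not the actions_model types):

package main

import "fmt"

type status string

const (
	blocked status = "blocked"
	waiting status = "waiting"
	skipped status = "skipped"
	success status = "success"
	failure status = "failure"
)

func isDone(s status) bool { return s == success || s == failure || s == skipped }

// resolveBlocked decides the next status of a blocked job from its needed
// jobs' statuses and whether it declares an "if" condition.
func resolveBlocked(needs []status, hasIf bool) status {
	allDone, allSucceed := true, true
	for _, n := range needs {
		if !isDone(n) {
			allDone = false
		}
		if n == failure || n == skipped {
			allSucceed = false
		}
	}
	switch {
	case !allDone:
		return blocked // keep waiting for the needed jobs
	case allSucceed || hasIf:
		return waiting // start it; the runner evaluates any "if" condition
	default:
		return skipped // a dependency failed and there is no "if" to rescue it
	}
}

func main() {
	fmt.Println(resolveBlocked([]status{success, waiting}, false)) // blocked
	fmt.Println(resolveBlocked([]status{success, success}, false)) // waiting
	fmt.Println(resolveBlocked([]status{success, failure}, false)) // skipped
	fmt.Println(resolveBlocked([]status{success, failure}, true))  // waiting
}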

View File

@ -129,8 +129,8 @@ jobs:
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := newJobStatusResolver(tt.jobs)
assert.Equal(t, tt.want, r.Resolve())
r := newJobStatusResolver(tt.jobs, nil)
assert.Equal(t, tt.want, r.Resolve(t.Context()))
})
}
}

View File

@ -27,9 +27,7 @@ import (
api "code.gitea.io/gitea/modules/structs"
webhook_module "code.gitea.io/gitea/modules/webhook"
"code.gitea.io/gitea/services/convert"
notify_service "code.gitea.io/gitea/services/notify"
"github.com/nektos/act/pkg/jobparser"
"github.com/nektos/act/pkg/model"
)
@ -346,66 +344,10 @@ func handleWorkflows(
run.NeedApproval = need
if err := run.LoadAttributes(ctx); err != nil {
log.Error("LoadAttributes: %v", err)
if err := PrepareRunAndInsert(ctx, dwf.Content, run, nil); err != nil {
log.Error("PrepareRunAndInsert: %v", err)
continue
}
vars, err := actions_model.GetVariablesOfRun(ctx, run)
if err != nil {
log.Error("GetVariablesOfRun: %v", err)
continue
}
giteaCtx := GenerateGiteaContext(run, nil)
jobs, err := jobparser.Parse(dwf.Content, jobparser.WithVars(vars), jobparser.WithGitContext(giteaCtx.ToGitHubContext()))
if err != nil {
log.Error("jobparser.Parse: %v", err)
continue
}
if len(jobs) > 0 && jobs[0].RunName != "" {
run.Title = jobs[0].RunName
}
// cancel running jobs if the event is push or pull_request_sync
if run.Event == webhook_module.HookEventPush ||
run.Event == webhook_module.HookEventPullRequestSync {
if err := CancelPreviousJobs(
ctx,
run.RepoID,
run.Ref,
run.WorkflowID,
run.Event,
); err != nil {
log.Error("CancelPreviousJobs: %v", err)
}
}
if err := actions_model.InsertRun(ctx, run, jobs); err != nil {
log.Error("InsertRun: %v", err)
continue
}
alljobs, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: run.ID})
if err != nil {
log.Error("FindRunJobs: %v", err)
continue
}
CreateCommitStatus(ctx, alljobs...)
if len(alljobs) > 0 {
job := alljobs[0]
err := job.LoadRun(ctx)
if err != nil {
log.Error("LoadRun: %v", err)
continue
}
notify_service.WorkflowRunStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job.Run)
}
for _, job := range alljobs {
notify_service.WorkflowJobStatusUpdate(ctx, input.Repo, input.Doer, job, nil)
}
}
return nil
}
@ -561,24 +503,6 @@ func handleSchedules(
Content: dwf.Content,
}
vars, err := actions_model.GetVariablesOfRun(ctx, run.ToActionRun())
if err != nil {
log.Error("GetVariablesOfRun: %v", err)
continue
}
giteaCtx := GenerateGiteaContext(run.ToActionRun(), nil)
jobs, err := jobparser.Parse(dwf.Content, jobparser.WithVars(vars), jobparser.WithGitContext(giteaCtx.ToGitHubContext()))
if err != nil {
log.Error("jobparser.Parse: %v", err)
continue
}
if len(jobs) > 0 && jobs[0].RunName != "" {
run.Title = jobs[0].RunName
}
crons = append(crons, run)
}

services/actions/run.go Normal file
View File

@ -0,0 +1,178 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"context"
"fmt"
actions_model "code.gitea.io/gitea/models/actions"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/util"
notify_service "code.gitea.io/gitea/services/notify"
"github.com/nektos/act/pkg/jobparser"
"gopkg.in/yaml.v3"
)
// PrepareRunAndInsert prepares a run and inserts it into the database
// It parses the workflow content, evaluates concurrency if needed, and inserts the run and its jobs into the database.
// The title will be truncated to 255 characters if it is longer than that.
func PrepareRunAndInsert(ctx context.Context, content []byte, run *actions_model.ActionRun, inputsWithDefaults map[string]any) error {
if err := run.LoadAttributes(ctx); err != nil {
return fmt.Errorf("LoadAttributes: %w", err)
}
vars, err := actions_model.GetVariablesOfRun(ctx, run)
if err != nil {
return fmt.Errorf("GetVariablesOfRun: %w", err)
}
wfRawConcurrency, err := jobparser.ReadWorkflowRawConcurrency(content)
if err != nil {
return fmt.Errorf("ReadWorkflowRawConcurrency: %w", err)
}
if wfRawConcurrency != nil {
err = EvaluateRunConcurrencyFillModel(ctx, run, wfRawConcurrency, vars)
if err != nil {
return fmt.Errorf("EvaluateRunConcurrencyFillModel: %w", err)
}
}
giteaCtx := GenerateGiteaContext(run, nil)
jobs, err := jobparser.Parse(content, jobparser.WithVars(vars), jobparser.WithGitContext(giteaCtx.ToGitHubContext()), jobparser.WithInputs(inputsWithDefaults))
if err != nil {
return fmt.Errorf("parse workflow: %w", err)
}
if len(jobs) > 0 && jobs[0].RunName != "" {
run.Title = jobs[0].RunName
}
if err = InsertRun(ctx, run, jobs, vars); err != nil {
return fmt.Errorf("InsertRun: %w", err)
}
// Load the newly inserted jobs with all fields from the database (the job models in InsertRun are partial, so load them again)
allJobs, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: run.ID})
if err != nil {
return fmt.Errorf("FindRunJob: %w", err)
}
CreateCommitStatusForRunJobs(ctx, run, allJobs...)
notify_service.WorkflowRunStatusUpdate(ctx, run.Repo, run.TriggerUser, run)
for _, job := range allJobs {
notify_service.WorkflowJobStatusUpdate(ctx, run.Repo, run.TriggerUser, job, nil)
}
return nil
}
// InsertRun inserts a run
// The title will be truncated to 255 characters if it is longer than that.
func InsertRun(ctx context.Context, run *actions_model.ActionRun, jobs []*jobparser.SingleWorkflow, vars map[string]string) error {
return db.WithTx(ctx, func(ctx context.Context) error {
index, err := db.GetNextResourceIndex(ctx, "action_run_index", run.RepoID)
if err != nil {
return err
}
run.Index = index
run.Title = util.EllipsisDisplayString(run.Title, 255)
// check run (workflow-level) concurrency
run.Status, err = PrepareToStartRunWithConcurrency(ctx, run)
if err != nil {
return err
}
if err := db.Insert(ctx, run); err != nil {
return err
}
if err := run.LoadRepo(ctx); err != nil {
return err
}
if err := actions_model.UpdateRepoRunsNumbers(ctx, run.Repo); err != nil {
return err
}
runJobs := make([]*actions_model.ActionRunJob, 0, len(jobs))
var hasWaitingJobs bool
for _, v := range jobs {
id, job := v.Job()
needs := job.Needs()
if err := v.SetJob(id, job.EraseNeeds()); err != nil {
return err
}
payload, _ := v.Marshal()
shouldBlockJob := len(needs) > 0 || run.NeedApproval || run.Status == actions_model.StatusBlocked
job.Name = util.EllipsisDisplayString(job.Name, 255)
runJob := &actions_model.ActionRunJob{
RunID: run.ID,
RepoID: run.RepoID,
OwnerID: run.OwnerID,
CommitSHA: run.CommitSHA,
IsForkPullRequest: run.IsForkPullRequest,
Name: job.Name,
WorkflowPayload: payload,
JobID: id,
Needs: needs,
RunsOn: job.RunsOn(),
Status: util.Iif(shouldBlockJob, actions_model.StatusBlocked, actions_model.StatusWaiting),
}
// check job concurrency
if job.RawConcurrency != nil {
rawConcurrency, err := yaml.Marshal(job.RawConcurrency)
if err != nil {
return fmt.Errorf("marshal raw concurrency: %w", err)
}
runJob.RawConcurrency = string(rawConcurrency)
// do not evaluate job concurrency when the job requires `needs`; jobs with `needs` will be evaluated later by the job emitter
if len(needs) == 0 {
err = EvaluateJobConcurrencyFillModel(ctx, run, runJob, vars)
if err != nil {
return fmt.Errorf("evaluate job concurrency: %w", err)
}
}
// If a job needs other jobs ("needs" is not empty), its status is set to StatusBlocked at the top of the loop
// No need to check job concurrency for a blocked job (it will be checked by job emitter later)
if runJob.Status == actions_model.StatusWaiting {
runJob.Status, err = PrepareToStartJobWithConcurrency(ctx, runJob)
if err != nil {
return fmt.Errorf("prepare to start job with concurrency: %w", err)
}
}
}
hasWaitingJobs = hasWaitingJobs || runJob.Status == actions_model.StatusWaiting
if err := db.Insert(ctx, runJob); err != nil {
return err
}
runJobs = append(runJobs, runJob)
}
run.Status = actions_model.AggregateJobStatus(runJobs)
if err := actions_model.UpdateRun(ctx, run, "status"); err != nil {
return err
}
// if there is a job in the waiting status, increase the tasks version.
if hasWaitingJobs {
if err := actions_model.IncreaseTaskVersion(ctx, run.OwnerID, run.RepoID); err != nil {
return err
}
}
return nil
})
}
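
In InsertRun above, a job starts out blocked when it has needs, when the run needs approval, or when the run itself was blocked by workflow-level concurrency; only otherwise does it become waiting, and even then the job-level concurrency check may block it again. A minimal standalone sketch of that initial-status decision, with the concurrency check reduced to a boolean (not the actual implementation):

package main

import "fmt"

type jobStatus string

const (
	statusWaiting jobStatus = "waiting"
	statusBlocked jobStatus = "blocked"
)

// initialJobStatus mirrors the decision made when inserting a new run's jobs:
// jobs with needs, runs that need approval, and runs blocked by workflow-level
// concurrency all start blocked; otherwise the job may still be blocked by its
// own concurrency group (groupBusy stands in for that check).
func initialJobStatus(hasNeeds, needApproval, runBlocked, groupBusy bool) jobStatus {
	if hasNeeds || needApproval || runBlocked {
		return statusBlocked
	}
	if groupBusy {
		return statusBlocked
	}
	return statusWaiting
}

func main() {
	fmt.Println(initialJobStatus(true, false, false, false))  // blocked: waits for its needs
	fmt.Println(initialJobStatus(false, false, false, true))  // blocked: concurrency group busy
	fmt.Println(initialJobStatus(false, false, false, false)) // waiting: can be picked up by a runner
}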

View File

@ -15,9 +15,6 @@ import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
webhook_module "code.gitea.io/gitea/modules/webhook"
notify_service "code.gitea.io/gitea/services/notify"
"github.com/nektos/act/pkg/jobparser"
)
// StartScheduleTasks start the task
@ -53,20 +50,6 @@ func startTasks(ctx context.Context) error {
// Loop through each spec and create a schedule task for it
for _, row := range specs {
// cancel running jobs if the event is push
if row.Schedule.Event == webhook_module.HookEventPush {
// cancel running jobs of the same workflow
if err := CancelPreviousJobs(
ctx,
row.RepoID,
row.Schedule.Ref,
row.Schedule.WorkflowID,
webhook_module.HookEventSchedule,
); err != nil {
log.Error("CancelPreviousJobs: %v", err)
}
}
if row.Repo.IsArchived {
// Skip if the repo is archived
continue
@ -133,34 +116,12 @@ func CreateScheduleTask(ctx context.Context, cron *actions_model.ActionSchedule)
Status: actions_model.StatusWaiting,
}
vars, err := actions_model.GetVariablesOfRun(ctx, run)
if err != nil {
log.Error("GetVariablesOfRun: %v", err)
return err
}
// Parse the workflow specification from the cron schedule
workflows, err := jobparser.Parse(cron.Content, jobparser.WithVars(vars))
if err != nil {
return err
}
// FIXME cron.Content might be outdated if the workflow file has been changed.
// Load the latest sha from default branch
// Insert the action run and its associated jobs into the database
if err := actions_model.InsertRun(ctx, run, workflows); err != nil {
if err := PrepareRunAndInsert(ctx, cron.Content, run, nil); err != nil {
return err
}
allJobs, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: run.ID})
if err != nil {
log.Error("FindRunJobs: %v", err)
}
err = run.LoadAttributes(ctx)
if err != nil {
log.Error("LoadAttributes: %v", err)
}
notify_service.WorkflowRunStatusUpdate(ctx, run.Repo, run.TriggerUser, run)
for _, job := range allJobs {
notify_service.WorkflowJobStatusUpdate(ctx, run.Repo, run.TriggerUser, job, nil)
}
// Return nil if no errors occurred
return nil

View File

@ -97,7 +97,7 @@ func PickTask(ctx context.Context, runner *actions_model.ActionRunner) (*runnerv
return nil, false, nil
}
CreateCommitStatus(ctx, job)
CreateCommitStatusForRunJobs(ctx, job.Run, job)
notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, actionTask)
return task, true, nil

View File

@ -8,7 +8,6 @@ import (
"strings"
actions_model "code.gitea.io/gitea/models/actions"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/perm"
access_model "code.gitea.io/gitea/models/perm/access"
repo_model "code.gitea.io/gitea/models/repo"
@ -16,13 +15,11 @@ import (
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/actions"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/reqctx"
api "code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/services/context"
"code.gitea.io/gitea/services/convert"
notify_service "code.gitea.io/gitea/services/notify"
"github.com/nektos/act/pkg/jobparser"
"github.com/nektos/act/pkg/model"
@ -99,7 +96,6 @@ func DispatchActionWorkflow(ctx reqctx.RequestContext, doer *user_model.User, re
}
// find workflow from commit
var workflows []*jobparser.SingleWorkflow
var entry *git.TreeEntry
run := &actions_model.ActionRun{
@ -153,24 +149,6 @@ func DispatchActionWorkflow(ctx reqctx.RequestContext, doer *user_model.User, re
}
}
giteaCtx := GenerateGiteaContext(run, nil)
workflows, err = jobparser.Parse(content, jobparser.WithGitContext(giteaCtx.ToGitHubContext()), jobparser.WithInputs(inputsWithDefaults))
if err != nil {
return err
}
if len(workflows) > 0 && workflows[0].RunName != "" {
run.Title = workflows[0].RunName
}
if len(workflows) == 0 {
return util.ErrorWrapLocale(
util.NewNotExistErrorf("workflow %q doesn't exist", workflowID),
"actions.workflow.not_found", workflowID,
)
}
// ctx.Req.PostForm -> WorkflowDispatchPayload.Inputs -> ActionRun.EventPayload -> runner: ghc.Event
// https://docs.github.com/en/actions/learn-github-actions/contexts#github-context
// https://docs.github.com/en/webhooks/webhook-events-and-payloads#workflow_dispatch
@ -188,38 +166,9 @@ func DispatchActionWorkflow(ctx reqctx.RequestContext, doer *user_model.User, re
}
run.EventPayload = string(eventPayload)
// cancel running jobs of the same workflow
if err := CancelPreviousJobs(
ctx,
run.RepoID,
run.Ref,
run.WorkflowID,
run.Event,
); err != nil {
log.Error("CancelRunningJobs: %v", err)
}
// Insert the action run and its associated jobs into the database
if err := actions_model.InsertRun(ctx, run, workflows); err != nil {
return fmt.Errorf("InsertRun: %w", err)
}
allJobs, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: run.ID})
if err != nil {
log.Error("FindRunJobs: %v", err)
}
CreateCommitStatus(ctx, allJobs...)
if len(allJobs) > 0 {
job := allJobs[0]
err := job.LoadRun(ctx)
if err != nil {
log.Error("LoadRun: %v", err)
} else {
notify_service.WorkflowRunStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job.Run)
}
}
for _, job := range allJobs {
notify_service.WorkflowJobStatusUpdate(ctx, repo, doer, job, nil)
if err := PrepareRunAndInsert(ctx, content, run, inputsWithDefaults); err != nil {
return fmt.Errorf("PrepareRun: %w", err)
}
return nil
}

View File

@ -17,7 +17,6 @@ import (
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/git/gitcmd"
"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/process"
@ -109,54 +108,9 @@ func IsErrWontSign(err error) bool {
return ok
}
// SigningKey returns the KeyID and git Signature for the repo
func SigningKey(ctx context.Context, repoPath string) (*git.SigningKey, *git.Signature) {
if setting.Repository.Signing.SigningKey == "none" {
return nil, nil
}
if setting.Repository.Signing.SigningKey == "default" || setting.Repository.Signing.SigningKey == "" {
// Can ignore the error here as it means that commit.gpgsign is not set
value, _, _ := gitcmd.NewCommand("config", "--get", "commit.gpgsign").WithDir(repoPath).RunStdString(ctx)
sign, valid := git.ParseBool(strings.TrimSpace(value))
if !sign || !valid {
return nil, nil
}
format, _, _ := gitcmd.NewCommand("config", "--default", git.SigningKeyFormatOpenPGP, "--get", "gpg.format").WithDir(repoPath).RunStdString(ctx)
signingKey, _, _ := gitcmd.NewCommand("config", "--get", "user.signingkey").WithDir(repoPath).RunStdString(ctx)
signingName, _, _ := gitcmd.NewCommand("config", "--get", "user.name").WithDir(repoPath).RunStdString(ctx)
signingEmail, _, _ := gitcmd.NewCommand("config", "--get", "user.email").WithDir(repoPath).RunStdString(ctx)
if strings.TrimSpace(signingKey) == "" {
return nil, nil
}
return &git.SigningKey{
KeyID: strings.TrimSpace(signingKey),
Format: strings.TrimSpace(format),
}, &git.Signature{
Name: strings.TrimSpace(signingName),
Email: strings.TrimSpace(signingEmail),
}
}
if setting.Repository.Signing.SigningKey == "" {
return nil, nil
}
return &git.SigningKey{
KeyID: setting.Repository.Signing.SigningKey,
Format: setting.Repository.Signing.SigningFormat,
}, &git.Signature{
Name: setting.Repository.Signing.SigningName,
Email: setting.Repository.Signing.SigningEmail,
}
}
// PublicSigningKey gets the public signing key within a provided repository directory
func PublicSigningKey(ctx context.Context, repoPath string) (content, format string, err error) {
signingKey, _ := SigningKey(ctx, repoPath)
signingKey, _ := git.GetSigningKey(ctx, repoPath)
if signingKey == nil {
return "", "", nil
}
@ -181,7 +135,7 @@ func PublicSigningKey(ctx context.Context, repoPath string) (content, format str
// SignInitialCommit determines if we should sign the initial commit to this repository
func SignInitialCommit(ctx context.Context, repoPath string, u *user_model.User) (bool, *git.SigningKey, *git.Signature, error) {
rules := signingModeFromStrings(setting.Repository.Signing.InitialCommit)
signingKey, sig := SigningKey(ctx, repoPath)
signingKey, sig := git.GetSigningKey(ctx, repoPath)
if signingKey == nil {
return false, nil, nil, &ErrWontSign{noKey}
}
@ -216,9 +170,8 @@ Loop:
// SignWikiCommit determines if we should sign the commits to this repository wiki
func SignWikiCommit(ctx context.Context, repo *repo_model.Repository, u *user_model.User) (bool, *git.SigningKey, *git.Signature, error) {
repoWikiPath := repo.WikiPath()
rules := signingModeFromStrings(setting.Repository.Signing.Wiki)
signingKey, sig := SigningKey(ctx, repoWikiPath)
signingKey, sig := gitrepo.GetSigningKey(ctx, repo.WikiStorageRepo())
if signingKey == nil {
return false, nil, nil, &ErrWontSign{noKey}
}
@ -271,7 +224,7 @@ Loop:
// SignCRUDAction determines if we should sign a CRUD commit to this repository
func SignCRUDAction(ctx context.Context, repoPath string, u *user_model.User, tmpBasePath, parentCommit string) (bool, *git.SigningKey, *git.Signature, error) {
rules := signingModeFromStrings(setting.Repository.Signing.CRUDActions)
signingKey, sig := SigningKey(ctx, repoPath)
signingKey, sig := git.GetSigningKey(ctx, repoPath)
if signingKey == nil {
return false, nil, nil, &ErrWontSign{noKey}
}
@ -335,7 +288,7 @@ func SignMerge(ctx context.Context, pr *issues_model.PullRequest, u *user_model.
}
repo := pr.BaseRepo
signingKey, signer := SigningKey(ctx, repo.RepoPath())
signingKey, signer := gitrepo.GetSigningKey(ctx, repo)
if signingKey == nil {
return false, nil, nil, &ErrWontSign{noKey}
}

View File

@ -15,6 +15,7 @@ import (
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
git_service "code.gitea.io/gitea/services/git"
notify_service "code.gitea.io/gitea/services/notify"
@ -151,15 +152,15 @@ func DeleteComment(ctx context.Context, doer *user_model.User, comment *issues_m
}
// LoadCommentPushCommits loads the push commits for a pull request push comment
func LoadCommentPushCommits(ctx context.Context, c *issues_model.Comment) (err error) {
func LoadCommentPushCommits(ctx context.Context, c *issues_model.Comment) error {
if c.Content == "" || c.Commits != nil || c.Type != issues_model.CommentTypePullRequestPush {
return nil
}
var data issues_model.PushActionContent
err = json.Unmarshal([]byte(c.Content), &data)
if err != nil {
return err
if err := json.Unmarshal([]byte(c.Content), &data); err != nil {
log.Debug("Unmarshal: %v", err) // no need to show 500 error to end user when the JSON is broken
return nil
}
c.IsForcePush = data.IsForcePush
@ -168,9 +169,15 @@ func LoadCommentPushCommits(ctx context.Context, c *issues_model.Comment) (err e
if len(data.CommitIDs) != 2 {
return nil
}
c.OldCommit = data.CommitIDs[0]
c.NewCommit = data.CommitIDs[1]
c.OldCommit, c.NewCommit = data.CommitIDs[0], data.CommitIDs[1]
} else {
if err := c.LoadIssue(ctx); err != nil {
return err
}
if err := c.Issue.LoadRepo(ctx); err != nil {
return err
}
gitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, c.Issue.Repo)
if err != nil {
return err
@ -179,10 +186,11 @@ func LoadCommentPushCommits(ctx context.Context, c *issues_model.Comment) (err e
c.Commits, err = git_service.ConvertFromGitCommit(ctx, gitRepo.GetCommitsFromIDs(data.CommitIDs), c.Issue.Repo, gitRepo)
if err != nil {
return err
log.Debug("ConvertFromGitCommit: %v", err) // no need to show 500 error to end user when the commit does not exist
} else {
c.CommitsNum = int64(len(c.Commits))
}
c.CommitsNum = int64(len(c.Commits))
}
return err
return nil
}
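
The push comment content parsed above is a small JSON blob; for a force push it carries exactly two commit IDs (old and new). A minimal standalone sketch with a local struct standing in for issues_model.PushActionContent (the JSON field names are assumed for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

// pushActionContent mirrors the shape read by LoadCommentPushCommits:
// either a list of pushed commit IDs, or (for a force push) the old and new IDs.
type pushActionContent struct {
	IsForcePush bool     `json:"is_force_push"`
	CommitIDs   []string `json:"commit_ids"`
}

func main() {
	raw := `{"is_force_push": true, "commit_ids": ["0123abc", "4567def"]}`

	var data pushActionContent
	if err := json.Unmarshal([]byte(raw), &data); err != nil {
		fmt.Println("broken content, ignore:", err) // the service logs this and returns nil instead of a 500
		return
	}
	if data.IsForcePush && len(data.CommitIDs) == 2 {
		oldCommit, newCommit := data.CommitIDs[0], data.CommitIDs[1]
		fmt.Println("force push:", oldCommit, "->", newCommit)
	}
}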

View File

@ -249,8 +249,6 @@ func checkRecoverableSyncError(stderrMessage string) bool {
// runSync returns true if sync finished without error.
func runSync(ctx context.Context, m *repo_model.Mirror) ([]*mirrorSyncResult, bool) {
repoPath := m.Repo.RepoPath()
wikiPath := m.Repo.WikiPath()
timeout := time.Duration(setting.Git.Timeout.Mirror) * time.Second
log.Trace("SyncMirrors [repo: %-v]: running git remote update...", m.Repo)
@ -311,7 +309,7 @@ func runSync(ctx context.Context, m *repo_model.Mirror) ([]*mirrorSyncResult, bo
// If there is still an error (or there always was an error)
if err != nil {
log.Error("SyncMirrors [repo: %-v]: failed to update mirror repository:\nStdout: %s\nStderr: %s\nErr: %v", m.Repo, stdoutMessage, stderrMessage, err)
desc := fmt.Sprintf("Failed to update mirror repository '%s': %s", repoPath, stderrMessage)
desc := fmt.Sprintf("Failed to update mirror repository '%s': %s", m.Repo.RelativePath(), stderrMessage)
if err = system_model.CreateRepositoryNotice(desc); err != nil {
log.Error("CreateRepositoryNotice: %v", err)
}
@ -320,7 +318,7 @@ func runSync(ctx context.Context, m *repo_model.Mirror) ([]*mirrorSyncResult, bo
}
output := stderrBuilder.String()
if err := git.WriteCommitGraph(ctx, repoPath); err != nil {
if err := gitrepo.WriteCommitGraph(ctx, m.Repo); err != nil {
log.Error("SyncMirrors [repo: %-v]: %v", m.Repo, err)
}
@ -394,14 +392,14 @@ func runSync(ctx context.Context, m *repo_model.Mirror) ([]*mirrorSyncResult, bo
// If there is still an error (or there always was an error)
if err != nil {
log.Error("SyncMirrors [repo: %-v Wiki]: failed to update mirror repository wiki:\nStdout: %s\nStderr: %s\nErr: %v", m.Repo, stdoutMessage, stderrMessage, err)
desc := fmt.Sprintf("Failed to update mirror repository wiki '%s': %s", wikiPath, stderrMessage)
desc := fmt.Sprintf("Failed to update mirror repository wiki '%s': %s", m.Repo.WikiStorageRepo().RelativePath(), stderrMessage)
if err = system_model.CreateRepositoryNotice(desc); err != nil {
log.Error("CreateRepositoryNotice: %v", err)
}
return nil, false
}
if err := git.WriteCommitGraph(ctx, wikiPath); err != nil {
if err := gitrepo.WriteCommitGraph(ctx, m.Repo.WikiStorageRepo()); err != nil {
log.Error("SyncMirrors [repo: %-v]: %v", m.Repo, err)
}
}

View File

@ -124,14 +124,12 @@ func runPushSync(ctx context.Context, m *repo_model.PushMirror) error {
performPush := func(repo *repo_model.Repository, isWiki bool) error {
var storageRepo gitrepo.Repository = repo
path := repo.RepoPath()
if isWiki {
storageRepo = repo.WikiStorageRepo()
path = repo.WikiPath()
}
remoteURL, err := gitrepo.GitRemoteGetURL(ctx, storageRepo, m.RemoteName)
if err != nil {
log.Error("GetRemoteURL(%s) Error %v", path, err)
log.Error("GetRemoteURL(%s) Error %v", storageRepo.RelativePath(), err)
return errors.New("Unexpected error")
}
@ -152,17 +150,17 @@ func runPushSync(ctx context.Context, m *repo_model.PushMirror) error {
}
}
log.Trace("Pushing %s mirror[%d] remote %s", path, m.ID, m.RemoteName)
log.Trace("Pushing %s mirror[%d] remote %s", storageRepo.RelativePath(), m.ID, m.RemoteName)
envs := proxy.EnvWithProxy(remoteURL.URL)
if err := git.Push(ctx, path, git.PushOptions{
if err := gitrepo.Push(ctx, storageRepo, git.PushOptions{
Remote: m.RemoteName,
Force: true,
Mirror: true,
Timeout: timeout,
Env: envs,
}); err != nil {
log.Error("Error pushing %s mirror[%d] remote %s: %v", path, m.ID, m.RemoteName, err)
log.Error("Error pushing %s mirror[%d] remote %s: %v", storageRepo.RelativePath(), m.ID, m.RemoteName, err)
return util.SanitizeErrorCredentialURLs(err)
}

View File

@ -248,6 +248,11 @@ func Merge(ctx context.Context, pr *issues_model.PullRequest, doer *user_model.U
}
defer releaser()
defer func() {
// This is a duplicated call to AddTestPullRequestTask (it will also be called by the post-receive hook, via a push queue).
// This call performs some operations (push to the base repo, sync commit divergence, add a PR conflict-check queue task, etc.)
// immediately instead of waiting for the "push queue" task. The code is from https://github.com/go-gitea/gitea/pull/7082.
// But it's questionable whether doing it ahead of time, without waiting for the "push queue" task to run, is worth it.
// TODO: DUPLICATE-PR-TASK: maybe try removing this in 1.26 to see if any issue arises.
go AddTestPullRequestTask(TestPullRequestOptions{
RepoID: pr.BaseRepo.ID,
Doer: doer,

View File

@ -32,6 +32,9 @@ type mergeContext struct {
env []string
}
// PrepareGitCmd prepares a git command with the correct directory, environment, and output buffers.
// The prepared command may only be executed with gitcmd.Run().
// Do NOT use it with the gitcmd.RunStd*() functions, otherwise it will panic.
func (ctx *mergeContext) PrepareGitCmd(cmd *gitcmd.Command) *gitcmd.Command {
ctx.outbuf.Reset()
ctx.errbuf.Reset()
@ -73,7 +76,11 @@ func createTemporaryRepoForMerge(ctx context.Context, pr *issues_model.PullReque
}
if expectedHeadCommitID != "" {
trackingCommitID, _, err := mergeCtx.PrepareGitCmd(gitcmd.NewCommand("show-ref", "--hash").AddDynamicArguments(git.BranchPrefix + trackingBranch)).RunStdString(ctx)
trackingCommitID, _, err := gitcmd.NewCommand("show-ref", "--hash").
AddDynamicArguments(git.BranchPrefix + trackingBranch).
WithEnv(mergeCtx.env).
WithDir(mergeCtx.tmpBasePath).
RunStdString(ctx)
if err != nil {
defer cancel()
log.Error("failed to get sha of head branch in %-v: show-ref[%s] --hash refs/heads/tracking: %v", mergeCtx.pr, mergeCtx.tmpBasePath, err)

View File

@ -374,10 +374,8 @@ type TestPullRequestOptions struct {
func AddTestPullRequestTask(opts TestPullRequestOptions) {
log.Trace("AddTestPullRequestTask [head_repo_id: %d, head_branch: %s]: finding pull requests", opts.RepoID, opts.Branch)
graceful.GetManager().RunWithShutdownContext(func(ctx context.Context) {
// There is no sensible way to shut this down ":-("
// If you don't let it run all the way then you will lose data
// TODO: graceful: AddTestPullRequestTask needs to become a queue!
// this function performs many operations on various models; if the process gets killed in the middle,
// there is no way to recover at the moment. The best workaround is to let the end user push again.
repo, err := repo_model.GetRepositoryByID(ctx, opts.RepoID)
if err != nil {
log.Error("GetRepositoryByID: %v", err)
@ -402,11 +400,15 @@ func AddTestPullRequestTask(opts TestPullRequestOptions) {
continue
}
StartPullRequestCheckImmediately(ctx, pr)
// create the push comment before checking the pull request status,
// so that when the status becomes mergeable the comment is already in the database, which makes testing easy and stable
comment, err := CreatePushPullComment(ctx, opts.Doer, pr, opts.OldCommitID, opts.NewCommitID, opts.IsForcePush)
if err == nil && comment != nil {
notify_service.PullRequestPushCommits(ctx, opts.Doer, pr, comment)
}
// The caller may run in a goroutine or a "push queue"; the "conflict check" can be time-consuming
// and its concurrency should be limited, so the conflict check is done in another queue
StartPullRequestCheckImmediately(ctx, pr)
}
if opts.IsSync {

View File

@ -37,6 +37,9 @@ type prTmpRepoContext struct {
errbuf *strings.Builder // any use should be preceded by a Reset and preferably after use
}
// PrepareGitCmd prepares a git command with the correct directory, environment, and output buffers.
// The prepared command may only be executed with gitcmd.Run().
// Do NOT use it with the gitcmd.RunStd*() functions, otherwise it will panic.
func (ctx *prTmpRepoContext) PrepareGitCmd(cmd *gitcmd.Command) *gitcmd.Command {
ctx.outbuf.Reset()
ctx.errbuf.Reset()

View File

@ -63,6 +63,9 @@ func Update(ctx context.Context, pr *issues_model.PullRequest, doer *user_model.
}
defer func() {
// The code is from https://github.com/go-gitea/gitea/pull/9784;
// it seems to be a simple copy-paste from https://github.com/go-gitea/gitea/pull/7082 without a real reason.
// TODO: DUPLICATE-PR-TASK: search for the other TODO comment with this tag for more details
go AddTestPullRequestTask(TestPullRequestOptions{
RepoID: pr.BaseRepo.ID,
Doer: doer,

View File

@ -28,22 +28,23 @@ import (
)
func cloneWiki(ctx context.Context, repo *repo_model.Repository, opts migration.MigrateOptions, migrateTimeout time.Duration) (string, error) {
wikiPath := repo.WikiPath()
wikiRemotePath := repo_module.WikiRemoteURL(ctx, opts.CloneAddr)
if wikiRemotePath == "" {
wikiRemoteURL := repo_module.WikiRemoteURL(ctx, opts.CloneAddr)
if wikiRemoteURL == "" {
return "", nil
}
if err := util.RemoveAll(wikiPath); err != nil {
return "", fmt.Errorf("failed to remove existing wiki dir %q, err: %w", wikiPath, err)
storageRepo := repo.WikiStorageRepo()
if err := gitrepo.DeleteRepository(ctx, storageRepo); err != nil {
return "", fmt.Errorf("failed to remove existing wiki dir %q, err: %w", storageRepo.RelativePath(), err)
}
cleanIncompleteWikiPath := func() {
if err := util.RemoveAll(wikiPath); err != nil {
log.Error("Failed to remove incomplete wiki dir %q, err: %v", wikiPath, err)
if err := gitrepo.DeleteRepository(ctx, storageRepo); err != nil {
log.Error("Failed to remove incomplete wiki dir %q, err: %v", storageRepo.RelativePath(), err)
}
}
if err := git.Clone(ctx, wikiRemotePath, wikiPath, git.CloneRepoOptions{
if err := gitrepo.CloneExternalRepo(ctx, wikiRemoteURL, storageRepo, git.CloneRepoOptions{
Mirror: true,
Quiet: true,
Timeout: migrateTimeout,
@ -54,15 +55,15 @@ func cloneWiki(ctx context.Context, repo *repo_model.Repository, opts migration.
return "", err
}
if err := git.WriteCommitGraph(ctx, wikiPath); err != nil {
if err := gitrepo.WriteCommitGraph(ctx, storageRepo); err != nil {
cleanIncompleteWikiPath()
return "", err
}
defaultBranch, err := gitrepo.GetDefaultBranch(ctx, repo.WikiStorageRepo())
defaultBranch, err := gitrepo.GetDefaultBranch(ctx, storageRepo)
if err != nil {
cleanIncompleteWikiPath()
return "", fmt.Errorf("failed to get wiki repo default branch for %q, err: %w", wikiPath, err)
return "", fmt.Errorf("failed to get wiki repo default branch for %q, err: %w", storageRepo.RelativePath(), err)
}
return defaultBranch, nil

View File

@ -107,16 +107,18 @@ func transferOwnership(ctx context.Context, doer *user_model.User, newOwnerName
}
if repoRenamed {
if err := util.Rename(repo_model.RepoPath(newOwnerName, repo.Name), repo_model.RepoPath(oldOwnerName, repo.Name)); err != nil {
oldRelativePath, newRelativePath := repo_model.RelativePath(newOwnerName, repo.Name), repo_model.RelativePath(oldOwnerName, repo.Name)
if err := gitrepo.RenameRepository(ctx, repo_model.StorageRepo(oldRelativePath), repo_model.StorageRepo(newRelativePath)); err != nil {
log.Critical("Unable to move repository %s/%s directory from %s back to correct place %s: %v", oldOwnerName, repo.Name,
repo_model.RepoPath(newOwnerName, repo.Name), repo_model.RepoPath(oldOwnerName, repo.Name), err)
oldRelativePath, newRelativePath, err)
}
}
if wikiRenamed {
if err := util.Rename(repo_model.WikiPath(newOwnerName, repo.Name), repo_model.WikiPath(oldOwnerName, repo.Name)); err != nil {
oldRelativePath, newRelativePath := repo_model.RelativeWikiPath(newOwnerName, repo.Name), repo_model.RelativeWikiPath(oldOwnerName, repo.Name)
if err := gitrepo.RenameRepository(ctx, repo_model.StorageRepo(oldRelativePath), repo_model.StorageRepo(newRelativePath)); err != nil {
log.Critical("Unable to move wiki for repository %s/%s directory from %s back to correct place %s: %v", oldOwnerName, repo.Name,
repo_model.WikiPath(newOwnerName, repo.Name), repo_model.WikiPath(oldOwnerName, repo.Name), err)
oldRelativePath, newRelativePath, err)
}
}
@ -289,12 +291,12 @@ func transferOwnership(ctx context.Context, doer *user_model.User, newOwnerName
repoRenamed = true
// Rename remote wiki repository to new path and delete local copy.
wikiPath := repo_model.WikiPath(oldOwner.Name, repo.Name)
if isExist, err := util.IsExist(wikiPath); err != nil {
log.Error("Unable to check if %s exists. Error: %v", wikiPath, err)
wikiStorageRepo := repo_model.StorageRepo(repo_model.RelativeWikiPath(oldOwner.Name, repo.Name))
if isExist, err := gitrepo.IsRepositoryExist(ctx, wikiStorageRepo); err != nil {
log.Error("Unable to check if %s exists. Error: %v", wikiStorageRepo.RelativePath(), err)
return err
} else if isExist {
if err := util.Rename(wikiPath, repo_model.WikiPath(newOwner.Name, repo.Name)); err != nil {
if err := gitrepo.RenameRepository(ctx, wikiStorageRepo, repo_model.StorageRepo(repo_model.RelativeWikiPath(newOwner.Name, repo.Name))); err != nil {
return fmt.Errorf("rename repository wiki: %w", err)
}
wikiRenamed = true

View File

@ -120,7 +120,7 @@ func updateWikiPage(ctx context.Context, doer *user_model.User, repo *repo_model
cloneOpts.Branch = repo.DefaultWikiBranch
}
if err := git.Clone(ctx, repo.WikiPath(), basePath, cloneOpts); err != nil {
if err := gitrepo.CloneRepoToLocal(ctx, repo.WikiStorageRepo(), basePath, cloneOpts); err != nil {
log.Error("Failed to clone repository: %s (%v)", repo.FullName(), err)
return fmt.Errorf("failed to clone repository: %s (%w)", repo.FullName(), err)
}
@ -269,7 +269,7 @@ func DeleteWikiPage(ctx context.Context, doer *user_model.User, repo *repo_model
}
defer cleanup()
if err := git.Clone(ctx, repo.WikiPath(), basePath, git.CloneRepoOptions{
if err := gitrepo.CloneRepoToLocal(ctx, repo.WikiStorageRepo(), basePath, git.CloneRepoOptions{
Bare: true,
Shared: true,
Branch: repo.DefaultWikiBranch,

View File

@ -252,9 +252,9 @@
{{end}}
{{if .CacheConn}}
<dt>{{ctx.Locale.Tr "admin.config.cache_conn"}}</dt>
<dd><code>{{.CacheConn}}</code></dd>
<dd>{{.CacheConn}}</dd>
<dt>{{ctx.Locale.Tr "admin.config.cache_item_ttl"}}</dt>
<dd><code>{{.CacheItemTTL}}</code></dd>
<dd>{{.CacheItemTTL}}</dd>
{{end}}
<div class="divider"></div>
<dt class="tw-py-1 tw-flex tw-items-center">{{ctx.Locale.Tr "admin.config.cache_test"}}</dt>
@ -275,7 +275,7 @@
<dt>{{ctx.Locale.Tr "admin.config.session_provider"}}</dt>
<dd>{{.SessionConfig.Provider}}</dd>
<dt>{{ctx.Locale.Tr "admin.config.provider_config"}}</dt>
<dd><code>{{if .SessionConfig.ProviderConfig}}{{.SessionConfig.ProviderConfig}}{{else}}-{{end}}</code></dd>
<dd>{{if .SessionConfig.ProviderConfig}}{{.SessionConfig.ProviderConfig}}{{else}}-{{end}}</dd>
<dt>{{ctx.Locale.Tr "admin.config.cookie_name"}}</dt>
<dd>{{.SessionConfig.CookieName}}</dd>
<dt>{{ctx.Locale.Tr "admin.config.gc_interval_time"}}</dt>
@ -301,7 +301,7 @@
<dt>{{ctx.Locale.Tr "admin.config.git_max_diff_files"}}</dt>
<dd>{{.Git.MaxGitDiffFiles}}</dd>
<dt>{{ctx.Locale.Tr "admin.config.git_gc_args"}}</dt>
<dd><code>{{.Git.GCArgs}}</code></dd>
<dd>{{.Git.GCArgs}}</dd>
<div class="divider"></div>
@ -330,7 +330,7 @@
{{if .Loggers.access.IsEnabled}}
<dt>{{ctx.Locale.Tr "admin.config.access_log_template"}}</dt>
<dd><code>{{$.AccessLogTemplate}}</code></dd>
<dd>{{$.AccessLogTemplate}}</dd>
{{end}}
{{range $loggerName, $loggerDetail := .Loggers}}

View File

@ -46,8 +46,8 @@
<div class="item tw-flex tw-items-center">
<span class="icon tw-mr-4">{{svg "octicon-dot-fill" 16}}</span>
<div class="content tw-flex-1">
<div class="header"><code>{{.Function}}</code></div>
<div class="description"><code>{{.File}}:{{.Line}}</code></div>
<div class="header">{{.Function}}</div>
<div class="description">{{.File}}:{{.Line}}</div>
</div>
</div>
{{end}}

View File

@ -2,7 +2,7 @@
{{if .Details}}
<details>
<summary>{{.Summary}}</summary>
<code>{{.Details | SanitizeHTML}}</code>
{{.Details | SanitizeHTML}}
</details>
{{else}}
<div>

View File

@ -65,7 +65,9 @@
</div>
<div class="ui container project-description">
{{$.Project.RenderedContent}}
<div class="render-content markup">
{{$.Project.RenderedContent}}
</div>
<div class="divider"></div>
</div>

View File

@ -28,9 +28,9 @@
</div>
<div class="flex-item-trailing">
{{if $run.IsRefDeleted}}
<span class="ui label run-list-ref gt-ellipsis tw-line-through" data-tooltip-content="{{$run.PrettyRef}}">{{$run.PrettyRef}}</span>
<span class="ui label run-list-ref gt-ellipsis tw-line-through" data-tooltip-content="{{$run.RefTooltip}}">{{$run.PrettyRef}}</span>
{{else}}
<a class="ui label run-list-ref gt-ellipsis" href="{{$run.RefLink}}" data-tooltip-content="{{$run.PrettyRef}}">{{$run.PrettyRef}}</a>
<a class="ui label run-list-ref gt-ellipsis" href="{{$run.RefLink}}" data-tooltip-content="{{$run.RefTooltip}}">{{$run.PrettyRef}}</a>
{{end}}
<div class="run-list-item-right">
<div class="run-list-meta">{{svg "octicon-calendar" 16}}{{DateUtils.TimeSince $run.Updated}}</div>

View File

@ -12,11 +12,11 @@
{{else if eq .status "cancelled"}}
{{svg "octicon-stop" $size (printf "text grey %s" $className)}}
{{else if eq .status "waiting"}}
{{svg "octicon-clock" $size (printf "text yellow %s" $className)}}
{{svg "octicon-circle" $size (printf "text grey %s" $className)}}
{{else if eq .status "blocked"}}
{{svg "octicon-blocked" $size (printf "text yellow %s" $className)}}
{{else if eq .status "running"}}
{{svg "octicon-meter" $size (printf "text yellow circular-spin %s" $className)}}
{{svg "gitea-running" $size (printf "text yellow circular-spin %s" $className)}}
{{else}}{{/*failure, unknown*/}}
{{svg "octicon-x-circle-fill" $size (printf "text red %s" $className)}}
{{end}}

View File

@ -11,11 +11,11 @@
{{end}}
{{if ne .FileSize nil}}
<div class="file-info-entry">
{{FileSize .FileSize}}{{if .IsLFSFile}}<span class="ui label">LFS</span>{{end}}
<span class="file-info-size">{{FileSize .FileSize}}</span>{{if .IsLFSFile}}<span class="ui label">LFS</span>{{end}}
</div>
{{end}}
{{if .LFSLock}}
<div class="file-info-entry ui" data-tooltip-content="{{.LFSLockHint}}">
<div class="file-info-entry" data-tooltip-content="{{.LFSLockHint}}">
{{svg "octicon-lock" 16 "tw-mr-1"}}
<a href="{{.LFSLockOwnerHomeLink}}">{{.LFSLockOwner}}</a>
</div>

View File

@ -39,7 +39,7 @@
<tbody>
<tr>
<td class="lines-num">{{.LineNums}}</td>
<td class="lines-code"><pre><code class="{{.HighlightClass}}"><ol>{{.FileContent}}</ol></code></pre></td>
<td class="lines-code"><pre>{{.FileContent}}</pre></td>
</tr>
</tbody>
</table>

View File

@ -68,7 +68,7 @@
{{range $key, $val := .RequestInfo.Headers}}<strong>{{$key}}:</strong> {{$val}}
{{end}}</pre>
<h5>{{ctx.Locale.Tr "repo.settings.webhook.payload"}}</h5>
<pre class="webhook-info"><code class="json">{{or .RequestInfo.Body .PayloadContent}}</code></pre>
<pre class="webhook-info">{{or .RequestInfo.Body .PayloadContent}}</pre>
{{else}}
-
{{end}}
@ -79,7 +79,7 @@
<pre class="webhook-info">{{range $key, $val := .ResponseInfo.Headers}}<strong>{{$key}}:</strong> {{$val}}
{{end}}</pre>
<h5>{{ctx.Locale.Tr "repo.settings.webhook.body"}}</h5>
<pre class="webhook-info"><code>{{.ResponseInfo.Body}}</code></pre>
<pre class="webhook-info">{{.ResponseInfo.Body}}</pre>
{{else}}
-
{{end}}

View File

@ -7844,7 +7844,7 @@
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/UpdateFileOptions"
"$ref": "#/definitions/ApplyDiffPatchFileOptions"
}
}
],
@ -21645,6 +21645,54 @@
},
"x-go-package": "code.gitea.io/gitea/modules/structs"
},
"ApplyDiffPatchFileOptions": {
"description": "ApplyDiffPatchFileOptions options for applying a diff patch\nNote: `author` and `committer` are optional (if only one is given, it will be used for the other, otherwise the authenticated user will be used)",
"type": "object",
"required": [
"content"
],
"properties": {
"author": {
"$ref": "#/definitions/Identity"
},
"branch": {
"description": "branch (optional) is the base branch for the changes. If not supplied, the default branch is used",
"type": "string",
"x-go-name": "BranchName"
},
"committer": {
"$ref": "#/definitions/Identity"
},
"content": {
"type": "string",
"x-go-name": "Content"
},
"dates": {
"$ref": "#/definitions/CommitDateOptions"
},
"force_push": {
"description": "force_push (optional) will do a force-push if the new branch already exists",
"type": "boolean",
"x-go-name": "ForcePush"
},
"message": {
"description": "message (optional) is the commit message of the changes. If not supplied, a default message will be used",
"type": "string",
"x-go-name": "Message"
},
"new_branch": {
"description": "new_branch (optional) will make a new branch from base branch for the changes. If not supplied, the changes will be committed to the base branch",
"type": "string",
"x-go-name": "NewBranchName"
},
"signoff": {
"description": "Add a Signed-off-by trailer by the committer at the end of the commit log message.",
"type": "boolean",
"x-go-name": "Signoff"
}
},
"x-go-package": "code.gitea.io/gitea/modules/structs"
},
"Attachment": {
"description": "Attachment a generic attachment",
"type": "object",

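The new ApplyDiffPatchFileOptions definition above is consumed by the POST /api/v1/repos/{owner}/{repo}/diffpatch endpoint that the integration test later in this diff exercises. As a hedged client-side sketch (the base URL, owner, repo, token, branch and commit message below are placeholder values; only the endpoint path and the JSON field names are taken from this diff), applying a patch over the API could look like:

package giteaclient // hypothetical example package, not part of this change

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// ApplyDiffPatch POSTs a unified diff to Gitea's diffpatch endpoint.
// The JSON field names mirror the ApplyDiffPatchFileOptions schema above.
func ApplyDiffPatch(baseURL, owner, repo, token, branch, patch string) error {
	body, err := json.Marshal(map[string]any{
		"branch":  branch,                // optional: base branch for the change
		"message": "Apply patch via API", // optional commit message
		"content": patch,                 // required: the diff itself
	})
	if err != nil {
		return err
	}
	url := fmt.Sprintf("%s/api/v1/repos/%s/%s/diffpatch", baseURL, owner, repo)
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "token "+token)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}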
View File

@ -21,8 +21,8 @@
<label for="token">{{ctx.Locale.Tr "settings.gpg_token"}}</label>
<input readonly="" value="{{.TokenToSign}}">
<div class="help">
<p>{{ctx.Locale.Tr "settings.gpg_token_help"}}</p>
<p><code>{{printf `echo "%s" | gpg -a --default-key %s --detach-sig` .TokenToSign .PaddedKeyID}}</code></p>
{{ctx.Locale.Tr "settings.gpg_token_help"}}
<pre class="command-block">{{printf `echo "%s" | gpg -a --default-key %s --detach-sig` .TokenToSign .PaddedKeyID}}</pre>
</div>
</div>
<div class="field">
@ -89,8 +89,8 @@
<label for="token">{{ctx.Locale.Tr "settings.gpg_token"}}</label>
<input readonly="" value="{{$.TokenToSign}}">
<div class="help">
<p>{{ctx.Locale.Tr "settings.gpg_token_help"}}</p>
<p><code>{{printf `echo "%s" | gpg -a --default-key %s --detach-sig` $.TokenToSign .PaddedKeyID}}</code></p>
{{ctx.Locale.Tr "settings.gpg_token_help"}}
<pre class="command-block">{{printf `echo "%s" | gpg -a --default-key %s --detach-sig` $.TokenToSign .PaddedKeyID}}</pre>
</div>
<br>
</div>

View File

@ -77,16 +77,15 @@
<label for="token">{{ctx.Locale.Tr "settings.ssh_token"}}</label>
<input readonly="" value="{{$.TokenToSign}}">
<div class="help">
<p>{{ctx.Locale.Tr "settings.ssh_token_help"}}</p>
<p><code>echo -n '{{$.TokenToSign}}' | ssh-keygen -Y sign -n gitea -f /path_to_PrivateKey_or_RelatedPublicKey</code></p>
{{ctx.Locale.Tr "settings.ssh_token_help"}}
<pre class="command-block">echo -n '{{$.TokenToSign}}' | ssh-keygen -Y sign -n gitea -f /path_to_PrivateKey_or_RelatedPublicKey</pre>
<details>
<summary>Windows PowerShell</summary>
<p><code>cmd /c "&lt;NUL set /p=`"{{$.TokenToSign}}`"| ssh-keygen -Y sign -n gitea -f /path_to_PrivateKey_or_RelatedPublicKey"</code></p>
<pre class="command-block">cmd /c "&lt;NUL set /p=`"{{$.TokenToSign}}`"| ssh-keygen -Y sign -n gitea -f /path_to_PrivateKey_or_RelatedPublicKey"</pre>
</details>
<br>
<details>
<summary>Windows CMD</summary>
<p><code>set /p={{$.TokenToSign}}| ssh-keygen -Y sign -n gitea -f /path_to_PrivateKey_or_RelatedPublicKey</code></p>
<pre class="command-block">set /p={{$.TokenToSign}}| ssh-keygen -Y sign -n gitea -f /path_to_PrivateKey_or_RelatedPublicKey</pre>
</details>
</div>
<br>

File diff suppressed because it is too large

View File

@ -0,0 +1,89 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package integration

import (
"fmt"
"net/http"
"net/url"
"testing"
auth_model "code.gitea.io/gitea/models/auth"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unittest"
user_model "code.gitea.io/gitea/models/user"
api "code.gitea.io/gitea/modules/structs"
"github.com/stretchr/testify/assert"
)

func getApplyDiffPatchFileOptions() *api.ApplyDiffPatchFileOptions {
return &api.ApplyDiffPatchFileOptions{
FileOptions: api.FileOptions{
BranchName: "master",
},
Content: `diff --git a/patch-file-1.txt b/patch-file-1.txt
new file mode 100644
index 0000000000..aaaaaaaaaa
--- /dev/null
+++ b/patch-file-1.txt
@@ -0,0 +1 @@
+File 1
`,
}
}

func TestAPIApplyDiffPatchFileOptions(t *testing.T) {
onGiteaRun(t, func(t *testing.T, u *url.URL) {
user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}) // owner of repo1 & repo16
org3 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3}) // owner of repo3, which is an org
user4 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 4}) // owner of neither repo
repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}) // public repo
repo3 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 3}) // public repo
repo16 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 16}) // private repo
session2 := loginUser(t, user2.Name)
token2 := getTokenForLoggedInUser(t, session2, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser)
session4 := loginUser(t, user4.Name)
token4 := getTokenForLoggedInUser(t, session4, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser)
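// Apply the patch to repo1 as its owner (user2): expect 201 Created, a commit link in the response, and the new file readable via the raw endpoint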
req := NewRequestWithJSON(t, "POST", "/api/v1/repos/user2/repo1/diffpatch", getApplyDiffPatchFileOptions()).AddTokenAuth(token2)
resp := MakeRequest(t, req, http.StatusCreated)
var fileResponse api.FileResponse
DecodeJSON(t, resp, &fileResponse)
assert.Nil(t, fileResponse.Content)
assert.NotEmpty(t, fileResponse.Commit.HTMLURL)
req = NewRequest(t, "GET", "/api/v1/repos/user2/repo1/raw/patch-file-1.txt")
resp = MakeRequest(t, req, http.StatusOK)
assert.Equal(t, "File 1\n", resp.Body.String())
// Test applying a patch to private repo16 by user4, who has no access to it
req = NewRequestWithJSON(t, "POST", fmt.Sprintf("/api/v1/repos/%s/%s/diffpatch", user2.Name, repo16.Name), getApplyDiffPatchFileOptions()).
AddTokenAuth(token4)
MakeRequest(t, req, http.StatusNotFound)
// Test the same private repo with no token given, so the request should fail
req = NewRequestWithJSON(t, "POST", fmt.Sprintf("/api/v1/repos/%s/%s/diffpatch", user2.Name, repo16.Name), getApplyDiffPatchFileOptions())
MakeRequest(t, req, http.StatusNotFound)
// Test using access token for a private repo that the user of the token owns
req = NewRequestWithJSON(t, "POST", fmt.Sprintf("/api/v1/repos/%s/%s/diffpatch", user2.Name, repo16.Name), getApplyDiffPatchFileOptions()).
AddTokenAuth(token2)
MakeRequest(t, req, http.StatusCreated)
// Test using org repo "org3/repo3" where user2 is a collaborator
req = NewRequestWithJSON(t, "POST", fmt.Sprintf("/api/v1/repos/%s/%s/diffpatch", org3.Name, repo3.Name), getApplyDiffPatchFileOptions()).
AddTokenAuth(token2)
MakeRequest(t, req, http.StatusCreated)
// Test using org repo "org3/repo3" with no user token
req = NewRequestWithJSON(t, "POST", fmt.Sprintf("/api/v1/repos/%s/%s/diffpatch", org3.Name, repo3.Name), getApplyDiffPatchFileOptions())
MakeRequest(t, req, http.StatusNotFound)
// Test using repo "user2/repo1" where user4 is a NOT collaborator
req = NewRequestWithJSON(t, "POST", fmt.Sprintf("/api/v1/repos/%s/%s/diffpatch", user2.Name, repo1.Name), getApplyDiffPatchFileOptions()).
AddTokenAuth(token4)
MakeRequest(t, req, http.StatusForbidden)
})
}

View File

@ -5,6 +5,8 @@ package integration
import (
"net/url"
"os"
"path/filepath"
"slices"
"strings"
"sync"
@ -23,7 +25,8 @@ import (
func TestGitLFSSSH(t *testing.T) {
onGiteaRun(t, func(t *testing.T, u *url.URL) {
dstPath := t.TempDir()
localRepoForUpload := filepath.Join(t.TempDir(), "test-upload")
localRepoForDownload := filepath.Join(t.TempDir(), "test-download")
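// two separate working copies: one to push LFS content from, one to later verify downloads into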
apiTestContext := NewAPITestContext(t, "user2", "repo1", auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser)
var mu sync.Mutex
@ -37,7 +40,7 @@ func TestGitLFSSSH(t *testing.T) {
withKeyFile(t, "my-testing-key", func(keyFile string) {
t.Run("CreateUserKey", doAPICreateUserKey(apiTestContext, "test-key", keyFile))
cloneURL := createSSHUrl(apiTestContext.GitPath(), u)
t.Run("Clone", doGitClone(dstPath, cloneURL))
t.Run("CloneOrigin", doGitClone(localRepoForUpload, cloneURL))
cfg, err := setting.CfgProvider.PrepareSaving()
require.NoError(t, err)
@ -46,10 +49,15 @@ func TestGitLFSSSH(t *testing.T) {
require.NoError(t, cfg.Save())
_, _, cmdErr := gitcmd.NewCommand("config", "lfs.sshtransfer", "always").
WithDir(dstPath).
WithDir(localRepoForUpload).
RunStdString(t.Context())
assert.NoError(t, cmdErr)
lfsCommitAndPushTest(t, dstPath, 10)
pushedFiles := lfsCommitAndPushTest(t, localRepoForUpload, 10)
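// Clone a second working copy and verify that the pushed LFS content comes back intact over the SSH transfer path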
t.Run("CloneLFS", doGitClone(localRepoForDownload, cloneURL))
content, err := os.ReadFile(filepath.Join(localRepoForDownload, pushedFiles[0]))
assert.NoError(t, err)
assert.Len(t, content, 10)
})
countBatch := slices.ContainsFunc(routerCalls, func(s string) bool {
@ -58,12 +66,16 @@ func TestGitLFSSSH(t *testing.T) {
countUpload := slices.ContainsFunc(routerCalls, func(s string) bool {
return strings.Contains(s, "PUT /api/internal/repo/user2/repo1.git/info/lfs/objects/")
})
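// The second clone should fetch the LFS objects back, so a download request must show up in the internal LFS routes as well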
countDownload := slices.ContainsFunc(routerCalls, func(s string) bool {
return strings.Contains(s, "GET /api/internal/repo/user2/repo1.git/info/lfs/objects/")
})
nonAPIRequests := slices.ContainsFunc(routerCalls, func(s string) bool {
fields := strings.Fields(s)
return !strings.HasPrefix(fields[1], "/api/")
})
assert.NotZero(t, countBatch)
assert.NotZero(t, countUpload)
assert.NotZero(t, countDownload)
assert.Zero(t, nonAPIRequests)
})
}

View File

@ -72,6 +72,8 @@ func TestLFSRender(t *testing.T) {
fileInfo := doc.Find("div.file-info-entry").First().Text()
assert.Contains(t, fileInfo, "LFS")
fileSize := doc.Find("div.file-info-entry > .file-info-size").Text()
assert.Equal(t, "2.0 KiB", fileSize)
// find new file view container
fileViewContainer := doc.Find("[data-global-init=initRepoFileView]")

Some files were not shown because too many files have changed in this diff