mirror of
https://github.com/go-gitea/gitea.git
synced 2025-12-16 22:15:43 +08:00
Merge de1b2d8ab8 into f25409fab8
This commit is contained in:
commit
be6fc710b0
@ -1083,6 +1083,15 @@ LEVEL = Info
|
||||
|
||||
;; Allow to fork repositories without maximum number limit
|
||||
;ALLOW_FORK_WITHOUT_MAXIMUM_LIMIT = true
|
||||
;
|
||||
;; Enable applying a global size limit defined by REPO_SIZE_LIMIT. Each repository can have a value that overrides the global limit
|
||||
;; "false" means no limit will be enforced, even if specified on a repository
|
||||
;ENABLE_SIZE_LIMIT = false
|
||||
;
|
||||
;; Specify a global repository size limit in bytes to apply for each repository. 0 - No limit
|
||||
;; If repository has its own limit set in UI it will override the global setting
|
||||
;; Standard units of measurements for size can be used like B, KB, KiB, ... , EB, EiB, ...
|
||||
;REPO_SIZE_LIMIT = 0
|
||||
|
||||
;; Allow to fork repositories into the same owner (user or organization)
|
||||
;; This feature is experimental, not fully tested, and may be changed in the future
|
||||
|
||||
52
docs/content/administration/repo-size-limit.en-us.md
Normal file
52
docs/content/administration/repo-size-limit.en-us.md
Normal file
@ -0,0 +1,52 @@
|
||||
---
|
||||
date: "2023-05-24T13:00:00+00:00"
|
||||
title: "Per repository size limit"
|
||||
slug: "repo-size-limit"
|
||||
weight: 12
|
||||
toc: false
|
||||
draft: false
|
||||
aliases:
|
||||
- /en-us/repo-size-limit
|
||||
menu:
|
||||
sidebar:
|
||||
parent: "administration"
|
||||
name: "Per repository size limit"
|
||||
weight: 12
|
||||
identifier: "repo-size-limit"
|
||||
---
|
||||
|
||||
# Gitea per repository size limit setup
|
||||
|
||||
To use Gitea's experimental built-in per-repository size limit support, an administrator must update the `app.ini` file:
|
||||
|
||||
```ini
|
||||
;; Enable applying a global size limit defined by REPO_SIZE_LIMIT. Each repository can have a value that overrides the global limit
|
||||
;; "false" means no limit will be enforced, even if specified on a repository
|
||||
ENABLE_SIZE_LIMIT = true
|
||||
|
||||
;; Specify a global repository size limit in bytes to apply for each repository. 0 - No limit
|
||||
;; If repository has its own limit set in UI it will override the global setting
|
||||
;; Standard units of measurements for size can be used like B, KB, KiB, ... , EB, EiB, ...
|
||||
REPO_SIZE_LIMIT = 500 MB
|
||||
|
||||
This setting is persistent.
|
||||
|
||||
The size limitation is triggered when repository `disk size` + `new commit size` > `defined repository size limit`
|
||||
|
||||
If the size limit is exceeded, the feature prevents pushes that would increase the repository's
size on disk of the Gitea server, while still allowing pushes that decrease it.
|
||||
|
||||
# Gitea per repository size limit setup in UI
|
||||
|
||||
1. For Gitea admin it is possible during runtime to enable/disable limit size feature, change the global size limit on the fly.
|
||||
**This setting is not persistent across restarts**
|
||||
|
||||
`Admin panel/Site settings` -> `Repository management`
|
||||
|
||||
Persistence can be achieved if the limit is maintained by editing the `app.ini` file
|
||||
|
||||
2. The individually set per repository limit in `Settings` of the
|
||||
repository would take precedence over global limit when the size limit
|
||||
feature is enabled. Only admin can modify those limits
|
||||
|
||||
**Note**: Size checking for large repositories is a time-consuming operation, so a push under the size limit may take up to a minute longer depending on your server hardware
|
||||
@ -398,6 +398,9 @@ func prepareMigrationTasks() []*migration {
|
||||
// Gitea 1.25.0 ends at migration ID number 322 (database version 323)
|
||||
|
||||
newMigration(323, "Add support for actions concurrency", v1_26.AddActionsConcurrency),
|
||||
|
||||
// to modify later
|
||||
newMigration(324, "Add size limit on repository", v1_26.AddSizeLimitOnRepo),
|
||||
}
|
||||
return preparedMigrations
|
||||
}
|
||||
|
||||
17
models/migrations/v1_26/v999.go
Normal file
17
models/migrations/v1_26/v999.go
Normal file
@ -0,0 +1,17 @@
|
||||
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package v1_26
|
||||
|
||||
import (
|
||||
"xorm.io/xorm"
|
||||
)
|
||||
|
||||
func AddSizeLimitOnRepo(x *xorm.Engine) error {
|
||||
type Repository struct {
|
||||
ID int64 `xorm:"pk autoincr"`
|
||||
SizeLimit int64 `xorm:"NOT NULL DEFAULT 0"`
|
||||
}
|
||||
|
||||
return x.Sync2(new(Repository))
|
||||
}
|
||||
@ -201,7 +201,9 @@ type Repository struct {
|
||||
BaseRepo *Repository `xorm:"-"`
|
||||
IsTemplate bool `xorm:"INDEX NOT NULL DEFAULT false"`
|
||||
TemplateID int64 `xorm:"INDEX"`
|
||||
SizeLimit int64 `xorm:"NOT NULL DEFAULT 0"`
|
||||
Size int64 `xorm:"NOT NULL DEFAULT 0"`
|
||||
EnableSizeLimit bool `xorm:"NOT NULL DEFAULT true"`
|
||||
GitSize int64 `xorm:"NOT NULL DEFAULT 0"`
|
||||
LFSSize int64 `xorm:"NOT NULL DEFAULT 0"`
|
||||
CodeIndexerStatus *RepoIndexerStatus `xorm:"-"`
|
||||
@ -630,6 +632,27 @@ func (repo *Repository) IsOwnedBy(userID int64) bool {
|
||||
return repo.OwnerID == userID
|
||||
}
|
||||
|
||||
// GetActualSizeLimit returns repository size limit in bytes
|
||||
// or global repository limit setting if per repository size limit is not set
|
||||
func (repo *Repository) GetActualSizeLimit() int64 {
|
||||
sizeLimit := repo.SizeLimit
|
||||
if setting.RepoSizeLimit > 0 && sizeLimit == 0 {
|
||||
sizeLimit = setting.RepoSizeLimit
|
||||
}
|
||||
return sizeLimit
|
||||
}
|
||||
|
||||
// IsRepoSizeOversized returns true if the size-limit feature is enabled and
// adding additionalSize bytes to the repository's current git size would
// exceed its effective size limit (see GetActualSizeLimit).
func (repo *Repository) IsRepoSizeOversized(additionalSize int64) bool {
	return setting.EnableSizeLimit && repo.GetActualSizeLimit() > 0 && repo.GitSize+additionalSize > repo.GetActualSizeLimit()
}
|
||||
|
||||
// IsRepoSizeLimitEnabled returns true if size-limit checking is enabled
// globally and the effective limit for this repository is non-zero.
// It is used to enable size checking during the pre-receive hook.
func (repo *Repository) IsRepoSizeLimitEnabled() bool {
	return setting.EnableSizeLimit && repo.GetActualSizeLimit() > 0
}
|
||||
|
||||
// CanCreateBranch returns true if repository meets the requirements for creating new branches.
|
||||
func (repo *Repository) CanCreateBranch() bool {
|
||||
return !repo.IsMirror
|
||||
|
||||
@ -95,6 +95,13 @@ func FileSize(s int64) string {
|
||||
return humanize.IBytes(uint64(s))
|
||||
}
|
||||
|
||||
// Get FileSize bytes value from String.
|
||||
func GetFileSize(s string) (int64, error) {
|
||||
v, err := humanize.ParseBytes(s)
|
||||
iv := int64(v)
|
||||
return iv, err
|
||||
}
|
||||
|
||||
// StringsToInt64s converts a slice of string to a slice of int64.
|
||||
func StringsToInt64s(strs []string) ([]int64, error) {
|
||||
if strs == nil {
|
||||
|
||||
@ -91,6 +91,13 @@ func TestFileSize(t *testing.T) {
|
||||
assert.Equal(t, "2.0 EiB", FileSize(size))
|
||||
}
|
||||
|
||||
func TestGetFileSize(t *testing.T) {
|
||||
var size int64 = 512 * 1024 * 1024 * 1024
|
||||
s, err := GetFileSize("512 GiB")
|
||||
assert.Equal(t, s, size)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestStringsToInt64s(t *testing.T) {
|
||||
testSuccess := func(input []string, expected []int64) {
|
||||
result, err := StringsToInt64s(input)
|
||||
|
||||
@ -229,6 +229,74 @@ func Push(ctx context.Context, repoPath string, opts PushOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CountObject represents the statistics reported by `git count-objects -v`.
// The Size* fields are converted from git's KiB output to bytes by parseSize.
type CountObject struct {
	Count       int64 // number of loose objects
	Size        int64 // disk space consumed by loose objects, in bytes
	InPack      int64 // number of objects held in pack files
	Packs       int64 // number of pack files
	SizePack    int64 // disk space consumed by the packs, in bytes
	PrunePack   int64 // number of loose objects that are also present in packs
	Garbage     int64 // number of files in the object database not recognized by git
	SizeGarbage int64 // disk space consumed by garbage files, in bytes
}
||||
|
||||
// Field prefixes in the key/value output of `git count-objects -v`.
const (
	statCount  = "count: "
	statSize   = "size: "
	statInpack = "in-pack: "
	statPacks  = "packs: "
	// git prints "prune-packable", not "prune-package"; the previous prefix
	// never matched, so CountObject.PrunePack stayed permanently zero.
	statSizePack     = "size-pack: "
	statPrunePackage = "prune-packable: "
	statGarbage      = "garbage: "
	statSizeGarbage  = "size-garbage: "
)
||||
|
||||
// CountObjects returns the results of `git count-objects -v` on repoPath,
// using the process's default environment. See CountObjectsWithEnv for the
// variant that accepts a custom environment.
func CountObjects(ctx context.Context, repoPath string) (*CountObject, error) {
	return CountObjectsWithEnv(ctx, repoPath, nil)
}
|
||||
|
||||
// CountObjectsWithEnv returns the results of git count-objects on the repoPath with custom env setup
|
||||
func CountObjectsWithEnv(ctx context.Context, repoPath string, env []string) (*CountObject, error) {
|
||||
cmd := gitcmd.NewCommand("count-objects", "-v")
|
||||
stdout, _, err := cmd.WithDir(repoPath).WithEnv(env).RunStdString(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return parseSize(stdout), nil
|
||||
}
|
||||
|
||||
// parseSize parses the output from count-objects and return a CountObject
|
||||
func parseSize(objects string) *CountObject {
|
||||
repoSize := new(CountObject)
|
||||
for line := range strings.SplitSeq(objects, "\n") {
|
||||
switch {
|
||||
case strings.HasPrefix(line, statCount):
|
||||
repoSize.Count, _ = strconv.ParseInt(line[7:], 10, 64)
|
||||
case strings.HasPrefix(line, statSize):
|
||||
number, _ := strconv.ParseInt(line[6:], 10, 64)
|
||||
repoSize.Size = number * 1024
|
||||
case strings.HasPrefix(line, statInpack):
|
||||
repoSize.InPack, _ = strconv.ParseInt(line[9:], 10, 64)
|
||||
case strings.HasPrefix(line, statPacks):
|
||||
repoSize.Packs, _ = strconv.ParseInt(line[7:], 10, 64)
|
||||
case strings.HasPrefix(line, statSizePack):
|
||||
number, _ := strconv.ParseInt(line[11:], 10, 64)
|
||||
repoSize.SizePack = number * 1024
|
||||
case strings.HasPrefix(line, statPrunePackage):
|
||||
repoSize.PrunePack, _ = strconv.ParseInt(line[16:], 10, 64)
|
||||
case strings.HasPrefix(line, statGarbage):
|
||||
repoSize.Garbage, _ = strconv.ParseInt(line[9:], 10, 64)
|
||||
case strings.HasPrefix(line, statSizeGarbage):
|
||||
number, _ := strconv.ParseInt(line[14:], 10, 64)
|
||||
repoSize.SizeGarbage = number * 1024
|
||||
}
|
||||
}
|
||||
return repoSize
|
||||
}
|
||||
|
||||
// GetLatestCommitTime returns time for latest commit in repository (across all branches)
|
||||
func GetLatestCommitTime(ctx context.Context, repoPath string) (time.Time, error) {
|
||||
cmd := gitcmd.NewCommand("for-each-ref", "--sort=-committerdate", BranchPrefix, "--count", "1", "--format=%(committerdate)")
|
||||
|
||||
@ -9,6 +9,8 @@ import (
|
||||
"strings"
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
|
||||
"github.com/dustin/go-humanize"
|
||||
)
|
||||
|
||||
// enumerates all the policy repository creating
|
||||
@ -273,12 +275,35 @@ var (
|
||||
}
|
||||
RepoRootPath string
|
||||
ScriptType = "bash"
|
||||
|
||||
EnableSizeLimit = true
|
||||
RepoSizeLimit int64
|
||||
)
|
||||
|
||||
// SaveGlobalRepositorySetting updates the in-memory global size-limit
// settings and writes them back to the "repository" section of the
// configuration provider.
//
// NOTE(review): the limit is serialized with humanize.Bytes, which rounds to
// SI units (e.g. 524288000 -> "524 MB"), so a value read back from the config
// may differ from what was saved — confirm whether exact byte persistence is
// required here.
// NOTE(review): the function currently always returns nil; the error result
// appears to exist only for future use / interface symmetry.
func SaveGlobalRepositorySetting(enableSizeLimit bool, repoSizeLimit int64) error {
	EnableSizeLimit = enableSizeLimit
	RepoSizeLimit = repoSizeLimit
	sec := CfgProvider.Section("repository")
	if EnableSizeLimit {
		sec.Key("ENABLE_SIZE_LIMIT").SetValue("true")
	} else {
		sec.Key("ENABLE_SIZE_LIMIT").SetValue("false")
	}

	sec.Key("REPO_SIZE_LIMIT").SetValue(humanize.Bytes(uint64(RepoSizeLimit)))
	return nil
}
|
||||
|
||||
func loadRepositoryFrom(rootCfg ConfigProvider) {
|
||||
var err error
|
||||
|
||||
// Determine and create root git repository path.
|
||||
sec := rootCfg.Section("repository")
|
||||
EnableSizeLimit = sec.Key("ENABLE_SIZE_LIMIT").MustBool(false)
|
||||
|
||||
v, _ := humanize.ParseBytes(sec.Key("REPO_SIZE_LIMIT").MustString("0"))
|
||||
RepoSizeLimit = int64(v)
|
||||
|
||||
Repository.DisableHTTPGit = sec.Key("DISABLE_HTTP_GIT").MustBool()
|
||||
Repository.UseCompatSSHURI = sec.Key("USE_COMPAT_SSH_URI").MustBool()
|
||||
Repository.GoGetCloneURLProtocol = sec.Key("GO_GET_CLONE_URL_PROTOCOL").MustString("https")
|
||||
|
||||
@ -153,6 +153,8 @@ type CreateRepoOption struct {
|
||||
// ObjectFormatName of the underlying git repository
|
||||
// enum: sha1,sha256
|
||||
ObjectFormatName string `json:"object_format_name" binding:"MaxSize(6)"`
|
||||
// SizeLimit of the repository
|
||||
SizeLimit int64 `json:"size_limit"`
|
||||
}
|
||||
|
||||
// EditRepoOption options when editing a repository's properties
|
||||
@ -223,6 +225,8 @@ type EditRepoOption struct {
|
||||
DefaultAllowMaintainerEdit *bool `json:"default_allow_maintainer_edit,omitempty"`
|
||||
// set to `true` to archive this repository.
|
||||
Archived *bool `json:"archived,omitempty"`
|
||||
// SizeLimit of the repository.
|
||||
SizeLimit *int64 `json:"size_limit,omitempty"`
|
||||
// set to a string like `8h30m0s` to set the mirror interval time
|
||||
MirrorInterval *string `json:"mirror_interval,omitempty"`
|
||||
// enable prune - remove obsolete remote-tracking references when mirroring
|
||||
|
||||
@ -1052,6 +1052,7 @@ repo_name_profile_public_hint= .profile is a special repository that you can use
|
||||
repo_name_profile_private_hint = .profile-private is a special repository that you can use to add a README.md to your organization member profile, visible only to organization members. Make sure it's private and initialize it with a README in the profile directory to get started.
|
||||
repo_name_helper = Good repository names use short, memorable and unique keywords. A repository named ".profile" or ".profile-private" could be used to add a README.md for the user/organization profile.
|
||||
repo_size = Repository Size
|
||||
repo_size_limit = Repository Size Limit
|
||||
template = Template
|
||||
template_select = Select a template.
|
||||
template_helper = Make repository a template
|
||||
@ -1184,6 +1185,8 @@ form.reach_limit_of_creation_1 = The owner has already reached the limit of %d r
|
||||
form.reach_limit_of_creation_n = The owner has already reached the limit of %d repositories.
|
||||
form.name_reserved = The repository name "%s" is reserved.
|
||||
form.name_pattern_not_allowed = The pattern "%s" is not allowed in a repository name.
|
||||
form.repo_size_limit_negative = Repository size limitation cannot be negative.
|
||||
form.repo_size_limit_only_by_admins = Only administrators can change the repository size limitation.
|
||||
|
||||
need_auth = Authorization
|
||||
migrate_options = Migration Options
|
||||
@ -3446,6 +3449,13 @@ config.enable_federated_avatar = Enable Federated Avatars
|
||||
config.open_with_editor_app_help = The "Open with" editors for the clone menu. If left empty, the default will be used. Expand to see the default.
|
||||
config.git_guide_remote_name = Repository remote name for git commands in the guide
|
||||
|
||||
config.repository_config = Repository Configuration
|
||||
config.enable_size_limit = Enable Size Limit
|
||||
config.repo_size_limit = Default Repository Size Limit
|
||||
config.invalid_repo_size = Invalid repository size %s
|
||||
config.save_repo_size_setting_failed = Failed to save global repository settings %s
|
||||
config.repository_setting_success = Global repository setting has been updated
|
||||
|
||||
config.git_config = Git Configuration
|
||||
config.git_disable_diff_highlight = Disable Diff Syntax Highlight
|
||||
config.git_max_diff_lines = Max Diff Lines (for a single file)
|
||||
|
||||
@ -263,6 +263,7 @@ func CreateUserRepo(ctx *context.APIContext, owner *user_model.User, opt api.Cre
|
||||
TrustModel: repo_model.ToTrustModel(opt.TrustModel),
|
||||
IsTemplate: opt.Template,
|
||||
ObjectFormatName: opt.ObjectFormatName,
|
||||
SizeLimit: opt.SizeLimit,
|
||||
})
|
||||
if err != nil {
|
||||
if repo_model.IsErrRepoAlreadyExist(err) {
|
||||
@ -749,6 +750,10 @@ func updateBasicProperties(ctx *context.APIContext, opts api.EditRepoOption) err
|
||||
}
|
||||
}
|
||||
|
||||
if opts.SizeLimit != nil {
|
||||
repo.SizeLimit = *opts.SizeLimit
|
||||
}
|
||||
|
||||
if err := repo_service.UpdateRepository(ctx, repo, visibilityChanged); err != nil {
|
||||
ctx.APIErrorInternal(err)
|
||||
return err
|
||||
|
||||
@ -8,6 +8,12 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
asymkey_model "code.gitea.io/gitea/models/asymkey"
|
||||
git_model "code.gitea.io/gitea/models/git"
|
||||
@ -16,6 +22,7 @@ import (
|
||||
access_model "code.gitea.io/gitea/models/perm/access"
|
||||
"code.gitea.io/gitea/models/unit"
|
||||
user_model "code.gitea.io/gitea/models/user"
|
||||
"code.gitea.io/gitea/modules/base"
|
||||
"code.gitea.io/gitea/modules/git"
|
||||
"code.gitea.io/gitea/modules/git/gitcmd"
|
||||
"code.gitea.io/gitea/modules/gitrepo"
|
||||
@ -105,8 +112,318 @@ func (ctx *preReceiveContext) AssertCreatePullRequest() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// calculateSizeOfObject calculates the size of one git object via git cat-file -s command
|
||||
func calculateSizeOfObject(ctx *gitea_context.PrivateContext, dir string, env []string, objectID string) (int64, error) {
|
||||
objectSizeStr, _, err := gitcmd.NewCommand("cat-file", "-s").AddDynamicArguments(objectID).WithDir(dir).WithEnv(env).RunStdString(ctx)
|
||||
if err != nil {
|
||||
log.Trace("CalculateSizeOfRemovedObjects: Error during git cat-file -s on object: %s", objectID)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var errParse error
|
||||
var objectSize int64
|
||||
objectSize, errParse = strconv.ParseInt(strings.TrimSpace(objectSizeStr), 10, 64)
|
||||
if errParse != nil {
|
||||
log.Trace("CalculateSizeOfRemovedObjects: Error during ParseInt on string '%s'", objectID)
|
||||
return 0, errParse
|
||||
}
|
||||
return objectSize, nil
|
||||
}
|
||||
|
||||
// calculateSizeOfObjectsFromCache determines how many bytes a push adds to
// and removes from the repository, using the cached object sets and the
// per-object size cache collected earlier in this run.
//
// addedSize sums objects present in the new commit but not in the old one.
// removedSize sums objects present in the old commit but referenced neither
// by the new commit nor anywhere else in the repository.
func calculateSizeOfObjectsFromCache(newCommitObjects, oldCommitObjects, otherCommitObjects map[string]bool, commitObjectsSizes map[string]int64) (addedSize, removedSize int64) {
	// Objects only reachable from the new commit contribute to growth.
	// An object already stored elsewhere in the repo would not actually be
	// written twice by git, so this is an upper bound on the real growth —
	// acceptable, since the goal is to cap repository growth.
	// TODO: exclude objects referenced in other parts of the repo but in
	// neither the old nor the new commit, for a more precise figure.
	for id := range newCommitObjects {
		if !oldCommitObjects[id] {
			addedSize += commitObjectsSizes[id]
		}
	}

	// Objects reachable from the old commit but from nowhere else are the
	// ones git can actually drop; only those count as removed.
	for id := range oldCommitObjects {
		if newCommitObjects[id] || otherCommitObjects[id] {
			continue
		}
		removedSize += commitObjectsSizes[id]
	}

	return addedSize, removedSize
}
|
||||
|
||||
// convertObjectsToMap turns the newline-separated output of
// `git rev-list --objects` into a set of object IDs (the first
// space-separated token of each non-empty line) for O(1) lookup.
func convertObjectsToMap(objects string) map[string]bool {
	objectsMap := make(map[string]bool)
	for _, line := range strings.Split(objects, "\n") {
		if line == "" {
			continue
		}
		// Each line is "<hash> [<path>]"; only the hash is needed.
		id, _, _ := strings.Cut(line, " ")
		objectsMap[id] = true
	}
	return objectsMap
}
|
||||
|
||||
// convertObjectsToSlice turns the newline-separated output of
// `git rev-list --objects` into a slice of object IDs, preserving order.
func convertObjectsToSlice(objects string) (objectIDs []string) {
	for _, line := range strings.Split(objects, "\n") {
		if line == "" {
			continue
		}
		// Each line is "<hash> [<path>]"; keep only the hash.
		id, _, _ := strings.Cut(line, " ")
		objectIDs = append(objectIDs, id)
	}
	return objectIDs
}
|
||||
|
||||
// loadObjectSizesFromPack access all packs that this push or repo has
|
||||
// and load compressed object size in bytes into objectSizes map
|
||||
// using `git verify-pack -v` output
|
||||
func loadObjectSizesFromPack(ctx *gitea_context.PrivateContext, dir string, env, _ []string, objectsSizes map[string]int64) error {
|
||||
// Find the path from GIT_QUARANTINE_PATH environment variable (path to the pack file)
|
||||
var packPath string
|
||||
var errExec error
|
||||
for _, envVar := range env {
|
||||
split := strings.SplitN(envVar, "=", 2)
|
||||
if split[0] == "GIT_QUARANTINE_PATH" {
|
||||
packPath = split[1]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// if no quarantinPath determined we silently ignore
|
||||
if packPath == "" {
|
||||
log.Trace("GIT_QUARANTINE_PATH not found in the environment variables. Will read the pack files from main repo instead")
|
||||
packPath = filepath.Join(ctx.Repo.Repository.RepoPath(), "./objects/")
|
||||
}
|
||||
log.Warn("packPath: %s", packPath)
|
||||
|
||||
// Find all pack files *.idx in the quarantine directory
|
||||
packFiles, err := filepath.Glob(filepath.Join(packPath, "./pack/*.idx"))
|
||||
// if pack file not found we silently ignore
|
||||
if err != nil {
|
||||
log.Trace("Error during finding pack files %s: %v", filepath.Join(packPath, "./pack/*.idx"), err)
|
||||
}
|
||||
|
||||
// Loop over each pack file
|
||||
i := 0
|
||||
for _, packFile := range packFiles {
|
||||
log.Trace("Processing packfile %s", packFile)
|
||||
// Extract and store in cache objectsSizes the sizes of the object parsing output of the `git verify-pack` command
|
||||
output, _, err := gitcmd.NewCommand("verify-pack", "-v").AddDynamicArguments(packFile).WithDir(dir).WithEnv(env).RunStdString(ctx)
|
||||
if err != nil {
|
||||
log.Trace("Error during git verify-pack on pack file: %s", packFile)
|
||||
if errExec == nil {
|
||||
errExec = err
|
||||
} else {
|
||||
errExec = fmt.Errorf("%w; %v", errExec, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Parsing the output of the git verify-pack command
|
||||
lines := strings.SplitSeq(output, "\n")
|
||||
for line := range lines {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) < 4 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Second field has object type
|
||||
// If object type is not known filter it out and do not process
|
||||
objectType := fields[1]
|
||||
if objectType != "commit" && objectType != "tree" && objectType != "blob" && objectType != "tag" {
|
||||
continue
|
||||
}
|
||||
|
||||
// First field would have an object hash
|
||||
objectID := fields[0]
|
||||
|
||||
// Forth field would have an object compressed size
|
||||
size, err := strconv.ParseInt(fields[3], 10, 64)
|
||||
if err != nil {
|
||||
log.Trace("Failed to parse size for object %s: %v", objectID, err)
|
||||
continue
|
||||
}
|
||||
i++
|
||||
objectsSizes[objectID] = size
|
||||
}
|
||||
}
|
||||
|
||||
log.Trace("Loaded %d items from packfiles", i)
|
||||
return errExec
|
||||
}
|
||||
|
||||
// loadObjectsSizesViaCatFile resolves the size of each object in objectIDs by
// running `git cat-file -s` across 10 concurrent workers, storing the results
// (bytes) in objectsSizes. Objects whose size is already cached are skipped.
//
// `git cat-file --batch-check` cannot be used here: it only sees the git DB
// as it was before the push and knows nothing about new commit objects.
//
// The first worker error is kept and returned after all workers finish,
// annotated with a count of any subsequent errors; processing is never
// aborted mid-push because of a single unsized object.
func loadObjectsSizesViaCatFile(ctx *gitea_context.PrivateContext, dir string, env, objectIDs []string, objectsSizes map[string]int64) error {
	// Number of goroutines that concurrently run calculateSizeOfObject.
	const numWorkers = 10

	var wg sync.WaitGroup
	var mu sync.Mutex

	// errExec holds the first error; errOnce guards its one-time assignment.
	var errOnce sync.Once
	var errExec error

	// errCount counts how many *additional* errors occurred after the first.
	var errCount int64

	// One work slice per worker, pre-sized to an even share of the input.
	reducedObjectIDs := make([][]string, numWorkers)
	for i := range reducedObjectIDs {
		reducedObjectIDs[i] = make([]string, 0, len(objectIDs)/numWorkers+1)
	}

	// Distribute the objects that still need sizing round-robin over the
	// worker slices; already-cached objects are skipped.
	i := 0
	for _, objectID := range objectIDs {
		_, exists := objectsSizes[objectID]

		if !exists {
			reducedObjectIDs[i%numWorkers] = append(reducedObjectIDs[i%numWorkers], objectID)
			i++
		}
	}

	// Start the workers; each sizes its own slice via `git cat-file -s` and
	// stores results in the shared objectsSizes cache under the mutex.
	for w := 1; w <= numWorkers; w++ {
		wg.Add(1)
		go func(reducedObjectIDs *[]string) {
			defer wg.Done()
			for _, objectID := range *reducedObjectIDs {
				ctx := ctx
				// Each worker gets its own copy of env to prevent races.
				env := append([]string(nil), env...)

				objectSize, err := calculateSizeOfObject(ctx, dir, env, objectID)
				// On error, record the first error (plus a count of any
				// later ones) and keep going — the zero size is still
				// cached below so the push is not blocked by sizing issues.
				if err != nil {
					ran := false
					errOnce.Do(func() {
						errExec = err
						ran = true
					})
					if !ran {
						// Not the first error – count it as a subsequent one.
						atomic.AddInt64(&errCount, 1)
					}
				}

				mu.Lock() // Protecting shared resource
				objectsSizes[objectID] = objectSize
				mu.Unlock() // Releasing shared resource for other goroutines
			}
		}(&reducedObjectIDs[(w-1)%numWorkers])
	}

	// Wait for all workers to finish processing.
	wg.Wait()

	if errExec == nil {
		return nil
	}

	// If there were additional errors, wrap the first one with a summary.
	if n := atomic.LoadInt64(&errCount); n > 0 {
		return fmt.Errorf("%w (and %d subsequent similar errors)", errExec, n)
	}

	// Only one error occurred.
	return errExec
}
|
||||
|
||||
// loadObjectsSizesViaBatch uses hashes from objectIDs and uses pre-opened `git cat-file --batch-check` command to slice and return each object sizes
|
||||
// This function can't be used for new commit objects.
|
||||
// It speeds up loading object sizes from existing git database of the repository avoiding
|
||||
// multiple `git cat-files -s`
|
||||
func loadObjectsSizesViaBatch(ctx *gitea_context.PrivateContext, repoPath string, objectIDs []string, objectsSizes map[string]int64) error {
|
||||
var i int32
|
||||
|
||||
reducedObjectIDs := make([]string, 0, len(objectIDs))
|
||||
|
||||
// Loop over all objectIDs and find which ones are missing size information
|
||||
for _, objectID := range objectIDs {
|
||||
_, exists := objectsSizes[objectID]
|
||||
|
||||
// If object doesn't yet have size in objectsSizes add it for further processing
|
||||
if !exists {
|
||||
reducedObjectIDs = append(reducedObjectIDs, objectID)
|
||||
}
|
||||
}
|
||||
|
||||
batch, err := git.NewBatchCheck(ctx, repoPath)
|
||||
if err != nil {
|
||||
log.Error("Unable to create CatFileBatchCheck in %s Error: %v", repoPath, err)
|
||||
return fmt.Errorf("Fail to create CatFileBatchCheck: %v", err)
|
||||
}
|
||||
defer batch.Close()
|
||||
wr := batch.Writer
|
||||
rd := batch.Reader
|
||||
|
||||
for _, commitID := range reducedObjectIDs {
|
||||
_, err := wr.Write([]byte(commitID + "\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i++
|
||||
line, err := rd.ReadString('\n')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(line) == 1 {
|
||||
line, err = rd.ReadString('\n')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
fields := strings.Fields(line)
|
||||
objectID := fields[0]
|
||||
if len(fields) < 3 || len(fields) > 3 {
|
||||
log.Trace("String '%s' does not contain size ignored %s: %v", line, objectID, err)
|
||||
continue
|
||||
}
|
||||
sizeStr := fields[2]
|
||||
size, err := parseSize(sizeStr)
|
||||
if err != nil {
|
||||
log.Trace("String '%s' Failed to parse size for object %s: %v", line, objectID, err)
|
||||
continue
|
||||
}
|
||||
objectsSizes[objectID] = size
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseSize converts a decimal size string reported by git into an int64
// number of bytes.
func parseSize(sizeStr string) (int64, error) {
	n, err := strconv.ParseInt(sizeStr, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to parse object size: %w", err)
	}
	return n, nil
}
|
||||
|
||||
// HookPreReceive checks whether a individual commit is acceptable
|
||||
func HookPreReceive(ctx *gitea_context.PrivateContext) {
|
||||
startTime := time.Now()
|
||||
|
||||
opts := web.GetForm(ctx).(*private.HookOptions)
|
||||
|
||||
ourCtx := &preReceiveContext{
|
||||
@ -115,12 +432,150 @@ func HookPreReceive(ctx *gitea_context.PrivateContext) {
|
||||
opts: opts,
|
||||
}
|
||||
|
||||
repo := ourCtx.Repo.Repository
|
||||
|
||||
var addedSize int64
|
||||
var removedSize int64
|
||||
var isRepoOversized bool
|
||||
var pushSize *git.CountObject
|
||||
var repoSize *git.CountObject
|
||||
var err error
|
||||
var duration time.Duration
|
||||
|
||||
if repo.IsRepoSizeLimitEnabled() {
|
||||
// Calculating total size of the repo using `git count-objects`
|
||||
repoSize, err = git.CountObjects(ctx, repo.RepoPath())
|
||||
if err != nil {
|
||||
log.Error("Unable to get repository size with env %v: %s Error: %v", repo.RepoPath(), ourCtx.env, err)
|
||||
ctx.JSON(http.StatusInternalServerError, map[string]any{
|
||||
"err": err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Calculating total size of the push using `git count-objects`
|
||||
pushSize, err = git.CountObjectsWithEnv(ctx, repo.RepoPath(), ourCtx.env)
|
||||
if err != nil {
|
||||
log.Error("Unable to get push size with env %v: %s Error: %v", repo.RepoPath(), ourCtx.env, err)
|
||||
ctx.JSON(http.StatusInternalServerError, map[string]any{
|
||||
"err": err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Cache whether the repository would breach the size limit after the operation
|
||||
isRepoOversized = repo.IsRepoSizeOversized(pushSize.Size + pushSize.SizePack)
|
||||
log.Warn("Push counts %+v", pushSize)
|
||||
log.Warn("Repo counts %+v", repoSize)
|
||||
}
|
||||
|
||||
// Iterate across the provided old commit IDs
|
||||
for i := range opts.OldCommitIDs {
|
||||
oldCommitID := opts.OldCommitIDs[i]
|
||||
newCommitID := opts.NewCommitIDs[i]
|
||||
refFullName := opts.RefFullNames[i]
|
||||
|
||||
log.Trace("Processing old commit: %s, new commit: %s, ref: %s", oldCommitID, newCommitID, refFullName)
|
||||
|
||||
// If operation is in potential breach of size limit prepare data for analysis
|
||||
if isRepoOversized {
|
||||
var gitObjects string
|
||||
var errLoop error
|
||||
|
||||
// Create cache of objects in old commit
|
||||
// if oldCommitID all 0 then it's a fresh repository on gitea server and all git operations on such oldCommitID would fail
|
||||
if oldCommitID != "0000000000000000000000000000000000000000" {
|
||||
gitObjects, _, err = gitcmd.NewCommand("rev-list", "--objects").AddDynamicArguments(oldCommitID).WithDir(repo.RepoPath()).WithEnv(ourCtx.env).RunStdString(ctx)
|
||||
if err != nil {
|
||||
log.Error("Unable to list objects in old commit: %s in %-v Error: %v", oldCommitID, repo, err)
|
||||
ctx.JSON(http.StatusInternalServerError, private.Response{
|
||||
Err: fmt.Sprintf("Fail to list objects in old commit: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
commitObjectsSizes := make(map[string]int64)
|
||||
oldCommitObjects := convertObjectsToMap(gitObjects)
|
||||
objectIDs := convertObjectsToSlice(gitObjects)
|
||||
|
||||
// Create cache of objects that are in the repository but not part of old or new commit
|
||||
// if oldCommitID all 0 then it's a fresh repository on gitea server and all git operations on such oldCommitID would fail
|
||||
if oldCommitID == "0000000000000000000000000000000000000000" {
|
||||
gitObjects, _, err = gitcmd.NewCommand("rev-list", "--objects", "--all").AddDynamicArguments("^" + newCommitID).WithDir(repo.RepoPath()).WithEnv(ourCtx.env).RunStdString(ctx)
|
||||
if err != nil {
|
||||
log.Error("Unable to list objects in the repo that are missing from both old %s and new %s commits in %-v Error: %v", oldCommitID, newCommitID, repo, err)
|
||||
ctx.JSON(http.StatusInternalServerError, private.Response{
|
||||
Err: fmt.Sprintf("Fail to list objects missing from both old and new commits: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
} else {
|
||||
gitObjects, _, err = gitcmd.NewCommand("rev-list", "--objects", "--all").AddDynamicArguments("^"+oldCommitID, "^"+newCommitID).WithDir(repo.RepoPath()).WithEnv(ourCtx.env).RunStdString(ctx)
|
||||
if err != nil {
|
||||
log.Error("Unable to list objects in the repo that are missing from both old %s and new %s commits in %-v Error: %v", oldCommitID, newCommitID, repo, err)
|
||||
ctx.JSON(http.StatusInternalServerError, private.Response{
|
||||
Err: fmt.Sprintf("Fail to list objects missing from both old and new commits: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
otherCommitObjects := convertObjectsToMap(gitObjects)
|
||||
objectIDs = append(objectIDs, convertObjectsToSlice(gitObjects)...)
|
||||
// Unfortunately `git cat-file --check-batch` shows full object size
|
||||
// so we would load compressed sizes from pack file via `git verify-pack -v` if there are pack files in repo
|
||||
// The result would still miss items that are loose as individual objects (not part of pack files)
|
||||
if repoSize.InPack > 0 {
|
||||
errLoop = loadObjectSizesFromPack(ctx, repo.RepoPath(), nil, objectIDs, commitObjectsSizes)
|
||||
if errLoop != nil {
|
||||
log.Error("Unable to get sizes of objects from the pack in %-v Error: %v", repo, errLoop)
|
||||
}
|
||||
}
|
||||
|
||||
// Load loose objects that are missing
|
||||
errLoop = loadObjectsSizesViaBatch(ctx, repo.RepoPath(), objectIDs, commitObjectsSizes)
|
||||
if errLoop != nil {
|
||||
log.Error("Unable to get sizes of objects that are missing in both old %s and new commits %s in %-v Error: %v", oldCommitID, newCommitID, repo, errLoop)
|
||||
ctx.JSON(http.StatusInternalServerError, private.Response{
|
||||
Err: fmt.Sprintf("Fail to get sizes of objects missing in both old and new commit and those in old commit: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Create cache of objects in new commit
|
||||
gitObjects, _, err = gitcmd.NewCommand("rev-list", "--objects").AddDynamicArguments(newCommitID).WithDir(repo.RepoPath()).WithEnv(ourCtx.env).RunStdString(ctx)
|
||||
if err != nil {
|
||||
log.Error("Unable to list objects in new commit %s in %-v Error: %v", newCommitID, repo, err)
|
||||
ctx.JSON(http.StatusInternalServerError, private.Response{
|
||||
Err: fmt.Sprintf("Fail to list objects in new commit: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
newCommitObjects := convertObjectsToMap(gitObjects)
|
||||
objectIDs = convertObjectsToSlice(gitObjects)
|
||||
// Unfortunately `git cat-file --check-batch` doesn't work on objects not yet accepted into git database
|
||||
// so the sizes will be calculated through pack file `git verify-pack -v` if there are pack files
|
||||
// The result would still miss items that were sent loose as individual objects (not part of pack files)
|
||||
if pushSize.InPack > 0 {
|
||||
errLoop = loadObjectSizesFromPack(ctx, repo.RepoPath(), ourCtx.env, objectIDs, commitObjectsSizes)
|
||||
if errLoop != nil {
|
||||
log.Error("Unable to get sizes of objects from the pack in new commit %s in %-v Error: %v", newCommitID, repo, errLoop)
|
||||
}
|
||||
}
|
||||
|
||||
// After loading everything we could from pack file, objects could have been sent as loose bunch as well
|
||||
// We need to load them individually with `git cat-file -s` on any object that is missing from accumulated size cache commitObjectsSizes
|
||||
errLoop = loadObjectsSizesViaCatFile(ctx, repo.RepoPath(), ourCtx.env, objectIDs, commitObjectsSizes)
|
||||
if errLoop != nil {
|
||||
log.Error("Unable to get sizes of objects in new commit %s in %-v Error: %v", newCommitID, repo, errLoop)
|
||||
}
|
||||
|
||||
// Calculate size that was added and removed by the new commit
|
||||
addedSize, removedSize = calculateSizeOfObjectsFromCache(newCommitObjects, oldCommitObjects, otherCommitObjects, commitObjectsSizes)
|
||||
}
|
||||
|
||||
switch {
|
||||
case refFullName.IsBranch():
|
||||
preReceiveBranch(ourCtx, oldCommitID, newCommitID, refFullName)
|
||||
@ -136,6 +591,20 @@ func HookPreReceive(ctx *gitea_context.PrivateContext) {
|
||||
}
|
||||
}
|
||||
|
||||
if repo.IsRepoSizeLimitEnabled() {
|
||||
duration = time.Since(startTime)
|
||||
log.Warn("During size checking - Addition in size is: %d, removal in size is: %d, limit size: %d, push size: %d, repo size: %d. Took %s seconds.", addedSize, removedSize, repo.GetActualSizeLimit(), pushSize.Size+pushSize.SizePack, repo.GitSize, duration)
|
||||
}
|
||||
|
||||
// If total of commits add more size then they remove and we are in a potential breach of size limit -- abort
|
||||
if (addedSize > removedSize) && isRepoOversized {
|
||||
log.Warn("Forbidden: new repo size %s would be over limitation of %s. Push size: %s. Took %s seconds. addedSize: %s. removedSize: %s", base.FileSize(repo.GitSize+addedSize-removedSize), base.FileSize(repo.GetActualSizeLimit()), base.FileSize(pushSize.Size+pushSize.SizePack), duration, base.FileSize(addedSize), base.FileSize(removedSize))
|
||||
ctx.JSON(http.StatusForbidden, private.Response{
|
||||
UserMsg: "New repository size is over limitation of " + base.FileSize(repo.GetActualSizeLimit()),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
ctx.PlainText(http.StatusOK, "ok")
|
||||
}
|
||||
|
||||
|
||||
@ -11,12 +11,15 @@ import (
|
||||
"code.gitea.io/gitea/models/db"
|
||||
repo_model "code.gitea.io/gitea/models/repo"
|
||||
user_model "code.gitea.io/gitea/models/user"
|
||||
"code.gitea.io/gitea/modules/base"
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
"code.gitea.io/gitea/modules/templates"
|
||||
"code.gitea.io/gitea/modules/util"
|
||||
"code.gitea.io/gitea/modules/web"
|
||||
"code.gitea.io/gitea/routers/web/explore"
|
||||
"code.gitea.io/gitea/services/context"
|
||||
"code.gitea.io/gitea/services/forms"
|
||||
repo_service "code.gitea.io/gitea/services/repository"
|
||||
)
|
||||
|
||||
@ -30,6 +33,9 @@ func Repos(ctx *context.Context) {
|
||||
ctx.Data["Title"] = ctx.Tr("admin.repositories")
|
||||
ctx.Data["PageIsAdminRepositories"] = true
|
||||
|
||||
ctx.Data["EnableSizeLimit"] = setting.EnableSizeLimit
|
||||
ctx.Data["RepoSizeLimit"] = base.FileSize(setting.RepoSizeLimit)
|
||||
|
||||
explore.RenderRepoSearch(ctx, &explore.RepoSearchOptions{
|
||||
Private: true,
|
||||
PageSize: setting.UI.Admin.RepoPagingNum,
|
||||
@ -38,6 +44,54 @@ func Repos(ctx *context.Context) {
|
||||
})
|
||||
}
|
||||
|
||||
func UpdateRepoPost(ctx *context.Context) {
|
||||
temp := web.GetForm(ctx)
|
||||
if temp == nil {
|
||||
ctx.Data["Err_Repo_Size_Limit"] = ""
|
||||
explore.RenderRepoSearch(ctx, &explore.RepoSearchOptions{
|
||||
Private: true,
|
||||
PageSize: setting.UI.Admin.RepoPagingNum,
|
||||
TplName: tplRepos,
|
||||
OnlyShowRelevant: false,
|
||||
})
|
||||
return
|
||||
}
|
||||
form := temp.(*forms.UpdateGlobalRepoFrom)
|
||||
ctx.Data["Title"] = ctx.Tr("admin.repositories")
|
||||
ctx.Data["PageIsAdminRepositories"] = true
|
||||
|
||||
repoSizeLimit, err := base.GetFileSize(form.RepoSizeLimit)
|
||||
|
||||
ctx.Data["EnableSizeLimit"] = form.EnableSizeLimit
|
||||
ctx.Data["RepoSizeLimit"] = form.RepoSizeLimit
|
||||
|
||||
if err != nil {
|
||||
ctx.Data["Err_Repo_Size_Limit"] = err.Error()
|
||||
explore.RenderRepoSearch(ctx, &explore.RepoSearchOptions{
|
||||
Private: true,
|
||||
PageSize: setting.UI.Admin.RepoPagingNum,
|
||||
TplName: tplRepos,
|
||||
OnlyShowRelevant: false,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
err = setting.SaveGlobalRepositorySetting(form.EnableSizeLimit, repoSizeLimit)
|
||||
if err != nil {
|
||||
ctx.Data["Err_Repo_Size_Save"] = err.Error()
|
||||
explore.RenderRepoSearch(ctx, &explore.RepoSearchOptions{
|
||||
Private: true,
|
||||
PageSize: setting.UI.Admin.RepoPagingNum,
|
||||
TplName: tplRepos,
|
||||
OnlyShowRelevant: false,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Flash.Success(ctx.Tr("admin.config.repository_setting_success"))
|
||||
ctx.Redirect(setting.AppSubURL + "/admin/repos")
|
||||
}
|
||||
|
||||
// DeleteRepo delete one repository
|
||||
func DeleteRepo(ctx *context.Context) {
|
||||
repo, err := repo_model.GetRepositoryByID(ctx, ctx.FormInt64("id"))
|
||||
|
||||
26
routers/web/admin/repos_test.go
Normal file
26
routers/web/admin/repos_test.go
Normal file
@ -0,0 +1,26 @@
|
||||
// Copyright 2019 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package admin
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"code.gitea.io/gitea/models/unittest"
|
||||
"code.gitea.io/gitea/services/contexttest"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestUpdateRepoPost(t *testing.T) {
|
||||
unittest.PrepareTestEnv(t)
|
||||
ctx, _ := contexttest.MockContext(t, "admin/repos")
|
||||
contexttest.LoadUser(t, ctx, 1)
|
||||
|
||||
ctx.Req.Form.Set("enable_size_limit", "on")
|
||||
ctx.Req.Form.Set("repo_size_limit", "222 kcmcm")
|
||||
|
||||
UpdateRepoPost(ctx)
|
||||
|
||||
assert.NotEmpty(t, ctx.Flash.ErrorMsg)
|
||||
}
|
||||
@ -141,6 +141,18 @@ func RenderRepoSearch(ctx *context.Context, opts *RepoSearchOptions) {
|
||||
pager.AddParamFromRequest(ctx.Req)
|
||||
ctx.Data["Page"] = pager
|
||||
|
||||
if ctx.Data["Err_Repo_Size_Limit"] != nil {
|
||||
ctx.RenderWithErr(ctx.Tr("admin.config.invalid_repo_size", ctx.Data["Err_Repo_Size_Limit"].(string)),
|
||||
opts.TplName, nil)
|
||||
return
|
||||
}
|
||||
|
||||
if ctx.Data["Err_Repo_Size_Save"] != nil {
|
||||
ctx.RenderWithErr(ctx.Tr("admin.config.save_repo_size_setting_failed", ctx.Data["Err_Repo_Size_Save"].(string)),
|
||||
opts.TplName, nil)
|
||||
return
|
||||
}
|
||||
|
||||
ctx.HTML(http.StatusOK, opts.TplName)
|
||||
}
|
||||
|
||||
|
||||
@ -288,6 +288,7 @@ func CreatePost(ctx *context.Context) {
|
||||
IsTemplate: form.Template,
|
||||
TrustModel: repo_model.DefaultTrustModel,
|
||||
ObjectFormatName: form.ObjectFormatName,
|
||||
SizeLimit: form.SizeLimit,
|
||||
})
|
||||
if err == nil {
|
||||
log.Trace("Repository created [%d]: %s/%s", repo.ID, ctxUser.Name, repo.Name)
|
||||
|
||||
@ -60,6 +60,9 @@ func SettingsCtxData(ctx *context.Context) {
|
||||
ctx.Data["DefaultMirrorInterval"] = setting.Mirror.DefaultInterval
|
||||
ctx.Data["MinimumMirrorInterval"] = setting.Mirror.MinInterval
|
||||
ctx.Data["CanConvertFork"] = ctx.Repo.Repository.IsFork && ctx.Doer.CanCreateRepoIn(ctx.Repo.Repository.Owner)
|
||||
ctx.Data["Err_RepoSize"] = ctx.Repo.Repository.IsRepoSizeOversized(ctx.Repo.Repository.GetActualSizeLimit() / 10) // less than 10% left
|
||||
ctx.Data["ActualSizeLimit"] = ctx.Repo.Repository.GetActualSizeLimit()
|
||||
ctx.Data["EnableSizeLimit"] = setting.EnableSizeLimit
|
||||
|
||||
signing, _ := gitrepo.GetSigningKey(ctx, ctx.Repo.Repository)
|
||||
ctx.Data["SigningKeyAvailable"] = signing != nil
|
||||
@ -109,6 +112,9 @@ func SettingsPost(ctx *context.Context) {
|
||||
ctx.Data["SigningSettings"] = setting.Repository.Signing
|
||||
ctx.Data["IsRepoIndexerEnabled"] = setting.Indexer.RepoIndexerEnabled
|
||||
|
||||
repo := ctx.Repo.Repository
|
||||
ctx.Data["Err_RepoSize"] = repo.IsRepoSizeOversized(repo.GetActualSizeLimit() / 10) // less than 10% left
|
||||
|
||||
switch ctx.FormString("action") {
|
||||
case "update":
|
||||
handleSettingsPostUpdate(ctx)
|
||||
@ -207,6 +213,19 @@ func handleSettingsPostUpdate(ctx *context.Context) {
|
||||
repo.Website = form.Website
|
||||
repo.IsTemplate = form.Template
|
||||
|
||||
if form.RepoSizeLimit < 0 {
|
||||
ctx.Data["Err_RepoSizeLimit"] = true
|
||||
ctx.RenderWithErr(ctx.Tr("repo.form.repo_size_limit_negative"), tplSettingsOptions, &form)
|
||||
return
|
||||
}
|
||||
|
||||
if !ctx.Doer.IsAdmin && repo.SizeLimit != form.RepoSizeLimit {
|
||||
ctx.Data["Err_RepoSizeLimit"] = true
|
||||
ctx.RenderWithErr(ctx.Tr("repo.form.repo_size_limit_only_by_admins"), tplSettingsOptions, &form)
|
||||
return
|
||||
}
|
||||
repo.SizeLimit = form.RepoSizeLimit
|
||||
|
||||
if err := repo_service.UpdateRepository(ctx, repo, false); err != nil {
|
||||
ctx.ServerError("UpdateRepository", err)
|
||||
return
|
||||
|
||||
@ -766,6 +766,7 @@ func registerWebRoutes(m *web.Router) {
|
||||
m.Get("", admin.Repos)
|
||||
m.Combo("/unadopted").Get(admin.UnadoptedRepos).Post(admin.AdoptOrDeleteRepository)
|
||||
m.Post("/delete", admin.DeleteRepo)
|
||||
m.Post("", web.Bind(forms.UpdateGlobalRepoFrom{}), admin.UpdateRepoPost)
|
||||
})
|
||||
|
||||
m.Group("/packages", func() {
|
||||
|
||||
@ -18,6 +18,12 @@ import (
|
||||
"gitea.com/go-chi/binding"
|
||||
)
|
||||
|
||||
// UpdateGlobalRepoFrom for updating global repository setting
|
||||
type UpdateGlobalRepoFrom struct {
|
||||
RepoSizeLimit string
|
||||
EnableSizeLimit bool
|
||||
}
|
||||
|
||||
// CreateRepoForm form for creating repository
|
||||
type CreateRepoForm struct {
|
||||
UID int64 `binding:"Required"`
|
||||
@ -43,6 +49,7 @@ type CreateRepoForm struct {
|
||||
|
||||
ForkSingleBranch string
|
||||
ObjectFormatName string
|
||||
SizeLimit int64
|
||||
}
|
||||
|
||||
// Validate validates the fields
|
||||
@ -106,6 +113,7 @@ type RepoSettingForm struct {
|
||||
PushMirrorInterval string
|
||||
Template bool
|
||||
EnablePrune bool
|
||||
RepoSizeLimit int64
|
||||
|
||||
// Advanced settings
|
||||
EnableCode bool
|
||||
|
||||
@ -52,6 +52,7 @@ type CreateRepoOptions struct {
|
||||
TrustModel repo_model.TrustModelType
|
||||
MirrorInterval string
|
||||
ObjectFormatName string
|
||||
SizeLimit int64
|
||||
}
|
||||
|
||||
func prepareRepoCommit(ctx context.Context, repo *repo_model.Repository, tmpDir string, opts CreateRepoOptions) error {
|
||||
@ -246,6 +247,7 @@ func CreateRepositoryDirectly(ctx context.Context, doer, owner *user_model.User,
|
||||
Status: opts.Status,
|
||||
IsEmpty: !opts.AutoInit,
|
||||
TrustModel: opts.TrustModel,
|
||||
SizeLimit: opts.SizeLimit,
|
||||
IsMirror: opts.IsMirror,
|
||||
DefaultBranch: opts.DefaultBranch,
|
||||
DefaultWikiBranch: setting.Repository.DefaultBranch,
|
||||
|
||||
@ -1,5 +1,26 @@
|
||||
{{template "admin/layout_head" (dict "ctxData" . "pageClass" "admin")}}
|
||||
<div class="admin-setting-content">
|
||||
<h4 class="ui top attached header">
|
||||
{{.locale.Tr "admin.config.repository_config"}}
|
||||
</h4>
|
||||
<div class="ui attached segment">
|
||||
<form class="ui form" action="{{.Link}}" method="post">
|
||||
{{.CsrfTokenHtml}}
|
||||
<div class="inline field">
|
||||
<label>{{.locale.Tr "admin.config.enable_size_limit"}}</label>
|
||||
<div class="ui checkbox">
|
||||
<input name="enable_size_limit" type="checkbox" {{if .EnableSizeLimit}}checked{{end}}>
|
||||
</div>
|
||||
</div>
|
||||
<div class="inline field {{if .Err_Repo_Size_Limit}}error{{end}}">
|
||||
<label for="repo_size_limit">{{.locale.Tr "admin.config.repo_size_limit"}}</label>
|
||||
<input id="repo_size_limit" name="repo_size_limit" value="{{.RepoSizeLimit}}">
|
||||
</div>
|
||||
<div class="field">
|
||||
<button class="ui green button">{{$.locale.Tr "repo.settings.update_settings"}}</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
<h4 class="ui top attached header">
|
||||
{{ctx.Locale.Tr "admin.repos.repo_manage_panel"}} ({{ctx.Locale.Tr "admin.total" .Total}})
|
||||
<div class="ui right">
|
||||
|
||||
@ -13,8 +13,18 @@
|
||||
<input name="repo_name" value="{{.Repository.Name}}" data-repo-name="{{.Repository.Name}}" required>
|
||||
</div>
|
||||
<div class="inline field">
|
||||
<label>{{ctx.Locale.Tr "repo.repo_size"}}</label>
|
||||
<span {{if not (eq .Repository.Size 0)}} data-tooltip-content="{{.Repository.SizeDetailsString}}"{{end}}>{{FileSize .Repository.Size}}</span>
|
||||
<label>{{ctx.locale.Tr "repo.repo_size"}}</label>
|
||||
<span {{if .Err_RepoSize}}class="ui text red"{{end}} {{if not (eq .Repository.Size 0)}} data-tooltip-content="{{.Repository.SizeDetailsString}}"{{end}}>{{FileSize .Repository.Size}}
|
||||
{{if and .ActualSizeLimit .EnableSizeLimit}}
|
||||
/{{FileSize .ActualSizeLimit}}
|
||||
{{end}}
|
||||
</span>
|
||||
</div>
|
||||
<div class="field {{if .Err_RepoSizeLimit}}error{{end}}" {{if not .IsAdmin}}style="display:none;"{{end}}>
|
||||
<label for="repo_size_limit">{{.locale.Tr "repo.repo_size_limit"}}</label>
|
||||
<input id="repo_size_limit" name="repo_size_limit"
|
||||
{{if not .EnableSizeLimit}}class="ui text light grey"{{end}}
|
||||
type="number" value="{{.Repository.SizeLimit}}" data-repo-size-limit="{{.Repository.SizeLimit}}">
|
||||
</div>
|
||||
<div class="inline field">
|
||||
<label>{{ctx.Locale.Tr "repo.template"}}</label>
|
||||
|
||||
12
templates/swagger/v1_json.tmpl
generated
12
templates/swagger/v1_json.tmpl
generated
@ -23633,6 +23633,12 @@
|
||||
"type": "string",
|
||||
"x-go-name": "Readme"
|
||||
},
|
||||
"size_limit": {
|
||||
"description": "SizeLimit of the repository",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"x-go-name": "SizeLimit"
|
||||
},
|
||||
"template": {
|
||||
"description": "Whether the repository is template",
|
||||
"type": "boolean",
|
||||
@ -24754,6 +24760,12 @@
|
||||
"type": "string",
|
||||
"x-go-name": "ProjectsMode"
|
||||
},
|
||||
"size_limit": {
|
||||
"description": "SizeLimit of the repository.",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"x-go-name": "SizeLimit"
|
||||
},
|
||||
"template": {
|
||||
"description": "either `true` to make this repository a template or `false` to make it a normal repository",
|
||||
"type": "boolean",
|
||||
|
||||
@ -136,6 +136,23 @@ func doAPIForkRepository(ctx APITestContext, username string, callback ...func(*
|
||||
}
|
||||
}
|
||||
|
||||
func doAPIGetRepositorySize(ctx APITestContext, owner, repo string) func(*testing.T) int64 {
|
||||
return func(t *testing.T) int64 {
|
||||
urlStr := fmt.Sprintf("/api/v1/repos/%s/%s", ctx.Username, ctx.Reponame)
|
||||
|
||||
req := NewRequest(t, "GET", urlStr).
|
||||
AddTokenAuth(ctx.Token)
|
||||
if ctx.ExpectedCode != 0 {
|
||||
ctx.Session.MakeRequest(t, req, ctx.ExpectedCode)
|
||||
}
|
||||
resp := ctx.Session.MakeRequest(t, req, http.StatusOK)
|
||||
|
||||
var repository api.Repository
|
||||
DecodeJSON(t, resp, &repository)
|
||||
return int64(repository.Size)
|
||||
}
|
||||
}
|
||||
|
||||
func doAPIGetRepository(ctx APITestContext, callback ...func(*testing.T, api.Repository)) func(*testing.T) {
|
||||
return func(t *testing.T) {
|
||||
req := NewRequest(t, "GET", fmt.Sprintf("/api/v1/repos/%s/%s", ctx.Username, ctx.Reponame)).
|
||||
@ -451,3 +468,18 @@ func doAPIAddRepoToOrganizationTeam(ctx APITestContext, teamID int64, orgName, r
|
||||
ctx.Session.MakeRequest(t, req, http.StatusNoContent)
|
||||
}
|
||||
}
|
||||
|
||||
func doAPISetRepoSizeLimit(ctx APITestContext, owner, repo string, size int64) func(*testing.T) {
|
||||
return func(t *testing.T) {
|
||||
urlStr := fmt.Sprintf("/api/v1/repos/%s/%s",
|
||||
owner, repo)
|
||||
req := NewRequestWithJSON(t, http.MethodPatch, urlStr, &api.EditRepoOption{SizeLimit: &size}).
|
||||
AddTokenAuth(ctx.Token)
|
||||
|
||||
if ctx.ExpectedCode != 0 {
|
||||
ctx.Session.MakeRequest(t, req, ctx.ExpectedCode)
|
||||
return
|
||||
}
|
||||
ctx.Session.MakeRequest(t, req, 200)
|
||||
}
|
||||
}
|
||||
|
||||
@ -85,6 +85,39 @@ func testGitGeneral(t *testing.T, u *url.URL) {
|
||||
rawTest(t, &httpContext, pushedFilesStandard[0], pushedFilesStandard[1], pushedFilesLFS[0], pushedFilesLFS[1])
|
||||
mediaTest(t, &httpContext, pushedFilesStandard[0], pushedFilesStandard[1], pushedFilesLFS[0], pushedFilesLFS[1])
|
||||
|
||||
t.Run("SizeLimit", func(t *testing.T) {
|
||||
dstForkedPath := t.TempDir()
|
||||
setting.SaveGlobalRepositorySetting(true, 0)
|
||||
t.Run("Under", func(t *testing.T) {
|
||||
defer tests.PrintCurrentTest(t)()
|
||||
doCommitAndPush(t, testFileSizeSmall, dstPath, "data-file-")
|
||||
})
|
||||
t.Run("Over", func(t *testing.T) {
|
||||
defer tests.PrintCurrentTest(t)()
|
||||
u.Path = forkedUserCtx.GitPath()
|
||||
u.User = url.UserPassword(forkedUserCtx.Username, userPassword)
|
||||
t.Run("Clone", doGitClone(dstForkedPath, u))
|
||||
t.Run("APISetRepoSizeLimit", doAPISetRepoSizeLimit(forkedUserCtx, forkedUserCtx.Username, forkedUserCtx.Reponame, testFileSizeSmall))
|
||||
doCommitAndPushWithExpectedError(t, testFileSizeLarge, dstForkedPath, "data-file-")
|
||||
})
|
||||
t.Run("UnderAfterResize", func(t *testing.T) {
|
||||
defer tests.PrintCurrentTest(t)()
|
||||
t.Run("APISetRepoSizeLimit", doAPISetRepoSizeLimit(forkedUserCtx, forkedUserCtx.Username, forkedUserCtx.Reponame, testFileSizeLarge*10))
|
||||
doCommitAndPush(t, testFileSizeSmall, dstPath, "data-file-")
|
||||
})
|
||||
t.Run("Deletion", func(t *testing.T) {
|
||||
defer tests.PrintCurrentTest(t)()
|
||||
doCommitAndPush(t, testFileSizeSmall, dstPath, "data-file-")
|
||||
bigFileName := doCommitAndPush(t, testFileSizeLarge, dstPath, "data-file-")
|
||||
oldRepoSize := doGetRemoteRepoSizeViaAPI(t, forkedUserCtx)
|
||||
lastCommitID := doGetAddCommitID(t, dstPath, bigFileName)
|
||||
doDeleteAndPush(t, dstPath, bigFileName)
|
||||
doRebaseCommitAndPush(t, dstPath, lastCommitID)
|
||||
newRepoSize := doGetRemoteRepoSizeViaAPI(t, forkedUserCtx)
|
||||
assert.LessOrEqual(t, newRepoSize, oldRepoSize)
|
||||
setting.SaveGlobalRepositorySetting(false, 0)
|
||||
})
|
||||
})
|
||||
t.Run("CreateAgitFlowPull", doCreateAgitFlowPull(dstPath, &httpContext, "test/head"))
|
||||
t.Run("CreateProtectedBranch", doCreateProtectedBranch(&httpContext, dstPath))
|
||||
t.Run("BranchProtectMerge", doBranchProtectPRMerge(&httpContext, dstPath))
|
||||
@ -97,6 +130,8 @@ func testGitGeneral(t *testing.T, u *url.URL) {
|
||||
mediaTest(t, &forkedUserCtx, pushedFilesStandard[0], pushedFilesStandard[1], pushedFilesLFS[0], pushedFilesLFS[1])
|
||||
})
|
||||
|
||||
u.Path = httpContext.GitPath()
|
||||
u.User = url.UserPassword(username, userPassword)
|
||||
t.Run("PushCreate", doPushCreate(httpContext, u))
|
||||
})
|
||||
t.Run("SSH", func(t *testing.T) {
|
||||
@ -323,6 +358,50 @@ func lockFileTest(t *testing.T, filename, repoPath string) {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func doGetRemoteRepoSizeViaAPI(t *testing.T, ctx APITestContext) int64 {
|
||||
return doAPIGetRepositorySize(ctx, ctx.Username, ctx.Reponame)(t)
|
||||
}
|
||||
|
||||
func doDeleteAndPush(t *testing.T, repoPath, filename string) {
|
||||
_, _, err := gitcmd.NewCommand("rm").AddDashesAndList(filename).WithDir(repoPath).RunStdString(t.Context()) // Delete
|
||||
assert.NoError(t, err)
|
||||
signature := git.Signature{
|
||||
Email: "user2@example.com",
|
||||
Name: "User Two",
|
||||
When: time.Now(),
|
||||
}
|
||||
_, _, err = gitcmd.NewCommand("status").WithDir(repoPath).RunStdString(t.Context())
|
||||
assert.NoError(t, err)
|
||||
err2 := git.CommitChanges(t.Context(), repoPath, git.CommitChangesOptions{ // Commit
|
||||
Committer: &signature,
|
||||
Author: &signature,
|
||||
Message: "Delete Commit",
|
||||
})
|
||||
assert.NoError(t, err2)
|
||||
_, _, err = gitcmd.NewCommand("status").WithDir(repoPath).RunStdString(t.Context())
|
||||
assert.NoError(t, err)
|
||||
_, _, err = gitcmd.NewCommand("push", "origin", "master").WithDir(repoPath).RunStdString(t.Context()) // Push
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func doGetAddCommitID(t *testing.T, repoPath, filename string) string {
|
||||
output, _, err := gitcmd.NewCommand("log", "origin", "master").WithDir(repoPath).RunStdString(t.Context()) // Push
|
||||
assert.NoError(t, err)
|
||||
list := strings.Fields(output)
|
||||
assert.LessOrEqual(t, 2, len(list))
|
||||
return list[1]
|
||||
}
|
||||
|
||||
func doRebaseCommitAndPush(t *testing.T, repoPath, commitID string) {
|
||||
command := gitcmd.NewCommand("rebase", "--interactive").AddDashesAndList(commitID)
|
||||
env := os.Environ()
|
||||
env = append(env, "GIT_SEQUENCE_EDITOR=true")
|
||||
_, _, err := command.WithDir(repoPath).WithEnv(env).RunStdString(t.Context()) // Push
|
||||
assert.NoError(t, err)
|
||||
_, _, err = gitcmd.NewCommand("push", "origin", "master", "-f").WithDir(repoPath).RunStdString(t.Context()) // Push
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func doCommitAndPush(t *testing.T, size int, repoPath, prefix string) string {
|
||||
name, err := generateCommitWithNewData(t.Context(), size, repoPath, "user2@example.com", "User Two", prefix)
|
||||
assert.NoError(t, err)
|
||||
@ -331,6 +410,14 @@ func doCommitAndPush(t *testing.T, size int, repoPath, prefix string) string {
|
||||
return name
|
||||
}
|
||||
|
||||
func doCommitAndPushWithExpectedError(t *testing.T, size int, repoPath, prefix string) string {
|
||||
name, err := generateCommitWithNewData(t.Context(), size, repoPath, "user2@example.com", "User Two", prefix)
|
||||
assert.NoError(t, err)
|
||||
_, _, err = gitcmd.NewCommand("push", "origin", "master").WithDir(repoPath).RunStdString(t.Context()) // Push
|
||||
assert.Error(t, err)
|
||||
return name
|
||||
}
|
||||
|
||||
func generateCommitWithNewData(ctx context.Context, size int, repoPath, email, fullName, prefix string) (string, error) {
|
||||
tmpFile, err := os.CreateTemp(repoPath, prefix)
|
||||
if err != nil {
|
||||
|
||||
Loading…
Reference in New Issue
Block a user