forgejo/models/admin/task.go
oliverpool 67df538958 feat: cache derived keys for faster keying (#10114)
Currently `DeriveKey` is called every time a secret must be encrypted or decrypted. Since this function is deterministic, its result can be cached, allowing a roughly 250x speedup (the original call took less than half a microsecond, so this is more of a micro-optimization...).

```
go test -bench=.
goos: linux
goarch: amd64
pkg: forgejo.org/modules/keying
cpu: Intel(R) Core(TM) Ultra 5 125H
BenchmarkExpandPRK-18            2071627               564.2 ns/op
BenchmarkExpandPRKOnce-18       541438192                2.206 ns/op
PASS
ok      forgejo.org/modules/keying      2.369s
```
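
For reference, here is a minimal, self-contained sketch of the caching pattern, assuming an HKDF-style derivation; it is not the actual `forgejo.org/modules/keying` code, and the `prk` variable, the `deriveKey` helper and the `"totp"` context string are purely illustrative:

```go
package keycache

import (
	"crypto/sha256"
	"io"
	"sync"

	"golang.org/x/crypto/hkdf"
)

// prk stands in for the pseudorandom key that Init would compute once
// from the configured master secret.
var prk []byte

// deriveKey is the deterministic (and comparatively expensive) step:
// it expands the PRK with a per-purpose context into a 32-byte key.
func deriveKey(context string) []byte {
	key := make([]byte, 32)
	if _, err := io.ReadFull(hkdf.Expand(sha256.New, prk, []byte(context)), key); err != nil {
		panic(err)
	}
	return key
}

// totpKey caches the result: the first call pays for the HKDF expansion,
// every later call returns the already-derived key.
var totpKey = sync.OnceValue(func() []byte { return deriveKey("totp") })
```

Call sites then invoke `totpKey()` directly, and only the first invocation pays the ~564 ns derivation. Note that `sync.OnceValue` also repeats a panic of the wrapped function on every subsequent call, which lines up with the "panic forever" behaviour listed under "Other changes" below.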

## Other changes

- Since the keys can be constructed once, the call sites become a bit simpler (`keying.TOTP.Encrypt(...)` instead of `keying.DeriveKey(keying.ContextTOTP).Encrypt(...)`); see the sketch after this list.
- All `Encrypt`/`Decrypt` calls will panic forever if they happen before `Init` has been called (currently they only panic as long as `Init` has not yet been called).
- Calling `Init` twice with different keys will trigger a panic (the current behaviour is racy).
- Calling `Decrypt` with a too-short ciphertext no longer panics; it now fails the same way as a call with long-enough garbage.
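
As a sketch of the simplified call-site shape: `encryptTOTPSecret` below is a hypothetical helper, not a real function in the codebase, and the `Encrypt` signature (plaintext plus additional data, returning the ciphertext) is assumed to mirror the `Decrypt` calls visible in `MigrateConfig` further down.

```go
package example

import "forgejo.org/modules/keying"

// encryptTOTPSecret only illustrates the call-site change.
func encryptTOTPSecret(secret, additionalData []byte) []byte {
	// Before: the key had to be derived explicitly at every call site:
	//   return keying.DeriveKey(keying.ContextTOTP).Encrypt(secret, additionalData)
	// After: keying.TOTP is a ready-to-use, pre-constructed key:
	return keying.TOTP.Encrypt(secret, additionalData)
}
```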

Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/10114
Reviewed-by: Gusted <gusted@noreply.codeberg.org>
Co-authored-by: oliverpool <git@olivier.pfad.fr>
Co-committed-by: oliverpool <git@olivier.pfad.fr>
2025-11-16 14:29:14 +01:00

// Copyright 2019 Gitea. All rights reserved.
// SPDX-License-Identifier: MIT

package admin

import (
	"context"
	"encoding/base64"
	"fmt"

	"forgejo.org/models/db"
	repo_model "forgejo.org/models/repo"
	user_model "forgejo.org/models/user"
	"forgejo.org/modules/json"
	"forgejo.org/modules/keying"
	"forgejo.org/modules/migration"
	"forgejo.org/modules/structs"
	"forgejo.org/modules/timeutil"
	"forgejo.org/modules/util"
)

// Task represents a task
type Task struct {
	ID             int64
	DoerID         int64                  `xorm:"index"` // operator
	Doer           *user_model.User       `xorm:"-"`
	OwnerID        int64                  `xorm:"index"` // repo owner id; when creating, the repoID may be zero
	Owner          *user_model.User       `xorm:"-"`
	RepoID         int64                  `xorm:"index"`
	Repo           *repo_model.Repository `xorm:"-"`
	Type           structs.TaskType
	Status         structs.TaskStatus `xorm:"index"`
	StartTime      timeutil.TimeStamp
	EndTime        timeutil.TimeStamp
	PayloadContent string             `xorm:"TEXT"`
	Message        string             `xorm:"TEXT"` // if the task failed, the error reason is saved here; it could be a JSON string of TranslatableMessage or a plain message
	Created        timeutil.TimeStamp `xorm:"created"`
}

func init() {
	db.RegisterModel(new(Task))
}

// TranslatableMessage represents a JSON struct that can be translated with a Locale
type TranslatableMessage struct {
	Format string
	Args   []any `json:",omitempty"`
}

// LoadRepo loads the repository of the task
func (task *Task) LoadRepo(ctx context.Context) error {
	if task.Repo != nil {
		return nil
	}
	var repo repo_model.Repository
	has, err := db.GetEngine(ctx).ID(task.RepoID).Get(&repo)
	if err != nil {
		return err
	} else if !has {
		return repo_model.ErrRepoNotExist{
			ID: task.RepoID,
		}
	}
	task.Repo = &repo
	return nil
}

// LoadDoer loads the doer user
func (task *Task) LoadDoer(ctx context.Context) error {
	if task.Doer != nil {
		return nil
	}
	var doer user_model.User
	has, err := db.GetEngine(ctx).ID(task.DoerID).Get(&doer)
	if err != nil {
		return err
	} else if !has {
		return user_model.ErrUserNotExist{
			UID: task.DoerID,
		}
	}
	task.Doer = &doer
	return nil
}

// LoadOwner loads the owner user
func (task *Task) LoadOwner(ctx context.Context) error {
	if task.Owner != nil {
		return nil
	}
	var owner user_model.User
	has, err := db.GetEngine(ctx).ID(task.OwnerID).Get(&owner)
	if err != nil {
		return err
	} else if !has {
		return user_model.ErrUserNotExist{
			UID: task.OwnerID,
		}
	}
	task.Owner = &owner
	return nil
}

// UpdateCols updates some columns
func (task *Task) UpdateCols(ctx context.Context, cols ...string) error {
	_, err := db.GetEngine(ctx).ID(task.ID).Cols(cols...).Update(task)
	return err
}

// MigrateConfig returns the task config when migrating a repository
func (task *Task) MigrateConfig() (*migration.MigrateOptions, error) {
	if task.Type == structs.TaskTypeMigrateRepo {
		var opts migration.MigrateOptions
		err := json.Unmarshal([]byte(task.PayloadContent), &opts)
		if err != nil {
			return nil, err
		}
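		// keying.MigrateTask is the key derived for migration task payloads; the
		// additional data passed to Decrypt below binds each ciphertext to its
		// column, JSON field and task ID, so a value copied to another field or
		// row should fail to decrypt.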
		key := keying.MigrateTask
		// decrypt credentials
		if opts.CloneAddrEncrypted != "" {
			encryptedCloneAddr, err := base64.RawStdEncoding.DecodeString(opts.CloneAddrEncrypted)
			if err != nil {
				return nil, err
			}
			cloneAddr, err := key.Decrypt(encryptedCloneAddr, keying.ColumnAndJSONSelectorAndID("payload_content", "clone_addr_encrypted", task.ID))
			if err != nil {
				return nil, err
			}
			opts.CloneAddr = string(cloneAddr)
		}
		if opts.AuthPasswordEncrypted != "" {
			encryptedAuthPassword, err := base64.RawStdEncoding.DecodeString(opts.AuthPasswordEncrypted)
			if err != nil {
				return nil, err
			}
			authPassword, err := key.Decrypt(encryptedAuthPassword, keying.ColumnAndJSONSelectorAndID("payload_content", "auth_password_encrypted", task.ID))
			if err != nil {
				return nil, err
			}
			opts.AuthPassword = string(authPassword)
		}
		if opts.AuthTokenEncrypted != "" {
			encryptedAuthToken, err := base64.RawStdEncoding.DecodeString(opts.AuthTokenEncrypted)
			if err != nil {
				return nil, err
			}
			authToken, err := key.Decrypt(encryptedAuthToken, keying.ColumnAndJSONSelectorAndID("payload_content", "auth_token_encrypted", task.ID))
			if err != nil {
				return nil, err
			}
			opts.AuthToken = string(authToken)
		}
		return &opts, nil
	}
	return nil, fmt.Errorf("Task type is %s, not Migrate Repo", task.Type.Name())
}

// ErrTaskDoesNotExist represents a "TaskDoesNotExist" kind of error.
type ErrTaskDoesNotExist struct {
	ID     int64
	RepoID int64
	Type   structs.TaskType
}

// IsErrTaskDoesNotExist checks if an error is an ErrTaskDoesNotExist.
func IsErrTaskDoesNotExist(err error) bool {
	_, ok := err.(ErrTaskDoesNotExist)
	return ok
}

func (err ErrTaskDoesNotExist) Error() string {
	return fmt.Sprintf("task does not exist [id: %d, repo_id: %d, type: %d]",
		err.ID, err.RepoID, err.Type)
}

func (err ErrTaskDoesNotExist) Unwrap() error {
	return util.ErrNotExist
}

// GetMigratingTask returns the migrating task by repo's id
func GetMigratingTask(ctx context.Context, repoID int64) (*Task, error) {
	task := Task{
		RepoID: repoID,
		Type:   structs.TaskTypeMigrateRepo,
	}
	has, err := db.GetEngine(ctx).Get(&task)
	if err != nil {
		return nil, err
	} else if !has {
		return nil, ErrTaskDoesNotExist{0, repoID, task.Type}
	}
	return &task, nil
}

// GetMigratingTaskByID returns the migrating task and its options by the task's id and the doer's id
func GetMigratingTaskByID(ctx context.Context, id, doerID int64) (*Task, *migration.MigrateOptions, error) {
	task := Task{
		ID:     id,
		DoerID: doerID,
		Type:   structs.TaskTypeMigrateRepo,
	}
	has, err := db.GetEngine(ctx).Get(&task)
	if err != nil {
		return nil, nil, err
	} else if !has {
		return nil, nil, ErrTaskDoesNotExist{id, 0, task.Type}
	}
	var opts migration.MigrateOptions
	if err := json.Unmarshal([]byte(task.PayloadContent), &opts); err != nil {
		return nil, nil, err
	}
	return &task, &opts, nil
}

// CreateTask creates a task in the database
func CreateTask(ctx context.Context, task *Task) error {
	return db.Insert(ctx, task)
}

// FinishMigrateTask updates the database when a migrate task has finished
func FinishMigrateTask(ctx context.Context, task *Task) error {
	task.Status = structs.TaskStatusFinished
	task.EndTime = timeutil.TimeStampNow()
	// delete credentials when we're done, they're a liability.
	conf, err := task.MigrateConfig()
	if err != nil {
		return err
	}
	conf.AuthPassword = ""
	conf.AuthToken = ""
	conf.CloneAddr = util.SanitizeCredentialURLs(conf.CloneAddr)
	conf.AuthPasswordEncrypted = ""
	conf.AuthTokenEncrypted = ""
	conf.CloneAddrEncrypted = ""
	confBytes, err := json.Marshal(conf)
	if err != nil {
		return err
	}
	task.PayloadContent = string(confBytes)
	_, err = db.GetEngine(ctx).ID(task.ID).Cols("status", "end_time", "payload_content").Update(task)
	return err
}