Mirror of https://github.com/caddyserver/caddy.git (synced 2025-10-19 07:43:17 +00:00)

Compare commits: v2.10.0-be ... master (146 commits)

Commits (SHA1):

f5f25d845a 1ce2a13ad1 d7185fd002 7fb39ec1e5 10ac7da037 d115cd1042 de6b78009b 2ec28bca43 178294e9d7 13a4ec7597
2f1d270968 3c003deec6 afbdcec08b 65e0ddc221 b2ab419922 bc0e184130 1e82f9652e 25be2f26fc 0c8798fce3 f5c3094050
39ace450de 0ba8786b35 bcd4055e89 b462615439 012b4b3d40 d9cc24f3df 38848f7f25 5473eb95d8 2d0f3f887b 39357d3e5c
3553cfb6ad 806fef85be 6d73d85c1f e0a8f9541d b866a9e099 1db26128a6 02c9f0ff90 63ec1f4e1c 293de94f34 d8d359eca2
11a95cee6d b7c022a61a 5e2953670e 551f793700 4564261d83 16fe83c7af 3723e89585 14a63a26b9 67debd0e11 b9710c6af4
493898d9bd 1c596e3c5a f11c780fdc fdf610850b 5125fbed41 b15ed9b084 05acc5131e 7590c9ca1b b898873b90 09b53a753c
4bfc3b95b5 49dac61b07 19ff47a63b 007f4066f6 42c888ee1d 731e6c2482 0badb071ef e4447c4ba7 5bc2afbbb6 4fd2acb5c9
3bd413546b 5b727bde29 fe41ff3c5b b7ae39e906 ab3b2d64ba 6de2c9e135 8ba7eefd07 291987ac23 790f3e0885 bbf1dfcea2
aff88d4b26 1209b5c566 a067fb1760 77dd12cc78 c712cfcd76 33c88bd2bb 11c6daecd7 3b4d966fba 070d454c0d 2f0fc62b34
3d0b4fac5a 1a0f168b6e 7a33f481f1 e633d013f6 fe26751491 4b01d77b81 0f209f62eb 1481c0411a 092913a7a5 7099892958
45c9341deb e039a5bb5c 5b2eb66418 a76d005a94 8524386737 94147caf31 716d72e475 44d078b670 051e73aefc 9f7148392a
320c57291d aa3d20be3e 54d03ced48 89ed5f44de 105eee671c 737936c06b a6d488a15b fb22a26b1a 1bfa111552 35c8c2d92d
0b2802faa4 5be77d07ab 137711ae3e f297bc0a04 6c38ae7381 def9db1f16 ce926b87ed b06a9496d1 9becf61a9f 5a6b2f8d1d
ea77a9ab67 7672b7848f 86c620fb4e 782a3c7ac6 173573035c 7b1f00c330 8dc76676fb e276994174 b3e692ed09 55c89ccf2a
1f8dab572c 2ac09fdb20 dccf3d8982 af2d33afbb 39262f8663 49f9af9a4a

202 changed files with 7034 additions and 2470 deletions

.github/ISSUE_TEMPLATE/ISSUE.yml (vendored, new file): 31 lines
@@ -0,0 +1,31 @@
name: Issue
description: An actionable development item, like a bug report or feature request
body:
  - type: markdown
    attributes:
      value: |
        Thank you for opening an issue! This is for actionable development items like bug reports and feature requests.
        If you have a question about using Caddy, please [post on our forums](https://caddy.community) instead.
  - type: textarea
    id: content
    attributes:
      label: Issue Details
      placeholder: Describe the issue here. Be specific by providing complete logs and minimal instructions to reproduce, or a thoughtful proposal, etc.
    validations:
      required: true
  - type: dropdown
    id: assistance-disclosure
    attributes:
      label: Assistance Disclosure
      description: "Our project allows assistance by AI/LLM tools as long as it is disclosed and described so we can better respond. Please certify whether you have used any such tooling related to this issue:"
      options:
        -
        - AI used
        - AI not used
    validations:
      required: true
  - type: input
    id: assistance-description
    attributes:
      label: If AI was used, describe the extent to which it was used.
      description: 'Examples: "ChatGPT translated from my native language" or "Claude proposed this change/feature"'

.github/ISSUE_TEMPLATE/config.yml (vendored, new file): 5 lines
@@ -0,0 +1,5 @@
blank_issues_enabled: false
contact_links:
  - name: Caddy forum
    url: https://caddy.community
    about: If you have questions (or answers!) about using Caddy, please use our forum

.github/SECURITY.md (vendored): 4 changed lines
@@ -48,9 +48,9 @@ We consider publicly-registered domain names to be public information. This nece
It will speed things up if you suggest a working patch, such as a code diff, and explain why and how it works. Reports that are not actionable, do not contain enough information, are too pushy/demanding, or are not able to convince us that it is a viable and practical attack on the web server itself may be deferred to a later time or possibly ignored, depending on available resources. Priority will be given to credible, responsible reports that are constructive, specific, and actionable. (We get a lot of invalid reports.) Thank you for understanding.

When you are ready, please email Matt Holt (the author) directly: matt at dyanim dot com.
When you are ready, please submit a [new private vulnerability report](https://github.com/caddyserver/caddy/security/advisories/new).

Please don't encrypt the email body. It only makes the process more complicated.
Please don't encrypt the message. It only makes the process more complicated.

Please also understand that due to our nature as an open source project, we do not have a budget to award security bounties. We can only thank you.

.github/dependabot.yml (vendored): 15 changed lines
@@ -3,5 +3,20 @@ version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
open-pull-requests-limit: 1
groups:
actions-deps:
patterns:
- "*"
schedule:
interval: "monthly"

- package-ecosystem: "gomod"
directory: "/"
open-pull-requests-limit: 1
groups:
all-updates:
patterns:
- "*"
schedule:
interval: "monthly"

.github/pull_request_template.md (vendored, new file): 29 lines
@@ -0,0 +1,29 @@



## Assistance Disclosure
<!--
Thank you for contributing! Please note:

The use of AI/LLM tools is allowed so long as it is disclosed, so
that we can provide better code review and maintain project quality.

If you used AI/LLM tooling in any way related to this PR, please
let us know to what extent it was utilized.

Examples:

"No AI was used."
"I wrote the code, but Claude generated the tests."
"I consulted ChatGPT for a solution, but I authored/coded it myself."
"Cody generated the code, and I verified it is correct."
"Copilot provided tab completion for code and comments."

We expect that you have vetted your contributions for correctness.
Additionally, signing our CLA certifies that you have the rights to
contribute this change.

Replace the text below with your disclosure:
-->

_This PR is missing an assistance disclosure._

.github/workflows/ai.yml (vendored, new file): 30 lines
@@ -0,0 +1,30 @@
name: AI Moderator
permissions: read-all
on:
  issues:
    types: [opened]
  issue_comment:
    types: [created]
  pull_request_review_comment:
    types: [created]

jobs:
  spam-detection:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
      models: read
      contents: read
    steps:
      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
      - uses: github/ai-moderator@6bcdb2a79c2e564db8d76d7d4439d91a044c4eb6
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          spam-label: 'spam'
          ai-label: 'ai-generated'
          minimize-detected-comments: true
          # Built-in prompt configuration (all enabled by default)
          enable-spam-detection: true
          enable-link-spam-detection: true
          enable-ai-detection: true
          # custom-prompt-path: '.github/prompts/my-custom.prompt.yml' # Optional

.github/workflows/ci.yml (vendored): 63 changed lines

@@ -13,9 +13,13 @@ on:
- 2.*

env:
GOFLAGS: '-tags=nobadger,nomysql,nopgx'
# https://github.com/actions/setup-go/issues/491
GOTOOLCHAIN: local

permissions:
contents: read

jobs:
test:
strategy:

@@ -27,13 +31,13 @@
- mac
- windows
go:
- '1.24'
- '1.25'

include:
# Set the minimum Go patch version for the given Go minor
# Usable via ${{ matrix.GO_SEMVER }}
- go: '1.24'
GO_SEMVER: '~1.24.1'
- go: '1.25'
GO_SEMVER: '~1.25.0'

# Set some variables per OS, usable via ${{ matrix.VAR }}
# OS_LABEL: the VM label from GitHub Actions (see https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for-public-repositories)

@@ -55,13 +59,21 @@
SUCCESS: 'True'

runs-on: ${{ matrix.OS_LABEL }}

permissions:
contents: read
pull-requests: read
actions: write # to allow uploading artifacts and cache
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit

- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

- name: Install Go
uses: actions/setup-go@v5
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ matrix.GO_SEMVER }}
check-latest: true

@@ -99,7 +111,7 @@
env:
CGO_ENABLED: 0
run: |
go build -tags nobadger,nomysql,nopgx -trimpath -ldflags="-w -s" -v
go build -trimpath -ldflags="-w -s" -v

- name: Smoke test Caddy
working-directory: ./cmd/caddy

@@ -108,7 +120,7 @@
./caddy stop

- name: Publish Build Artifact
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: caddy_${{ runner.os }}_go${{ matrix.go }}_${{ steps.vars.outputs.short_sha }}
path: ${{ matrix.CADDY_BIN_PATH }}

@@ -122,7 +134,7 @@
# continue-on-error: true
run: |
# (go test -v -coverprofile=cover-profile.out -race ./... 2>&1) > test-results/test-result.out
go test -tags nobadger,nomysql,nopgx -v -coverprofile="cover-profile.out" -short -race ./...
go test -v -coverprofile="cover-profile.out" -short -race ./...
# echo "status=$?" >> $GITHUB_OUTPUT

# Relevant step if we reinvestigate publishing test/coverage reports

@@ -142,12 +154,21 @@

s390x-test:
name: test (s390x on IBM Z)
permissions:
contents: read
pull-requests: read
runs-on: ubuntu-latest
if: github.event.pull_request.head.repo.full_name == 'caddyserver/caddy' && github.actor != 'dependabot[bot]'
continue-on-error: true # August 2020: s390x VM is down due to weather and power issues
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit
allowed-endpoints: ci-s390x.caddyserver.com:22

- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Run Tests
run: |
set +e

@@ -170,7 +191,7 @@
retries=3
exit_code=0
while ((retries > 0)); do
CGO_ENABLED=0 go test -p 1 -tags nobadger,nomysql,nopgx -v ./...
CGO_ENABLED=0 go test -p 1 -v ./...
exit_code=$?
if ((exit_code == 0)); then
break

@@ -194,25 +215,33 @@

goreleaser-check:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
if: github.event.pull_request.head.repo.full_name == 'caddyserver/caddy' && github.actor != 'dependabot[bot]'
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit

- uses: goreleaser/goreleaser-action@v6
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

- uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6.4.0
with:
version: latest
args: check
- name: Install Go
uses: actions/setup-go@v5
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: "~1.24"
go-version: "~1.25"
check-latest: true
- name: Install xcaddy
run: |
go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
xcaddy version
- uses: goreleaser/goreleaser-action@v6
- uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6.4.0
with:
version: latest
args: build --single-target --snapshot

.github/workflows/cross-build.yml (vendored): 27 changed lines

@@ -11,9 +11,14 @@ on:
- 2.*

env:
GOFLAGS: '-tags=nobadger,nomysql,nopgx'
CGO_ENABLED: '0'
# https://github.com/actions/setup-go/issues/491
GOTOOLCHAIN: local

permissions:
contents: read

jobs:
build:
strategy:

@@ -31,22 +36,30 @@
- 'darwin'
- 'netbsd'
go:
- '1.24'
- '1.25'

include:
# Set the minimum Go patch version for the given Go minor
# Usable via ${{ matrix.GO_SEMVER }}
- go: '1.24'
GO_SEMVER: '~1.24.1'
- go: '1.25'
GO_SEMVER: '~1.25.0'

runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
continue-on-error: true
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit

- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

- name: Install Go
uses: actions/setup-go@v5
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ matrix.GO_SEMVER }}
check-latest: true

@@ -63,11 +76,9 @@
- name: Run Build
env:
CGO_ENABLED: 0
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goos == 'aix' && 'ppc64' || 'amd64' }}
shell: bash
continue-on-error: true
working-directory: ./cmd/caddy
run: |
GOOS=$GOOS GOARCH=$GOARCH go build -tags=nobadger,nomysql,nopgx -trimpath -o caddy-"$GOOS"-$GOARCH 2> /dev/null
run: go build -trimpath -o caddy-"$GOOS"-$GOARCH 2> /dev/null

.github/workflows/lint.yml (vendored): 48 changed lines

@@ -44,14 +44,19 @@
runs-on: ${{ matrix.OS_LABEL }}

steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
go-version: '~1.24'
egress-policy: audit

- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: '~1.25'
check-latest: true

- name: golangci-lint
uses: golangci/golangci-lint-action@v6
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
with:
version: latest

@@ -62,10 +67,39 @@
# only-new-issues: true

govulncheck:
permissions:
contents: read
pull-requests: read
runs-on: ubuntu-latest
steps:
- name: govulncheck
uses: golang/govulncheck-action@v1
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
go-version-input: '~1.24.1'
egress-policy: audit

- name: govulncheck
uses: golang/govulncheck-action@b625fbe08f3bccbe446d94fbf87fcc875a4f50ee # v1.0.4
with:
go-version-input: '~1.25.0'
check-latest: true

dependency-review:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit

- name: 'Checkout Repository'
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: 'Dependency Review'
uses: actions/dependency-review-action@56339e523c0409420f6c2c9a2f4292bbb3c07dd3 # v4.8.0
with:
comment-summary-in-pr: on-failure
# https://github.com/actions/dependency-review-action/issues/430#issuecomment-1468975566
base-ref: ${{ github.event.pull_request.base.sha || 'master' }}
head-ref: ${{ github.event.pull_request.head.sha || github.ref }}

.github/workflows/release.yml (vendored): 26 changed lines

@@ -9,6 +9,9 @@ env:
# https://github.com/actions/setup-go/issues/491
GOTOOLCHAIN: local

permissions:
contents: read

jobs:
release:
name: Release

@@ -17,13 +20,13 @@
os:
- ubuntu-latest
go:
- '1.24'
- '1.25'

include:
# Set the minimum Go patch version for the given Go minor
# Usable via ${{ matrix.GO_SEMVER }}
- go: '1.24'
GO_SEMVER: '~1.24.1'
- go: '1.25'
GO_SEMVER: '~1.25.0'

runs-on: ${{ matrix.os }}
# https://github.com/sigstore/cosign/issues/1258#issuecomment-1002251233

@@ -35,19 +38,24 @@
contents: write

steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit

- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0

- name: Install Go
uses: actions/setup-go@v5
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ matrix.GO_SEMVER }}
check-latest: true

# Force fetch upstream tags -- because 65 minutes
# tl;dr: actions/checkout@v4 runs this line:
# tl;dr: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v4.2.2 runs this line:
# git -c protocol.version=2 fetch --no-tags --prune --progress --no-recurse-submodules --depth=1 origin +ebc278ec98bb24f2852b61fde2a9bf2e3d83818b:refs/tags/
# which makes its own local lightweight tag, losing all the annotations in the process. Our earlier script ran:
# git fetch --prune --unshallow

@@ -101,11 +109,11 @@
git verify-tag "${{ steps.vars.outputs.version_tag }}" || exit 1

- name: Install Cosign
uses: sigstore/cosign-installer@main
uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # main
- name: Cosign version
run: cosign version
- name: Install Syft
uses: anchore/sbom-action/download-syft@main
uses: anchore/sbom-action/download-syft@f8bdd1d8ac5e901a77a92f111440fdb1b593736b # main
- name: Syft version
run: syft version
- name: Install xcaddy

@@ -114,7 +122,7 @@
xcaddy version
# GoReleaser will take care of publishing those artifacts into the release
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6
uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6.4.0
with:
version: latest
args: release --clean --timeout 60m

.github/workflows/release_published.yml (vendored): 17 changed lines

@@ -5,6 +5,9 @@ on:
release:
types: [published]

permissions:
contents: read

jobs:
release:
name: Release Published

@@ -13,12 +16,20 @@
os:
- ubuntu-latest
runs-on: ${{ matrix.os }}

permissions:
contents: read
pull-requests: read
actions: write
steps:

# See https://github.com/peter-evans/repository-dispatch
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
with:
egress-policy: audit

- name: Trigger event on caddyserver/dist
uses: peter-evans/repository-dispatch@v3
uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
with:
token: ${{ secrets.REPO_DISPATCH_TOKEN }}
repository: caddyserver/dist

@@ -26,7 +37,7 @@
client-payload: '{"tag": "${{ github.event.release.tag_name }}"}'

- name: Trigger event on caddyserver/caddy-docker
uses: peter-evans/repository-dispatch@v3
uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f # v4.0.0
with:
token: ${{ secrets.REPO_DISPATCH_TOKEN }}
repository: caddyserver/caddy-docker

.github/workflows/scorecard.yml (vendored, new file): 86 lines
@@ -0,0 +1,86 @@
# This workflow uses actions that are not certified by GitHub. They are provided
# by a third-party and are governed by separate terms of service, privacy
# policy, and support documentation.

name: OpenSSF Scorecard supply-chain security
on:
  # For Branch-Protection check. Only the default branch is supported. See
  # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
  branch_protection_rule:
  # To guarantee Maintained check is occasionally updated. See
  # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
  schedule:
    - cron: '20 2 * * 5'
  push:
    branches: [ "master", "2.*" ]
  pull_request:
    branches: [ "master", "2.*" ]

# Declare default permissions as read only.
permissions: read-all

jobs:
  analysis:
    name: Scorecard analysis
    runs-on: ubuntu-latest
    # `publish_results: true` only works when run from the default branch. conditional can be removed if disabled.
    if: github.event.repository.default_branch == github.ref_name || github.event_name == 'pull_request'
    permissions:
      # Needed to upload the results to code-scanning dashboard.
      security-events: write
      # Needed to publish results and get a badge (see publish_results below).
      id-token: write
      # Uncomment the permissions below if installing in a private repository.
      # contents: read
      # actions: read

    steps:
      - name: Harden the runner (Audit all outbound calls)
        uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1
        with:
          egress-policy: audit

      - name: "Checkout code"
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          persist-credentials: false

      - name: "Run analysis"
        uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3
        with:
          results_file: results.sarif
          results_format: sarif
          # (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
          # - you want to enable the Branch-Protection check on a *public* repository, or
          # - you are installing Scorecard on a *private* repository
          # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
          # repo_token: ${{ secrets.SCORECARD_TOKEN }}

          # Public repositories:
          # - Publish results to OpenSSF REST API for easy access by consumers
          # - Allows the repository to include the Scorecard badge.
          # - See https://github.com/ossf/scorecard-action#publishing-results.
          # For private repositories:
          # - `publish_results` will always be set to `false`, regardless
          #   of the value entered here.
          publish_results: true

          # (Optional) Uncomment file_mode if you have a .gitattributes with files marked export-ignore
          # file_mode: git

      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
      # format to the repository Actions tab.
      - name: "Upload artifact"
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:
          name: SARIF file
          path: results.sarif
          retention-days: 5

      # Upload the results to GitHub's code scanning dashboard (optional).
      # Commenting out will disable upload of results to your repo's Code Scanning dashboard
      - name: "Upload to code-scanning"
        uses: github/codeql-action/upload-sarif@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.29.5
        with:
          sarif_file: results.sarif

.golangci.yml: 242 changed lines

@@ -1,27 +1,19 @@
linters-settings:
errcheck:
exclude-functions:
- fmt.*
- (go.uber.org/zap/zapcore.ObjectEncoder).AddObject
- (go.uber.org/zap/zapcore.ObjectEncoder).AddArray
gci:
sections:
- standard # Standard section: captures all standard packages.
- default # Default section: contains all imports that could not be matched to another section type.
- prefix(github.com/caddyserver/caddy/v2/cmd) # ensure that this is always at the top and always has a line break.
- prefix(github.com/caddyserver/caddy) # Custom section: groups all imports with the specified Prefix.
# Skip generated files.
# Default: true
skip-generated: true
# Enable custom order of sections.
# If `true`, make the section order the same as the order of `sections`.
# Default: false
custom-order: true
exhaustive:
ignore-enum-types: reflect.Kind|svc.Cmd

version: "2"
run:
issues-exit-code: 1
tests: false
build-tags:
- nobadger
- nomysql
- nopgx
output:
formats:
text:
path: stdout
print-linter-name: true
print-issued-lines: true
linters:
disable-all: true
default: none
enable:
- asasalint
- asciicheck

@@ -35,148 +27,96 @@
- errcheck
- errname
- exhaustive
- gci
- gofmt
- goimports
- gofumpt
- gosec
- gosimple
- govet
- ineffassign
- importas
- ineffassign
- misspell
- prealloc
- promlinter
- sloglint
- sqlclosecheck
- staticcheck
- tenv
- testableexamples
- testifylint
- tparallel
- typecheck
- unconvert
- unused
- wastedassign
- whitespace
- zerologlint
# these are implicitly disabled:
# - containedctx
# - contextcheck
# - cyclop
# - depguard
# - errchkjson
# - errorlint
# - exhaustruct
# - execinquery
# - exhaustruct
# - forbidigo
# - forcetypeassert
# - funlen
# - ginkgolinter
# - gocheckcompilerdirectives
# - gochecknoglobals
# - gochecknoinits
# - gochecksumtype
# - gocognit
# - goconst
# - gocritic
# - gocyclo
# - godot
# - godox
# - goerr113
# - goheader
# - gomnd
# - gomoddirectives
# - gomodguard
# - goprintffuncname
# - gosmopolitan
# - grouper
# - inamedparam
# - interfacebloat
# - ireturn
# - lll
# - loggercheck
# - maintidx
# - makezero
# - mirror
# - musttag
# - nakedret
# - nestif
# - nilerr
# - nilnil
# - nlreturn
# - noctx
# - nolintlint
# - nonamedreturns
# - nosprintfhostport
# - paralleltest
# - perfsprint
# - predeclared
# - protogetter
# - reassign
# - revive
# - rowserrcheck
# - stylecheck
# - tagalign
# - tagliatelle
# - testpackage
# - thelper
# - unparam
# - usestdlibvars
# - varnamelen
# - wrapcheck
# - wsl

run:
# default concurrency is a available CPU number.
# concurrency: 4 # explicitly omit this value to fully utilize available resources.
timeout: 5m
issues-exit-code: 1
tests: false

# output configuration options
output:
formats:
- format: 'colored-line-number'
print-issued-lines: true
print-linter-name: true

issues:
exclude-rules:
- text: 'G115' # TODO: Either we should fix the issues or nuke the linter if it's bad
linters:
- gosec
# we aren't calling unknown URL
- text: 'G107' # G107: Url provided to HTTP request as taint input
linters:
- gosec
# as a web server that's expected to handle any template, this is totally in the hands of the user.
- text: 'G203' # G203: Use of unescaped data in HTML templates
linters:
- gosec
# we're shelling out to known commands, not relying on user-defined input.
- text: 'G204' # G204: Audit use of command execution
linters:
- gosec
# the choice of weakrand is deliberate, hence the named import "weakrand"
- path: modules/caddyhttp/reverseproxy/selectionpolicies.go
text: 'G404' # G404: Insecure random number source (rand)
linters:
- gosec
- path: modules/caddyhttp/reverseproxy/streaming.go
text: 'G404' # G404: Insecure random number source (rand)
linters:
- gosec
- path: modules/logging/filters.go
linters:
- dupl
- path: modules/caddyhttp/matchers.go
linters:
- dupl
- path: modules/caddyhttp/vars.go
linters:
- dupl
- path: _test\.go
linters:
- errcheck
settings:
staticcheck:
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-QF1006", "-QF1008"] # default, and exclude 1 more undesired check
errcheck:
exclude-functions:
- fmt.*
- (go.uber.org/zap/zapcore.ObjectEncoder).AddObject
- (go.uber.org/zap/zapcore.ObjectEncoder).AddArray
exhaustive:
ignore-enum-types: reflect.Kind|svc.Cmd
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
rules:
- linters:
- gosec
text: G115 # TODO: Either we should fix the issues or nuke the linter if it's bad
- linters:
- gosec
text: G107 # we aren't calling unknown URL
- linters:
- gosec
text: G203 # as a web server that's expected to handle any template, this is totally in the hands of the user.
- linters:
- gosec
text: G204 # we're shelling out to known commands, not relying on user-defined input.
- linters:
- gosec
# the choice of weakrand is deliberate, hence the named import "weakrand"
path: modules/caddyhttp/reverseproxy/selectionpolicies.go
text: G404
- linters:
- gosec
path: modules/caddyhttp/reverseproxy/streaming.go
text: G404
- linters:
- dupl
path: modules/logging/filters.go
- linters:
- dupl
path: modules/caddyhttp/matchers.go
- linters:
- dupl
path: modules/caddyhttp/vars.go
- linters:
- errcheck
path: _test\.go
paths:
- third_party$
- builtin$
- examples$
formatters:
enable:
- gci
- gofmt
- gofumpt
- goimports
settings:
gci:
sections:
- standard # Standard section: captures all standard packages.
- default # Default section: contains all imports that could not be matched to another section type.
- prefix(github.com/caddyserver/caddy/v2/cmd) # ensure that this is always at the top and always has a line break.
- prefix(github.com/caddyserver/caddy) # Custom section: groups all imports with the specified Prefix.
custom-order: true
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$

.pre-commit-config.yaml (new file): 20 lines
@@ -0,0 +1,20 @@
repos:
  - repo: https://github.com/gitleaks/gitleaks
    rev: v8.16.3
    hooks:
      - id: gitleaks
  - repo: https://github.com/golangci/golangci-lint
    rev: v1.52.2
    hooks:
      - id: golangci-lint-config-verify
      - id: golangci-lint
      - id: golangci-lint-fmt
  - repo: https://github.com/jumanjihouse/pre-commit-hooks
    rev: 3.0.0
    hooks:
      - id: shellcheck
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: end-of-file-fixer
      - id: trailing-whitespace

README.md

@@ -14,6 +14,7 @@
<p align="center">Caddy is an extensible server platform that uses TLS by default.</p>
<p align="center">
<a href="https://github.com/caddyserver/caddy/actions/workflows/ci.yml"><img src="https://github.com/caddyserver/caddy/actions/workflows/ci.yml/badge.svg"></a>
<a href="https://www.bestpractices.dev/projects/7141"><img src="https://www.bestpractices.dev/projects/7141/badge"></a>
<a href="https://pkg.go.dev/github.com/caddyserver/caddy/v2"><img src="https://img.shields.io/badge/godoc-reference-%23007d9c.svg"></a>
<br>
<a href="https://x.com/caddyserver" title="@caddyserver on Twitter"><img src="https://img.shields.io/twitter/follow/caddyserver" alt="@caddyserver on Twitter"></a>

@@ -88,7 +89,7 @@ See [our online documentation](https://caddyserver.com/docs/install) for other i
Requirements:

- [Go 1.24.0 or newer](https://golang.org/dl/)
- [Go 1.25.0 or newer](https://golang.org/dl/)

### For development

admin.go: 102 changed lines

@@ -221,7 +221,8 @@ func (admin *AdminConfig) newAdminHandler(addr NetworkAddress, remote bool, _ Co
if remote {
muxWrap.remoteControl = admin.Remote
} else {
muxWrap.enforceHost = !addr.isWildcardInterface()
// see comment in allowedOrigins() as to why we disable the host check for unix/fd networks
muxWrap.enforceHost = !addr.isWildcardInterface() && !addr.IsUnixNetwork() && !addr.IsFdNetwork()
muxWrap.allowedOrigins = admin.allowedOrigins(addr)
muxWrap.enforceOrigin = admin.EnforceOrigin
}

@@ -310,47 +311,43 @@ func (admin AdminConfig) allowedOrigins(addr NetworkAddress) []*url.URL {
for _, o := range admin.Origins {
uniqueOrigins[o] = struct{}{}
}
if admin.Origins == nil {
// RFC 2616, Section 14.26:
// "A client MUST include a Host header field in all HTTP/1.1 request
// messages. If the requested URI does not include an Internet host
// name for the service being requested, then the Host header field MUST
// be given with an empty value."
//
// UPDATE July 2023: Go broke this by patching a minor security bug in 1.20.6.
// Understandable, but frustrating. See:
// https://github.com/golang/go/issues/60374
// See also the discussion here:
// https://github.com/golang/go/issues/61431
//
// We can no longer conform to RFC 2616 Section 14.26 from either Go or curl
// in purity. (Curl allowed no host between 7.40 and 7.50, but now requires a
// bogus host; see https://superuser.com/a/925610.) If we disable Host/Origin
// security checks, the infosec community assures me that it is secure to do
// so, because:
//
// 1) Browsers do not allow access to unix sockets
// 2) DNS is irrelevant to unix sockets
//
// If either of those two statements ever fail to hold true, it is not the
// fault of Caddy.
//
// Thus, we do not fill out allowed origins and do not enforce Host
// requirements for unix sockets. Enforcing it leads to confusion and
// frustration, when UDS have their own permissions from the OS.
// Enforcing host requirements here is effectively security theater,
// and a false sense of security.
//
// See also the discussion in #6832.
if admin.Origins == nil && !addr.IsUnixNetwork() && !addr.IsFdNetwork() {
if addr.isLoopback() {
if addr.IsUnixNetwork() || addr.IsFdNetwork() {
// RFC 2616, Section 14.26:
// "A client MUST include a Host header field in all HTTP/1.1 request
// messages. If the requested URI does not include an Internet host
// name for the service being requested, then the Host header field MUST
// be given with an empty value."
//
// UPDATE July 2023: Go broke this by patching a minor security bug in 1.20.6.
// Understandable, but frustrating. See:
// https://github.com/golang/go/issues/60374
// See also the discussion here:
// https://github.com/golang/go/issues/61431
//
// We can no longer conform to RFC 2616 Section 14.26 from either Go or curl
// in purity. (Curl allowed no host between 7.40 and 7.50, but now requires a
// bogus host; see https://superuser.com/a/925610.) If we disable Host/Origin
// security checks, the infosec community assures me that it is secure to do
// so, because:
// 1) Browsers do not allow access to unix sockets
// 2) DNS is irrelevant to unix sockets
//
// I am not quite ready to trust either of those external factors, so instead
// of disabling Host/Origin checks, we now allow specific Host values when
// accessing the admin endpoint over unix sockets. I definitely don't trust
// DNS (e.g. I don't trust 'localhost' to always resolve to the local host),
// and IP shouldn't even be used, but if it is for some reason, I think we can
// at least be reasonably assured that 127.0.0.1 and ::1 route to the local
// machine, meaning that a hypothetical browser origin would have to be on the
// local machine as well.
uniqueOrigins[""] = struct{}{}
uniqueOrigins["127.0.0.1"] = struct{}{}
uniqueOrigins["::1"] = struct{}{}
} else {
uniqueOrigins[net.JoinHostPort("localhost", addr.port())] = struct{}{}
uniqueOrigins[net.JoinHostPort("::1", addr.port())] = struct{}{}
uniqueOrigins[net.JoinHostPort("127.0.0.1", addr.port())] = struct{}{}
}
}
if !addr.IsUnixNetwork() && !addr.IsFdNetwork() {
uniqueOrigins[net.JoinHostPort("localhost", addr.port())] = struct{}{}
uniqueOrigins[net.JoinHostPort("::1", addr.port())] = struct{}{}
uniqueOrigins[net.JoinHostPort("127.0.0.1", addr.port())] = struct{}{}
} else {
uniqueOrigins[addr.JoinHostPort(0)] = struct{}{}
}
}

@@ -427,6 +424,13 @@ func replaceLocalAdminServer(cfg *Config, ctx Context) error {
handler := cfg.Admin.newAdminHandler(addr, false, ctx)

// run the provisioners for loaded modules to make sure local
// state is properly re-initialized in the new admin server
err = cfg.Admin.provisionAdminRouters(ctx)
if err != nil {
return err
}

ln, err := addr.Listen(context.TODO(), 0, net.ListenConfig{})
if err != nil {
return err

@@ -548,6 +552,13 @@ func replaceRemoteAdminServer(ctx Context, cfg *Config) error {
// because we are using TLS authentication instead
handler := cfg.Admin.newAdminHandler(addr, true, ctx)

// run the provisioners for loaded modules to make sure local
// state is properly re-initialized in the new admin server
err = cfg.Admin.provisionAdminRouters(ctx)
if err != nil {
return err
}

// create client certificate pool for TLS mutual auth, and extract public keys
// so that we can enforce access controls at the application layer
clientCertPool := x509.NewCertPool()

@@ -935,7 +946,7 @@ func (h adminHandler) originAllowed(origin *url.URL) bool {
return false
}

// etagHasher returns a the hasher we used on the config to both
// etagHasher returns the hasher we used on the config to both
// produce and verify ETags.
func etagHasher() hash.Hash { return xxhash.New() }

@@ -1018,6 +1029,13 @@ func handleConfig(w http.ResponseWriter, r *http.Request) error {
return err
}

// If this request changed the config, clear the last
// config info we have stored, if it is different from
// the original source.
ClearLastConfigIfDifferent(
r.Header.Get("Caddy-Config-Source-File"),
r.Header.Get("Caddy-Config-Source-Adapter"))

default:
return APIError{
HTTPStatus: http.StatusMethodNotAllowed,
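
The handleConfig hunk above reads two new request headers and hands them to ClearLastConfigIfDifferent. Below is a minimal sketch, not taken from the repository, of a hypothetical client setting those headers when pushing a config to the admin API; localhost:2019 and the /config/ endpoint are Caddy's documented admin defaults, and whether external clients need to send these headers at all is an assumption, since the Caddy CLI normally supplies the config source itself.

	// Hypothetical client sketch; the header names come from the hunk above.
	package example

	import (
		"bytes"
		"fmt"
		"net/http"
		"os"
	)

	func pushConfig(path string) error {
		cfg, err := os.ReadFile(path)
		if err != nil {
			return err
		}
		req, err := http.NewRequest(http.MethodPost, "http://localhost:2019/config/", bytes.NewReader(cfg))
		if err != nil {
			return err
		}
		req.Header.Set("Content-Type", "application/json")
		// These headers let handleConfig decide whether its recorded
		// "last config" source still matches the config being applied.
		req.Header.Set("Caddy-Config-Source-File", path)
		req.Header.Set("Caddy-Config-Source-Adapter", "")
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		fmt.Println("status:", resp.Status)
		return nil
	}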

admin_test.go

@@ -19,6 +19,7 @@ import (
"crypto/x509"
"encoding/json"
"fmt"
"maps"
"net/http"
"net/http/httptest"
"reflect"

@@ -148,11 +149,9 @@ func TestLoadConcurrent(t *testing.T) {
var wg sync.WaitGroup

for i := 0; i < 100; i++ {
wg.Add(1)
go func() {
wg.Go(func() {
_ = Load(testCfg, true)
wg.Done()
}()
})
}
wg.Wait()
}

@@ -206,7 +205,7 @@ func TestETags(t *testing.T) {
}

func BenchmarkLoad(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
Load(testCfg, true)
}
}

@@ -335,9 +334,7 @@ func TestAdminHandlerBuiltinRouteErrors(t *testing.T) {

func testGetMetricValue(labels map[string]string) float64 {
promLabels := prometheus.Labels{}
for k, v := range labels {
promLabels[k] = v
}
maps.Copy(promLabels, labels)

metric, err := adminMetrics.requestErrors.GetMetricWith(promLabels)
if err != nil {

@@ -377,9 +374,7 @@ func (m *mockModule) CaddyModule() ModuleInfo {

func TestNewAdminHandlerRouterRegistration(t *testing.T) {
originalModules := make(map[string]ModuleInfo)
for k, v := range modules {
originalModules[k] = v
}
maps.Copy(originalModules, modules)
defer func() {
modules = originalModules
}()

@@ -479,9 +474,7 @@ func TestAdminRouterProvisioning(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
originalModules := make(map[string]ModuleInfo)
for k, v := range modules {
originalModules[k] = v
}
maps.Copy(originalModules, modules)
defer func() {
modules = originalModules
}()

@@ -531,6 +524,7 @@ func TestAdminRouterProvisioning(t *testing.T) {
}

func TestAllowedOriginsUnixSocket(t *testing.T) {
// see comment in allowedOrigins() as to why we do not fill out allowed origins for UDS
tests := []struct {
name string
addr NetworkAddress

@@ -543,12 +537,8 @@ func TestAllowedOriginsUnixSocket(t *testing.T) {
Network: "unix",
Host: "/tmp/caddy.sock",
},
origins: nil, // default origins
expectOrigins: []string{
"", // empty host as per RFC 2616
"127.0.0.1",
"::1",
},
origins: nil, // default origins
expectOrigins: []string{},
},
{
name: "unix socket with custom origins",

@@ -578,7 +568,7 @@ func TestAllowedOriginsUnixSocket(t *testing.T) {
},
}

for _, test := range tests {
for i, test := range tests {
t.Run(test.name, func(t *testing.T) {
admin := AdminConfig{
Origins: test.origins,

@@ -592,7 +582,7 @@ func TestAllowedOriginsUnixSocket(t *testing.T) {
}

if len(gotOrigins) != len(test.expectOrigins) {
t.Errorf("Expected %d origins but got %d", len(test.expectOrigins), len(gotOrigins))
t.Errorf("%d: Expected %d origins but got %d", i, len(test.expectOrigins), len(gotOrigins))
return
}

@@ -607,7 +597,7 @@ func TestAllowedOriginsUnixSocket(t *testing.T) {
}

if !reflect.DeepEqual(expectMap, gotMap) {
t.Errorf("Origins mismatch.\nExpected: %v\nGot: %v", test.expectOrigins, gotOrigins)
t.Errorf("%d: Origins mismatch.\nExpected: %v\nGot: %v", i, test.expectOrigins, gotOrigins)
}
})
}

@@ -777,9 +767,7 @@ func (m *mockIssuerModule) CaddyModule() ModuleInfo {

func TestManageIdentity(t *testing.T) {
originalModules := make(map[string]ModuleInfo)
for k, v := range modules {
originalModules[k] = v
}
maps.Copy(originalModules, modules)
defer func() {
modules = originalModules
}()
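
For context on the test changes above: wg.Go and b.Loop are newer standard-library idioms (sync.WaitGroup.Go was added in Go 1.25, testing.B.Loop in Go 1.24). A minimal sketch, not taken from the repository, of how they replace the older boilerplate:

	// sketch_test.go (illustrative names; any test package works)
	package sketch

	import (
		"sync"
		"testing"
	)

	// loadConcurrently runs fn n times concurrently and waits for completion.
	// wg.Go is equivalent to wg.Add(1) plus a goroutine that defers wg.Done().
	func loadConcurrently(n int, fn func()) {
		var wg sync.WaitGroup
		for i := 0; i < n; i++ {
			wg.Go(fn)
		}
		wg.Wait()
	}

	// BenchmarkSketch uses b.Loop, which replaces the classic
	// "for i := 0; i < b.N; i++" benchmark loop.
	func BenchmarkSketch(b *testing.B) {
		for b.Loop() {
			loadConcurrently(100, func() {})
		}
	}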
239
caddy.go
239
caddy.go
|
@ -81,13 +81,17 @@ type Config struct {
|
|||
// associated value.
|
||||
AppsRaw ModuleMap `json:"apps,omitempty" caddy:"namespace="`
|
||||
|
||||
apps map[string]App
|
||||
storage certmagic.Storage
|
||||
apps map[string]App
|
||||
|
||||
// failedApps is a map of apps that failed to provision with their underlying error.
|
||||
failedApps map[string]error
|
||||
storage certmagic.Storage
|
||||
eventEmitter eventEmitter
|
||||
|
||||
cancelFunc context.CancelFunc
|
||||
|
||||
// filesystems is a dict of filesystems that will later be loaded from and added to.
|
||||
filesystems FileSystems
|
||||
// fileSystems is a dict of fileSystems that will later be loaded from and added to.
|
||||
fileSystems FileSystems
|
||||
}
|
||||
|
||||
// App is a thing that Caddy runs.
|
||||
|
@ -407,11 +411,23 @@ func run(newCfg *Config, start bool) (Context, error) {
|
|||
return ctx, nil
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// if newCfg fails to start completely, clean up the already provisioned modules
|
||||
// partially copied from provisionContext
|
||||
if err != nil {
|
||||
globalMetrics.configSuccess.Set(0)
|
||||
ctx.cfg.cancelFunc()
|
||||
|
||||
if currentCtx.cfg != nil {
|
||||
certmagic.Default.Storage = currentCtx.cfg.storage
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Provision any admin routers which may need to access
|
||||
// some of the other apps at runtime
|
||||
err = ctx.cfg.Admin.provisionAdminRouters(ctx)
|
||||
if err != nil {
|
||||
globalMetrics.configSuccess.Set(0)
|
||||
return ctx, err
|
||||
}
|
||||
|
||||
|
@ -437,14 +453,18 @@ func run(newCfg *Config, start bool) (Context, error) {
|
|||
return nil
|
||||
}()
|
||||
if err != nil {
|
||||
globalMetrics.configSuccess.Set(0)
|
||||
return ctx, err
|
||||
}
|
||||
globalMetrics.configSuccess.Set(1)
|
||||
globalMetrics.configSuccessTime.SetToCurrentTime()
|
||||
|
||||
// TODO: This event is experimental and subject to change.
|
||||
ctx.emitEvent("started", nil)
|
||||
|
||||
// now that the user's config is running, finish setting up anything else,
|
||||
// such as remote admin endpoint, config loader, etc.
|
||||
return ctx, finishSettingUp(ctx, ctx.cfg)
|
||||
err = finishSettingUp(ctx, ctx.cfg)
|
||||
return ctx, err
|
||||
}
|
||||
|
||||
// provisionContext creates a new context from the given configuration and provisions
|
||||
|
@ -500,19 +520,12 @@ func provisionContext(newCfg *Config, replaceAdminServer bool) (Context, error)
|
|||
return ctx, err
|
||||
}
|
||||
|
||||
// start the admin endpoint (and stop any prior one)
|
||||
if replaceAdminServer {
|
||||
err = replaceLocalAdminServer(newCfg, ctx)
|
||||
if err != nil {
|
||||
return ctx, fmt.Errorf("starting caddy administration endpoint: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// create the new filesystem map
|
||||
newCfg.filesystems = &filesystems.FilesystemMap{}
|
||||
newCfg.fileSystems = &filesystems.FileSystemMap{}
|
||||
|
||||
// prepare the new config for use
|
||||
newCfg.apps = make(map[string]App)
|
||||
newCfg.failedApps = make(map[string]error)
|
||||
|
||||
// set up global storage and make it CertMagic's default storage, too
|
||||
err = func() error {
|
||||
|
@ -539,6 +552,14 @@ func provisionContext(newCfg *Config, replaceAdminServer bool) (Context, error)
|
|||
return ctx, err
|
||||
}
|
||||
|
||||
// start the admin endpoint (and stop any prior one)
|
||||
if replaceAdminServer {
|
||||
err = replaceLocalAdminServer(newCfg, ctx)
|
||||
if err != nil {
|
||||
return ctx, fmt.Errorf("starting caddy administration endpoint: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Load and Provision each app and their submodules
|
||||
err = func() error {
|
||||
for appName := range newCfg.AppsRaw {
|
||||
|
@ -696,6 +717,9 @@ func unsyncedStop(ctx Context) {
|
|||
return
|
||||
}
|
||||
|
||||
// TODO: This event is experimental and subject to change.
|
||||
ctx.emitEvent("stopping", nil)
|
||||
|
||||
// stop each app
|
||||
for name, a := range ctx.cfg.apps {
|
||||
err := a.Stop()
|
||||
|
@ -951,11 +975,11 @@ func Version() (simple, full string) {
|
|||
if CustomVersion != "" {
|
||||
full = CustomVersion
|
||||
simple = CustomVersion
|
||||
return
|
||||
return simple, full
|
||||
}
|
||||
full = "unknown"
|
||||
simple = "unknown"
|
||||
return
|
||||
return simple, full
|
||||
}
|
||||
// find the Caddy module in the dependency list
|
||||
for _, dep := range bi.Deps {
|
||||
|
@ -1035,9 +1059,101 @@ func Version() (simple, full string) {
|
|||
}
|
||||
}
|
||||
|
||||
return
|
||||
return simple, full
|
||||
}
|
||||
|
||||
// Event represents something that has happened or is happening.
|
||||
// An Event value is not synchronized, so it should be copied if
|
||||
// being used in goroutines.
|
||||
//
|
||||
// EXPERIMENTAL: Events are subject to change.
|
||||
type Event struct {
|
||||
// If non-nil, the event has been aborted, meaning
|
||||
// propagation has stopped to other handlers and
|
||||
// the code should stop what it was doing. Emitters
|
||||
// may choose to use this as a signal to adjust their
|
||||
// code path appropriately.
|
||||
Aborted error
|
||||
|
||||
// The data associated with the event. Usually the
|
||||
// original emitter will be the only one to set or
|
||||
// change these values, but the field is exported
|
||||
// so handlers can have full access if needed.
|
||||
// However, this map is not synchronized, so
|
||||
// handlers must not use this map directly in new
|
||||
// goroutines; instead, copy the map to use it in a
|
||||
// goroutine. Data may be nil.
|
||||
Data map[string]any
|
||||
|
||||
id uuid.UUID
|
||||
ts time.Time
|
||||
name string
|
||||
origin Module
|
||||
}
|
||||
|
||||
// NewEvent creates a new event, but does not emit the event. To emit an
|
||||
// event, call Emit() on the current instance of the caddyevents app insteaad.
|
||||
//
|
||||
// EXPERIMENTAL: Subject to change.
|
||||
func NewEvent(ctx Context, name string, data map[string]any) (Event, error) {
|
||||
id, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
return Event{}, fmt.Errorf("generating new event ID: %v", err)
|
||||
}
|
||||
name = strings.ToLower(name)
|
||||
return Event{
|
||||
Data: data,
|
||||
id: id,
|
||||
ts: time.Now(),
|
||||
name: name,
|
||||
origin: ctx.Module(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e Event) ID() uuid.UUID { return e.id }
|
||||
func (e Event) Timestamp() time.Time { return e.ts }
|
||||
func (e Event) Name() string { return e.name }
|
||||
func (e Event) Origin() Module { return e.origin } // Returns the module that originated the event. May be nil, usually if caddy core emits the event.
|
||||
|
||||
// CloudEvent exports event e as a structure that, when
|
||||
// serialized as JSON, is compatible with the
|
||||
// CloudEvents spec.
|
||||
func (e Event) CloudEvent() CloudEvent {
|
||||
dataJSON, _ := json.Marshal(e.Data)
|
||||
var source string
|
||||
if e.Origin() == nil {
|
||||
source = "caddy"
|
||||
} else {
|
||||
source = string(e.Origin().CaddyModule().ID)
|
||||
}
|
||||
return CloudEvent{
|
||||
ID: e.id.String(),
|
||||
Source: source,
|
||||
SpecVersion: "1.0",
|
||||
Type: e.name,
|
||||
Time: e.ts,
|
||||
DataContentType: "application/json",
|
||||
Data: dataJSON,
|
||||
}
|
||||
}
|
||||
|
||||
// CloudEvent is a JSON-serializable structure that
|
||||
// is compatible with the CloudEvents specification.
|
||||
// See https://cloudevents.io.
|
||||
// EXPERIMENTAL: Subject to change.
|
||||
type CloudEvent struct {
|
||||
ID string `json:"id"`
|
||||
Source string `json:"source"`
|
||||
SpecVersion string `json:"specversion"`
|
||||
Type string `json:"type"`
|
||||
Time time.Time `json:"time"`
|
||||
DataContentType string `json:"datacontenttype,omitempty"`
|
||||
Data json.RawMessage `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
// ErrEventAborted cancels an event.
|
||||
var ErrEventAborted = errors.New("event aborted")
|
||||
|
||||
// ActiveContext returns the currently-active context.
|
||||
// This function is experimental and might be changed
|
||||
// or removed in the future.
@@ -1081,6 +1197,91 @@ var (
	rawCfgMu sync.RWMutex
)

// lastConfigFile and lastConfigAdapter remember the source config
// file and adapter used when Caddy was started via the CLI "run" command.
// These are consulted by the SIGUSR1 handler to attempt reloading from
// the same source. They are intentionally not set for other entrypoints
// such as "caddy start" or subcommands like file-server.
var (
	lastConfigMu      sync.RWMutex
	lastConfigFile    string
	lastConfigAdapter string
)

// reloadFromSourceFunc is the type of stored callback
// which is called when we receive a SIGUSR1 signal.
type reloadFromSourceFunc func(file, adapter string) error

// reloadFromSourceCallback is the stored callback
// which is called when we receive a SIGUSR1 signal.
var reloadFromSourceCallback reloadFromSourceFunc

// errReloadFromSourceUnavailable is returned when no reload-from-source callback is set.
var errReloadFromSourceUnavailable = errors.New("reload from source unavailable in this process") //nolint:unused

// SetLastConfig records the given source file and adapter as the
// last-known external configuration source. Intended to be called
// only when starting via "caddy run --config <file> --adapter <adapter>".
func SetLastConfig(file, adapter string, fn reloadFromSourceFunc) {
	lastConfigMu.Lock()
	lastConfigFile = file
	lastConfigAdapter = adapter
	reloadFromSourceCallback = fn
	lastConfigMu.Unlock()
}

// ClearLastConfigIfDifferent clears the recorded last-config if the provided
// source file/adapter do not match the recorded last-config. If both srcFile
// and srcAdapter are empty, the last-config is cleared.
func ClearLastConfigIfDifferent(srcFile, srcAdapter string) {
	if (srcFile != "" || srcAdapter != "") && lastConfigMatches(srcFile, srcAdapter) {
		return
	}
	SetLastConfig("", "", nil)
}

// getLastConfig returns the last-known config file and adapter.
func getLastConfig() (file, adapter string, fn reloadFromSourceFunc) {
	lastConfigMu.RLock()
	f, a, cb := lastConfigFile, lastConfigAdapter, reloadFromSourceCallback
	lastConfigMu.RUnlock()
	return f, a, cb
}

// lastConfigMatches returns true if the provided source file and/or adapter
// matches the recorded last-config. Matching rules (in priority order):
// 1. If srcAdapter is provided and differs from the recorded adapter, no match.
// 2. If srcFile exactly equals the recorded file, match.
// 3. If both sides can be made absolute and equal, match.
// 4. If basenames are equal, match.
func lastConfigMatches(srcFile, srcAdapter string) bool {
	lf, la, _ := getLastConfig()

	// If adapter is provided, it must match.
	if srcAdapter != "" && srcAdapter != la {
		return false
	}

	// Quick equality check.
	if srcFile == lf {
		return true
	}

	// Try absolute path comparison.
	sAbs, sErr := filepath.Abs(srcFile)
	lAbs, lErr := filepath.Abs(lf)
	if sErr == nil && lErr == nil && sAbs == lAbs {
		return true
	}

	// Final fallback: basename equality.
	if filepath.Base(srcFile) == filepath.Base(lf) {
		return true
	}

	return false
}
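
To make the priority order above concrete, here is a standalone re-statement of the matching rules using only the standard library; it is an illustration, not the package's unexported lastConfigMatches function, and the sample paths and adapter names are invented.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// matches restates the rules documented above: the adapter must agree if one
// is given, then exact path equality, then absolute-path equality, then
// basename equality.
func matches(srcFile, srcAdapter, lastFile, lastAdapter string) bool {
	if srcAdapter != "" && srcAdapter != lastAdapter {
		return false
	}
	if srcFile == lastFile {
		return true
	}
	sAbs, sErr := filepath.Abs(srcFile)
	lAbs, lErr := filepath.Abs(lastFile)
	if sErr == nil && lErr == nil && sAbs == lAbs {
		return true
	}
	return filepath.Base(srcFile) == filepath.Base(lastFile)
}

func main() {
	// true via basename match (rule 4), assuming the process is not running in /etc/caddy
	fmt.Println(matches("./Caddyfile", "caddyfile", "/etc/caddy/Caddyfile", "caddyfile"))
	// false: adapter mismatch short-circuits (rule 1)
	fmt.Println(matches("/tmp/other.json", "json", "/etc/caddy/Caddyfile", "caddyfile"))
}
```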

// errSameConfig is returned if the new config is the same
// as the old one. This isn't usually an actual, actionable
// error; it's mostly a sentinel value.
@ -15,6 +15,7 @@
|
|||
package caddy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
@ -72,3 +73,21 @@ func TestParseDuration(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvent_CloudEvent_NilOrigin(t *testing.T) {
|
||||
ctx, _ := NewContext(Context{Context: context.Background()}) // module will be nil by default
|
||||
event, err := NewEvent(ctx, "started", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("NewEvent() error = %v", err)
|
||||
}
|
||||
|
||||
// This should not panic
|
||||
ce := event.CloudEvent()
|
||||
|
||||
if ce.Source != "caddy" {
|
||||
t.Errorf("Expected CloudEvent Source to be 'caddy', got '%s'", ce.Source)
|
||||
}
|
||||
if ce.Type != "started" {
|
||||
t.Errorf("Expected CloudEvent Type to be 'started', got '%s'", ce.Type)
|
||||
}
|
||||
}
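
Because Event.Data is documented above as unsynchronized, handlers should copy the map before touching it from another goroutine. A stdlib-only sketch of that pattern (the map contents are invented, and the map here is a stand-in rather than a real caddy.Event):

```go
package main

import (
	"fmt"
	"maps" // Go 1.21+
	"sync"
)

func main() {
	// Stand-in for an event's Data map; the real map is owned by the emitter.
	data := map[string]any{"remote_ip": "192.0.2.1", "attempt": 1}

	// Copy before handing the map to a goroutine, per the Data doc comment.
	snapshot := maps.Clone(data)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("handling event with", snapshot["remote_ip"])
	}()

	// The emitter may keep mutating the original map concurrently.
	data["attempt"] = 2
	wg.Wait()
}
```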
@ -68,7 +68,7 @@ func (a Adapter) Adapt(body []byte, options map[string]any) ([]byte, []caddyconf
|
|||
// TODO: also perform this check on imported files
|
||||
func FormattingDifference(filename string, body []byte) (caddyconfig.Warning, bool) {
|
||||
// replace windows-style newlines to normalize comparison
|
||||
normalizedBody := bytes.Replace(body, []byte("\r\n"), []byte("\n"), -1)
|
||||
normalizedBody := bytes.ReplaceAll(body, []byte("\r\n"), []byte("\n"))
|
||||
|
||||
formatted := Format(normalizedBody)
|
||||
if bytes.Equal(formatted, normalizedBody) {
|
||||
|
|
|
@ -308,9 +308,9 @@ func (d *Dispenser) CountRemainingArgs() int {
|
|||
}
|
||||
|
||||
// RemainingArgs loads any more arguments (tokens on the same line)
|
||||
// into a slice and returns them. Open curly brace tokens also indicate
|
||||
// the end of arguments, and the curly brace is not included in
|
||||
// the return value nor is it loaded.
|
||||
// into a slice of strings and returns them. Open curly brace tokens
|
||||
// also indicate the end of arguments, and the curly brace is not
|
||||
// included in the return value nor is it loaded.
|
||||
func (d *Dispenser) RemainingArgs() []string {
|
||||
var args []string
|
||||
for d.NextArg() {
|
||||
|
@ -320,9 +320,9 @@ func (d *Dispenser) RemainingArgs() []string {
|
|||
}
|
||||
|
||||
// RemainingArgsRaw loads any more arguments (tokens on the same line,
|
||||
// retaining quotes) into a slice and returns them. Open curly brace
|
||||
// tokens also indicate the end of arguments, and the curly brace is
|
||||
// not included in the return value nor is it loaded.
|
||||
// retaining quotes) into a slice of strings and returns them.
|
||||
// Open curly brace tokens also indicate the end of arguments,
|
||||
// and the curly brace is not included in the return value nor is it loaded.
|
||||
func (d *Dispenser) RemainingArgsRaw() []string {
|
||||
var args []string
|
||||
for d.NextArg() {
|
||||
|
@ -331,6 +331,18 @@ func (d *Dispenser) RemainingArgsRaw() []string {
|
|||
return args
|
||||
}
|
||||
|
||||
// RemainingArgsAsTokens loads any more arguments (tokens on the same line)
// into a slice of Token structs and returns them. Open curly brace tokens
// also indicate the end of arguments, and the curly brace is not included
// in the return value nor is it loaded.
|
||||
func (d *Dispenser) RemainingArgsAsTokens() []Token {
|
||||
var args []Token
|
||||
for d.NextArg() {
|
||||
args = append(args, d.Token())
|
||||
}
|
||||
return args
|
||||
}
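
A short usage sketch of the new RemainingArgsAsTokens method from another package, via the exported test dispenser; the directive name and arguments are invented.

```go
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func main() {
	d := caddyfile.NewTestDispenser("my_directive argA argB { inner }")
	d.Next() // position the cursor on "my_directive"

	// Unlike RemainingArgs, this keeps the full tokens, so position
	// information (such as the line number) stays available to the caller.
	for _, tok := range d.RemainingArgsAsTokens() {
		fmt.Printf("%s (line %d)\n", tok.Text, tok.Line)
	}
}
```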
|
||||
|
||||
// NewFromNextSegment returns a new dispenser with a copy of
|
||||
// the tokens from the current token until the end of the
|
||||
// "directive" whether that be to the end of the line or
@ -274,6 +274,66 @@ func TestDispenser_RemainingArgs(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestDispenser_RemainingArgsAsTokens(t *testing.T) {
|
||||
input := `dir1 arg1 arg2 arg3
|
||||
dir2 arg4 arg5
|
||||
dir3 arg6 { arg7
|
||||
dir4`
|
||||
d := NewTestDispenser(input)
|
||||
|
||||
d.Next() // dir1
|
||||
|
||||
args := d.RemainingArgsAsTokens()
|
||||
|
||||
tokenTexts := make([]string, 0, len(args))
|
||||
for _, arg := range args {
|
||||
tokenTexts = append(tokenTexts, arg.Text)
|
||||
}
|
||||
|
||||
if expected := []string{"arg1", "arg2", "arg3"}; !reflect.DeepEqual(tokenTexts, expected) {
|
||||
t.Errorf("RemainingArgsAsTokens(): Expected %v, got %v", expected, tokenTexts)
|
||||
}
|
||||
|
||||
d.Next() // dir2
|
||||
|
||||
args = d.RemainingArgsAsTokens()
|
||||
|
||||
tokenTexts = tokenTexts[:0]
|
||||
for _, arg := range args {
|
||||
tokenTexts = append(tokenTexts, arg.Text)
|
||||
}
|
||||
|
||||
if expected := []string{"arg4", "arg5"}; !reflect.DeepEqual(tokenTexts, expected) {
|
||||
t.Errorf("RemainingArgsAsTokens(): Expected %v, got %v", expected, tokenTexts)
|
||||
}
|
||||
|
||||
d.Next() // dir3
|
||||
|
||||
args = d.RemainingArgsAsTokens()
|
||||
tokenTexts = tokenTexts[:0]
|
||||
for _, arg := range args {
|
||||
tokenTexts = append(tokenTexts, arg.Text)
|
||||
}
|
||||
|
||||
if expected := []string{"arg6"}; !reflect.DeepEqual(tokenTexts, expected) {
|
||||
t.Errorf("RemainingArgsAsTokens(): Expected %v, got %v", expected, tokenTexts)
|
||||
}
|
||||
|
||||
d.Next() // {
|
||||
d.Next() // arg7
|
||||
d.Next() // dir4
|
||||
|
||||
args = d.RemainingArgsAsTokens()
|
||||
tokenTexts = tokenTexts[:0]
|
||||
for _, arg := range args {
|
||||
tokenTexts = append(tokenTexts, arg.Text)
|
||||
}
|
||||
|
||||
if len(args) != 0 {
|
||||
t.Errorf("RemainingArgsAsTokens(): Expected %v, got %v", []string{}, tokenTexts)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDispenser_ArgErr_Err(t *testing.T) {
|
||||
input := `dir1 {
|
||||
}
|
||||
|
|
|
@ -61,7 +61,8 @@ func Format(input []byte) []byte {
|
|||
heredocMarker []rune
|
||||
heredocClosingMarker []rune
|
||||
|
||||
nesting int // indentation level
|
||||
nesting int // indentation level
|
||||
withinBackquote bool
|
||||
)
|
||||
|
||||
write := func(ch rune) {
|
||||
|
@ -88,9 +89,12 @@ func Format(input []byte) []byte {
|
|||
}
|
||||
panic(err)
|
||||
}
|
||||
if ch == '`' {
|
||||
withinBackquote = !withinBackquote
|
||||
}
|
||||
|
||||
// detect whether we have the start of a heredoc
|
||||
if !quoted && !(heredoc != heredocClosed || heredocEscaped) &&
|
||||
if !quoted && (heredoc == heredocClosed && !heredocEscaped) &&
|
||||
space && last == '<' && ch == '<' {
|
||||
write(ch)
|
||||
heredoc = heredocOpening
|
||||
|
@ -220,7 +224,7 @@ func Format(input []byte) []byte {
|
|||
openBrace = false
|
||||
if beginningOfLine {
|
||||
indent()
|
||||
} else if !openBraceSpace {
|
||||
} else if !openBraceSpace || !unicode.IsSpace(last) {
|
||||
write(' ')
|
||||
}
|
||||
write('{')
|
||||
|
@ -236,14 +240,23 @@ func Format(input []byte) []byte {
|
|||
switch {
|
||||
case ch == '{':
|
||||
openBrace = true
|
||||
openBraceWritten = false
|
||||
openBraceSpace = spacePrior && !beginningOfLine
|
||||
if openBraceSpace {
|
||||
if openBraceSpace && newLines == 0 {
|
||||
write(' ')
|
||||
}
|
||||
openBraceWritten = false
|
||||
if withinBackquote {
|
||||
write('{')
|
||||
openBraceWritten = true
|
||||
continue
|
||||
}
|
||||
continue
|
||||
|
||||
case ch == '}' && (spacePrior || !openBrace):
|
||||
if withinBackquote {
|
||||
write('}')
|
||||
continue
|
||||
}
|
||||
if last != '\n' {
|
||||
nextLine()
|
||||
}
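
The backquote tracking added above is what keeps braces inside backquoted strings from being treated as block delimiters. A small sketch using the exported Format function; the input mirrors the "Preserve braces wrapped by backquotes" test case added later in this diff.

```go
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func main() {
	// Braces here are template syntax inside a backquoted string, so the
	// formatter should leave the line untouched (per the test case below).
	in := []byte("block {respond `All braces should remain: {{now | date \"2006\"}}`}")
	out := caddyfile.Format(in)
	fmt.Println(string(out))
}
```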
@ -432,6 +432,31 @@ block2 {
|
|||
heredoc \<<HEREDOC
|
||||
respond "More than one space will be eaten" 200
|
||||
}
|
||||
`,
|
||||
},
|
||||
{
|
||||
description: "Preserve braces wrapped by backquotes",
|
||||
input: "block {respond `All braces should remain: {{now | date \"2006\"}}`}",
|
||||
expect: "block {respond `All braces should remain: {{now | date \"2006\"}}`}",
|
||||
},
|
||||
{
|
||||
description: "Preserve braces wrapped by quotes",
|
||||
input: "block {respond \"All braces should remain: {{now | date `2006`}}\"}",
|
||||
expect: "block {respond \"All braces should remain: {{now | date `2006`}}\"}",
|
||||
},
|
||||
{
|
||||
description: "No trailing space on line before env variable",
|
||||
input: `{
|
||||
a
|
||||
|
||||
{$ENV_VAR}
|
||||
}
|
||||
`,
|
||||
expect: `{
|
||||
a
|
||||
|
||||
{$ENV_VAR}
|
||||
}
|
||||
`,
|
||||
},
|
||||
} {
|
||||
|
|
|
@ -137,7 +137,7 @@ func (l *lexer) next() (bool, error) {
|
|||
}
|
||||
|
||||
// detect whether we have the start of a heredoc
|
||||
if !(quoted || btQuoted) && !(inHeredoc || heredocEscaped) &&
|
||||
if (!quoted && !btQuoted) && (!inHeredoc && !heredocEscaped) &&
|
||||
len(val) > 1 && string(val[:2]) == "<<" {
|
||||
// a space means it's just a regular token and not a heredoc
|
||||
if ch == ' ' {
|
||||
|
@ -323,7 +323,8 @@ func (l *lexer) finalizeHeredoc(val []rune, marker string) ([]rune, error) {
|
|||
|
||||
// if the padding doesn't match exactly at the start then we can't safely strip
|
||||
if index != 0 {
|
||||
return nil, fmt.Errorf("mismatched leading whitespace in heredoc <<%s on line #%d [%s], expected whitespace [%s] to match the closing marker", marker, l.line+lineNum+1, lineText, paddingToStrip)
|
||||
cleanLineText := strings.TrimRight(lineText, "\r\n")
|
||||
return nil, fmt.Errorf("mismatched leading whitespace in heredoc <<%s on line #%d [%s], expected whitespace [%s] to match the closing marker", marker, l.line+lineNum+1, cleanLineText, paddingToStrip)
|
||||
}
|
||||
|
||||
// strip, then append the line, with the newline, to the output.
|
||||
|
|
|
@ -379,28 +379,23 @@ func (p *parser) doImport(nesting int) error {
|
|||
if len(blockTokens) > 0 {
|
||||
// use such tokens to create a new dispenser, and then use it to parse each block
|
||||
bd := NewDispenser(blockTokens)
|
||||
|
||||
// one iteration processes one sub-block inside the import
|
||||
for bd.Next() {
|
||||
// see if we can grab a key
|
||||
var currentMappingKey string
|
||||
if bd.Val() == "{" {
|
||||
currentMappingKey := bd.Val()
|
||||
|
||||
if currentMappingKey == "{" {
|
||||
return p.Err("anonymous blocks are not supported")
|
||||
}
|
||||
currentMappingKey = bd.Val()
|
||||
currentMappingTokens := []Token{}
|
||||
// read all args until end of line / {
|
||||
if bd.NextArg() {
|
||||
|
||||
// load up all arguments (if there even are any)
|
||||
currentMappingTokens := bd.RemainingArgsAsTokens()
|
||||
|
||||
// load up the entire block
|
||||
for mappingNesting := bd.Nesting(); bd.NextBlock(mappingNesting); {
|
||||
currentMappingTokens = append(currentMappingTokens, bd.Token())
|
||||
for bd.NextArg() {
|
||||
currentMappingTokens = append(currentMappingTokens, bd.Token())
|
||||
}
|
||||
// TODO(elee1766): we don't enter another mapping here because it's annoying to extract the { and } properly.
|
||||
// maybe someone can do that in the future
|
||||
} else {
|
||||
// attempt to enter a block and add tokens to the currentMappingTokens
|
||||
for mappingNesting := bd.Nesting(); bd.NextBlock(mappingNesting); {
|
||||
currentMappingTokens = append(currentMappingTokens, bd.Token())
|
||||
}
|
||||
}
|
||||
|
||||
blockMapping[currentMappingKey] = currentMappingTokens
|
||||
}
|
||||
}
|
||||
|
@ -538,29 +533,24 @@ func (p *parser) doImport(nesting int) error {
|
|||
}
|
||||
// if it is {block}, we substitute with all tokens in the block
|
||||
// if it is {blocks.*}, we substitute with the tokens in the mapping for the *
|
||||
var skip bool
|
||||
var tokensToAdd []Token
|
||||
foundBlockDirective := false
|
||||
switch {
|
||||
case token.Text == "{block}":
|
||||
foundBlockDirective = true
|
||||
tokensToAdd = blockTokens
|
||||
case strings.HasPrefix(token.Text, "{blocks.") && strings.HasSuffix(token.Text, "}"):
|
||||
foundBlockDirective = true
|
||||
// {blocks.foo.bar} will be extracted to key `foo.bar`
|
||||
blockKey := strings.TrimPrefix(strings.TrimSuffix(token.Text, "}"), "{blocks.")
|
||||
val, ok := blockMapping[blockKey]
|
||||
if ok {
|
||||
tokensToAdd = val
|
||||
}
|
||||
default:
|
||||
skip = true
|
||||
}
|
||||
if !skip {
|
||||
if len(tokensToAdd) == 0 {
|
||||
// if there is no content in the snippet block, don't do any replacement
|
||||
// this allows snippets which contained {block}/{block.*} before this change to continue functioning as normal
|
||||
tokensCopy = append(tokensCopy, token)
|
||||
} else {
|
||||
tokensCopy = append(tokensCopy, tokensToAdd...)
|
||||
}
|
||||
|
||||
if foundBlockDirective {
|
||||
tokensCopy = append(tokensCopy, tokensToAdd...)
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@ import (
|
|||
"bytes"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
|
@ -884,6 +885,51 @@ func TestRejectsGlobalMatcher(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestRejectAnonymousImportBlock(t *testing.T) {
|
||||
p := testParser(`
|
||||
(site) {
|
||||
http://{args[0]} https://{args[0]} {
|
||||
{block}
|
||||
}
|
||||
}
|
||||
|
||||
import site test.domain {
|
||||
{
|
||||
header_up Host {host}
|
||||
header_up X-Real-IP {remote_host}
|
||||
}
|
||||
}
|
||||
`)
|
||||
_, err := p.parseAll()
|
||||
if err == nil {
|
||||
t.Fatal("Expected an error, but got nil")
|
||||
}
|
||||
expected := "anonymous blocks are not supported"
|
||||
if !strings.HasPrefix(err.Error(), "anonymous blocks are not supported") {
|
||||
t.Errorf("Expected error to start with '%s' but got '%v'", expected, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAcceptSiteImportWithBraces(t *testing.T) {
|
||||
p := testParser(`
|
||||
(site) {
|
||||
http://{args[0]} https://{args[0]} {
|
||||
{block}
|
||||
}
|
||||
}
|
||||
|
||||
import site test.domain {
|
||||
reverse_proxy http://192.168.1.1:8080 {
|
||||
header_up Host {host}
|
||||
}
|
||||
}
|
||||
`)
|
||||
_, err := p.parseAll()
|
||||
if err != nil {
|
||||
t.Errorf("Expected error to be nil but got '%v'", err)
|
||||
}
|
||||
}
|
||||
|
||||
func testParser(input string) parser {
|
||||
return parser{Dispenser: NewTestDispenser(input)}
|
||||
}
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
package httpcaddyfile
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html"
|
||||
"net/http"
|
||||
|
@ -90,7 +91,7 @@ func parseBind(h Helper) ([]ConfigValue, error) {
|
|||
// curves <curves...>
|
||||
// client_auth {
|
||||
// mode [request|require|verify_if_given|require_and_verify]
|
||||
// trust_pool <module_name> [...]
|
||||
// trust_pool <module_name> [...]
|
||||
// trusted_leaf_cert <base64_der>
|
||||
// trusted_leaf_cert_file <filename>
|
||||
// }
|
||||
|
@ -129,6 +130,9 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
|
|||
var reusePrivateKeys bool
|
||||
var forceAutomate bool
|
||||
|
||||
// Track which DNS challenge options are set
|
||||
var dnsOptionsSet []string
|
||||
|
||||
firstLine := h.RemainingArgs()
|
||||
switch len(firstLine) {
|
||||
case 0:
|
||||
|
@ -349,6 +353,7 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
|
|||
if acmeIssuer.Challenges.DNS == nil {
|
||||
acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
|
||||
}
|
||||
dnsOptionsSet = append(dnsOptionsSet, "resolvers")
|
||||
acmeIssuer.Challenges.DNS.Resolvers = args
|
||||
|
||||
case "propagation_delay":
|
||||
|
@ -370,6 +375,7 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
|
|||
if acmeIssuer.Challenges.DNS == nil {
|
||||
acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
|
||||
}
|
||||
dnsOptionsSet = append(dnsOptionsSet, "propagation_delay")
|
||||
acmeIssuer.Challenges.DNS.PropagationDelay = caddy.Duration(delay)
|
||||
|
||||
case "propagation_timeout":
|
||||
|
@ -397,6 +403,7 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
|
|||
if acmeIssuer.Challenges.DNS == nil {
|
||||
acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
|
||||
}
|
||||
dnsOptionsSet = append(dnsOptionsSet, "propagation_timeout")
|
||||
acmeIssuer.Challenges.DNS.PropagationTimeout = caddy.Duration(timeout)
|
||||
|
||||
case "dns_ttl":
|
||||
|
@ -418,6 +425,7 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
|
|||
if acmeIssuer.Challenges.DNS == nil {
|
||||
acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
|
||||
}
|
||||
dnsOptionsSet = append(dnsOptionsSet, "dns_ttl")
|
||||
acmeIssuer.Challenges.DNS.TTL = caddy.Duration(ttl)
|
||||
|
||||
case "dns_challenge_override_domain":
|
||||
|
@ -434,6 +442,7 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
|
|||
if acmeIssuer.Challenges.DNS == nil {
|
||||
acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
|
||||
}
|
||||
dnsOptionsSet = append(dnsOptionsSet, "dns_challenge_override_domain")
|
||||
acmeIssuer.Challenges.DNS.OverrideDomain = arg[0]
|
||||
|
||||
case "ca_root":
|
||||
|
@ -469,6 +478,18 @@ func parseTLS(h Helper) ([]ConfigValue, error) {
|
|||
}
|
||||
}
|
||||
|
||||
// Validate DNS challenge config: any DNS challenge option except "dns" requires a DNS provider
|
||||
if acmeIssuer != nil && acmeIssuer.Challenges != nil && acmeIssuer.Challenges.DNS != nil {
|
||||
dnsCfg := acmeIssuer.Challenges.DNS
|
||||
providerSet := dnsCfg.ProviderRaw != nil || h.Option("dns") != nil || h.Option("acme_dns") != nil
|
||||
if len(dnsOptionsSet) > 0 && !providerSet {
|
||||
return nil, h.Errf(
|
||||
"setting DNS challenge options [%s] requires a DNS provider (set with the 'dns' subdirective or 'acme_dns' global option)",
|
||||
strings.Join(dnsOptionsSet, ", "),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// a naked tls directive is not allowed
|
||||
if len(firstLine) == 0 && !hasBlock {
|
||||
return nil, h.ArgErr()
|
||||
|
@ -843,13 +864,18 @@ func parseHandleErrors(h Helper) ([]ConfigValue, error) {
|
|||
return nil, h.Errf("segment was not parsed as a subroute")
|
||||
}
|
||||
|
||||
// wrap the subroutes
|
||||
wrappingRoute := caddyhttp.Route{
|
||||
HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(subroute, "handler", "subroute", nil)},
|
||||
}
|
||||
subroute = &caddyhttp.Subroute{
|
||||
Routes: []caddyhttp.Route{wrappingRoute},
|
||||
}
|
||||
if expression != "" {
|
||||
statusMatcher := caddy.ModuleMap{
|
||||
"expression": h.JSON(caddyhttp.MatchExpression{Expr: expression}),
|
||||
}
|
||||
for i := range subroute.Routes {
|
||||
subroute.Routes[i].MatcherSetsRaw = []caddy.ModuleMap{statusMatcher}
|
||||
}
|
||||
subroute.Routes[0].MatcherSetsRaw = []caddy.ModuleMap{statusMatcher}
|
||||
}
|
||||
return []ConfigValue{
|
||||
{
|
||||
|
@ -1160,6 +1186,11 @@ func parseLogSkip(h Helper) (caddyhttp.MiddlewareHandler, error) {
|
|||
if h.NextArg() {
|
||||
return nil, h.ArgErr()
|
||||
}
|
||||
|
||||
if h.NextBlock(0) {
|
||||
return nil, h.Err("log_skip directive does not accept blocks")
|
||||
}
|
||||
|
||||
return caddyhttp.VarsMiddleware{"log_skip": true}, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -16,6 +16,7 @@ package httpcaddyfile
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"maps"
|
||||
"net"
|
||||
"slices"
|
||||
"sort"
|
||||
|
@ -173,10 +174,12 @@ func RegisterDirectiveOrder(dir string, position Positional, standardDir string)
|
|||
if d != standardDir {
|
||||
continue
|
||||
}
|
||||
if position == Before {
|
||||
switch position {
|
||||
case Before:
|
||||
newOrder = append(newOrder[:i], append([]string{dir}, newOrder[i:]...)...)
|
||||
} else if position == After {
|
||||
case After:
|
||||
newOrder = append(newOrder[:i+1], append([]string{dir}, newOrder[i+1:]...)...)
|
||||
case First, Last:
|
||||
}
|
||||
break
|
||||
}
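
For context, RegisterDirectiveOrder (whose signature appears in the hunk header above) is what a third-party module calls to slot its own directive into the standard order. A hypothetical registration sketch; "my_directive" is invented, and its handler would still need to be registered separately.

```go
package mymodule

import (
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
)

func init() {
	// Ask the Caddyfile adapter to evaluate "my_directive" right after
	// the standard "rewrite" directive when ordering routes.
	httpcaddyfile.RegisterDirectiveOrder("my_directive", httpcaddyfile.After, "rewrite")
}
```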
|
||||
|
@ -365,9 +368,7 @@ func parseSegmentAsConfig(h Helper) ([]ConfigValue, error) {
|
|||
// copy existing matcher definitions so we can augment
|
||||
// new ones that are defined only in this scope
|
||||
matcherDefs := make(map[string]caddy.ModuleMap, len(h.matcherDefs))
|
||||
for key, val := range h.matcherDefs {
|
||||
matcherDefs[key] = val
|
||||
}
|
||||
maps.Copy(matcherDefs, h.matcherDefs)
|
||||
|
||||
// find and extract any embedded matcher definitions in this scope
|
||||
for i := 0; i < len(segments); i++ {
|
||||
|
@ -483,12 +484,29 @@ func sortRoutes(routes []ConfigValue) {
|
|||
// we can only confidently compare path lengths if both
|
||||
// directives have a single path to match (issue #5037)
|
||||
if iPathLen > 0 && jPathLen > 0 {
|
||||
// trim the trailing wildcard if there is one
|
||||
iPathTrimmed := strings.TrimSuffix(iPM[0], "*")
|
||||
jPathTrimmed := strings.TrimSuffix(jPM[0], "*")
|
||||
|
||||
// if both paths are the same except for a trailing wildcard,
|
||||
// sort by the shorter path first (which is more specific)
|
||||
if strings.TrimSuffix(iPM[0], "*") == strings.TrimSuffix(jPM[0], "*") {
|
||||
if iPathTrimmed == jPathTrimmed {
|
||||
return iPathLen < jPathLen
|
||||
}
|
||||
|
||||
// we use the trimmed length to compare the paths
|
||||
// https://github.com/caddyserver/caddy/issues/7012#issuecomment-2870142195
|
||||
// credit to https://github.com/Hellio404
|
||||
// for sorts with many items, mixing matchers w/ and w/o wildcards will confuse the sort and result in incorrect orders
|
||||
iPathLen = len(iPathTrimmed)
|
||||
jPathLen = len(jPathTrimmed)
|
||||
|
||||
// if both paths have the same length, sort lexically
|
||||
// https://github.com/caddyserver/caddy/pull/7015#issuecomment-2871993588
|
||||
if iPathLen == jPathLen {
|
||||
return iPathTrimmed < jPathTrimmed
|
||||
}
|
||||
|
||||
// sort most-specific (longest) path first
|
||||
return iPathLen > jPathLen
|
||||
}
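
The wildcard-trimming rule above can be modeled in isolation with a plain sort; this is only an illustration of the comparison described in the comments, not the adapter's own sorting code, and the paths are invented.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	paths := []string{"/api*", "/api/v1/*", "/api/", "/assets/*"}

	sort.SliceStable(paths, func(i, j int) bool {
		it := strings.TrimSuffix(paths[i], "*")
		jt := strings.TrimSuffix(paths[j], "*")
		if it == jt {
			// same path except for a trailing wildcard: shorter (more exact) first
			return len(paths[i]) < len(paths[j])
		}
		if len(it) == len(jt) {
			return it < jt // equal trimmed length: fall back to lexical order
		}
		return len(it) > len(jt) // otherwise longest (most specific) trimmed path first
	})

	// Most specific paths come out first.
	fmt.Println(paths)
}
```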
|
||||
|
|
|
@ -633,12 +633,6 @@ func (st *ServerType) serversFromPairings(
|
|||
srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
|
||||
}
|
||||
srv.AutoHTTPS.IgnoreLoadedCerts = true
|
||||
|
||||
case "prefer_wildcard":
|
||||
if srv.AutoHTTPS == nil {
|
||||
srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
|
||||
}
|
||||
srv.AutoHTTPS.PreferWildcard = true
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -706,16 +700,6 @@ func (st *ServerType) serversFromPairings(
|
|||
return specificity(iLongestHost) > specificity(jLongestHost)
|
||||
})
|
||||
|
||||
// collect all hosts that have a wildcard in them
|
||||
wildcardHosts := []string{}
|
||||
for _, sblock := range p.serverBlocks {
|
||||
for _, addr := range sblock.parsedKeys {
|
||||
if strings.HasPrefix(addr.Host, "*.") {
|
||||
wildcardHosts = append(wildcardHosts, addr.Host[2:])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var hasCatchAllTLSConnPolicy, addressQualifiesForTLS bool
|
||||
autoHTTPSWillAddConnPolicy := srv.AutoHTTPS == nil || !srv.AutoHTTPS.Disabled
|
||||
|
||||
|
@ -801,7 +785,13 @@ func (st *ServerType) serversFromPairings(
|
|||
cp.FallbackSNI = fallbackSNI
|
||||
}
|
||||
|
||||
// only append this policy if it actually changes something
|
||||
// only append this policy if it actually changes something,
|
||||
// or if the configuration explicitly automates certs for
|
||||
// these names (this is necessary to hoist a connection policy
|
||||
// above one that may manually load a wildcard cert that would
|
||||
// otherwise clobber the automated one; the code that appends
|
||||
// policies that manually load certs comes later, so they're
|
||||
// lower in the list)
|
||||
if !cp.SettingsEmpty() || mapContains(forceAutomatedNames, hosts) {
|
||||
srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
|
||||
hasCatchAllTLSConnPolicy = len(hosts) == 0
|
||||
|
@ -841,18 +831,6 @@ func (st *ServerType) serversFromPairings(
|
|||
addressQualifiesForTLS = true
|
||||
}
|
||||
|
||||
// If prefer wildcard is enabled, then we add hosts that are
|
||||
// already covered by the wildcard to the skip list
|
||||
if addressQualifiesForTLS && srv.AutoHTTPS != nil && srv.AutoHTTPS.PreferWildcard {
|
||||
baseDomain := addr.Host
|
||||
if idx := strings.Index(baseDomain, "."); idx != -1 {
|
||||
baseDomain = baseDomain[idx+1:]
|
||||
}
|
||||
if !strings.HasPrefix(addr.Host, "*.") && slices.Contains(wildcardHosts, baseDomain) {
|
||||
srv.AutoHTTPS.SkipCerts = append(srv.AutoHTTPS.SkipCerts, addr.Host)
|
||||
}
|
||||
}
|
||||
|
||||
// predict whether auto-HTTPS will add the conn policy for us; if so, we
|
||||
// may not need to add one for this server
|
||||
autoHTTPSWillAddConnPolicy = autoHTTPSWillAddConnPolicy &&
|
||||
|
@ -1083,11 +1061,40 @@ func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.Connecti
|
|||
|
||||
// if they're exactly equal in every way, just keep one of them
|
||||
if reflect.DeepEqual(cps[i], cps[j]) {
|
||||
cps = append(cps[:j], cps[j+1:]...)
|
||||
cps = slices.Delete(cps, j, j+1)
|
||||
i--
|
||||
break
|
||||
}
|
||||
|
||||
// as a special case, if there are adjacent TLS conn policies that are identical except
|
||||
// by their matchers, and the matchers are specifically just ServerName ("sni") matchers
|
||||
// (by far the most common), we can combine them into a single policy
|
||||
if i == j-1 && len(cps[i].MatchersRaw) == 1 && len(cps[j].MatchersRaw) == 1 {
|
||||
if iSNIMatcherJSON, ok := cps[i].MatchersRaw["sni"]; ok {
|
||||
if jSNIMatcherJSON, ok := cps[j].MatchersRaw["sni"]; ok {
|
||||
// position of policies and the matcher criteria check out; if settings are
|
||||
// the same, then we can combine the policies; we have to unmarshal and
|
||||
// remarshal the matchers though
|
||||
if cps[i].SettingsEqual(*cps[j]) {
|
||||
var iSNIMatcher caddytls.MatchServerName
|
||||
if err := json.Unmarshal(iSNIMatcherJSON, &iSNIMatcher); err == nil {
|
||||
var jSNIMatcher caddytls.MatchServerName
|
||||
if err := json.Unmarshal(jSNIMatcherJSON, &jSNIMatcher); err == nil {
|
||||
iSNIMatcher = append(iSNIMatcher, jSNIMatcher...)
|
||||
cps[i].MatchersRaw["sni"], err = json.Marshal(iSNIMatcher)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("recombining SNI matchers: %v", err)
|
||||
}
|
||||
cps = slices.Delete(cps, j, j+1)
|
||||
i--
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if they have the same matcher, try to reconcile each field: either they must
|
||||
// be identical, or we have to be able to combine them safely
|
||||
if reflect.DeepEqual(cps[i].MatchersRaw, cps[j].MatchersRaw) {
|
||||
|
@ -1189,12 +1196,13 @@ func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.Connecti
|
|||
}
|
||||
}
|
||||
|
||||
cps = append(cps[:j], cps[j+1:]...)
|
||||
cps = slices.Delete(cps, j, j+1)
|
||||
i--
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return cps, nil
|
||||
}
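
The SNI-merging branch above amounts to decoding two raw JSON matcher payloads, concatenating them, and re-encoding the result (caddytls.MatchServerName is a []string-based type, which is why append works). A stdlib-only model of that step, with invented hostnames:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Two raw "sni" matcher payloads as they might appear in MatchersRaw.
	iRaw := json.RawMessage(`["a.example.com"]`)
	jRaw := json.RawMessage(`["b.example.com","c.example.com"]`)

	var iNames, jNames []string
	if err := json.Unmarshal(iRaw, &iNames); err != nil {
		panic(err)
	}
	if err := json.Unmarshal(jRaw, &jNames); err != nil {
		panic(err)
	}

	// Combine and re-encode, as the consolidation step does before
	// dropping the now-redundant second policy.
	merged, err := json.Marshal(append(iNames, jNames...))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged)) // ["a.example.com","b.example.com","c.example.com"]
}
```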
|
||||
|
||||
|
|
|
@ -458,8 +458,6 @@ func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ any) (any, error) {
|
|||
case "disable_certs":
|
||||
case "ignore_loaded_certs":
|
||||
case "prefer_wildcard":
|
||||
break
|
||||
|
||||
default:
|
||||
return "", d.Errf("auto_https must be one of 'off', 'disable_redirects', 'disable_certs', 'ignore_loaded_certs', or 'prefer_wildcard'")
|
||||
}
|
||||
|
@ -557,8 +555,14 @@ func parseOptPreferredChains(d *caddyfile.Dispenser, _ any) (any, error) {
|
|||
|
||||
func parseOptDNS(d *caddyfile.Dispenser, _ any) (any, error) {
|
||||
d.Next() // consume option name
|
||||
optName := d.Val()
|
||||
|
||||
if !d.Next() { // get DNS module name
|
||||
// get DNS module name
|
||||
if !d.Next() {
|
||||
// this is allowed if this is the "acme_dns" option since it may refer to the globally-configured "dns" option's value
|
||||
if optName == "acme_dns" {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, d.ArgErr()
|
||||
}
|
||||
modID := "dns.providers." + d.Val()
|
||||
|
|
|
@ -15,6 +15,8 @@
|
|||
package httpcaddyfile
|
||||
|
||||
import (
|
||||
"slices"
|
||||
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/caddyserver/caddy/v2/caddyconfig"
|
||||
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
|
||||
|
@ -178,6 +180,15 @@ func (st ServerType) buildPKIApp(
|
|||
if _, ok := options["skip_install_trust"]; ok {
|
||||
skipInstallTrust = true
|
||||
}
|
||||
|
||||
// check if auto_https is off - in that case we should not create
|
||||
// any PKI infrastructure even with skip_install_trust directive
|
||||
autoHTTPS := []string{}
|
||||
if ah, ok := options["auto_https"].([]string); ok {
|
||||
autoHTTPS = ah
|
||||
}
|
||||
autoHTTPSOff := slices.Contains(autoHTTPS, "off")
|
||||
|
||||
falseBool := false
|
||||
|
||||
// Load the PKI app configured via global options
|
||||
|
@ -218,7 +229,8 @@ func (st ServerType) buildPKIApp(
|
|||
// if there was no CAs defined in any of the servers,
|
||||
// and we were requested to not install trust, then
|
||||
// add one for the default/local CA to do so
|
||||
if len(pkiApp.CAs) == 0 && skipInstallTrust {
|
||||
// only if auto_https is not completely disabled
|
||||
if len(pkiApp.CAs) == 0 && skipInstallTrust && !autoHTTPSOff {
|
||||
ca := new(caddypki.CA)
|
||||
ca.ID = caddypki.DefaultCAID
|
||||
ca.InstallTrust = &falseBool
|
||||
|
|
|
@ -18,6 +18,7 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strconv"
|
||||
|
||||
"github.com/dustin/go-humanize"
|
||||
|
||||
|
@ -42,12 +43,15 @@ type serverOptions struct {
|
|||
WriteTimeout caddy.Duration
|
||||
IdleTimeout caddy.Duration
|
||||
KeepAliveInterval caddy.Duration
|
||||
KeepAliveIdle caddy.Duration
|
||||
KeepAliveCount int
|
||||
MaxHeaderBytes int
|
||||
EnableFullDuplex bool
|
||||
Protocols []string
|
||||
StrictSNIHost *bool
|
||||
TrustedProxiesRaw json.RawMessage
|
||||
TrustedProxiesStrict int
|
||||
TrustedProxiesUnix bool
|
||||
ClientIPHeaders []string
|
||||
ShouldLogCredentials bool
|
||||
Metrics *caddyhttp.Metrics
|
||||
|
@ -142,6 +146,7 @@ func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) {
|
|||
return nil, d.Errf("unrecognized timeouts option '%s'", d.Val())
|
||||
}
|
||||
}
|
||||
|
||||
case "keepalive_interval":
|
||||
if !d.NextArg() {
|
||||
return nil, d.ArgErr()
|
||||
|
@ -152,6 +157,26 @@ func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) {
|
|||
}
|
||||
serverOpts.KeepAliveInterval = caddy.Duration(dur)
|
||||
|
||||
case "keepalive_idle":
|
||||
if !d.NextArg() {
|
||||
return nil, d.ArgErr()
|
||||
}
|
||||
dur, err := caddy.ParseDuration(d.Val())
|
||||
if err != nil {
|
||||
return nil, d.Errf("parsing keepalive idle duration: %v", err)
|
||||
}
|
||||
serverOpts.KeepAliveIdle = caddy.Duration(dur)
|
||||
|
||||
case "keepalive_count":
|
||||
if !d.NextArg() {
|
||||
return nil, d.ArgErr()
|
||||
}
|
||||
cnt, err := strconv.ParseInt(d.Val(), 10, 32)
|
||||
if err != nil {
|
||||
return nil, d.Errf("parsing keepalive count int: %v", err)
|
||||
}
|
||||
serverOpts.KeepAliveCount = int(cnt)
|
||||
|
||||
case "max_header_size":
|
||||
var sizeStr string
|
||||
if !d.AllArgs(&sizeStr) {
|
||||
|
@ -227,6 +252,12 @@ func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) {
|
|||
}
|
||||
serverOpts.TrustedProxiesStrict = 1
|
||||
|
||||
case "trusted_proxies_unix":
|
||||
if d.NextArg() {
|
||||
return nil, d.ArgErr()
|
||||
}
|
||||
serverOpts.TrustedProxiesUnix = true
|
||||
|
||||
case "client_ip_headers":
|
||||
headers := d.RemainingArgs()
|
||||
for _, header := range headers {
|
||||
|
@ -309,6 +340,8 @@ func applyServerOptions(
|
|||
server.WriteTimeout = opts.WriteTimeout
|
||||
server.IdleTimeout = opts.IdleTimeout
|
||||
server.KeepAliveInterval = opts.KeepAliveInterval
|
||||
server.KeepAliveIdle = opts.KeepAliveIdle
|
||||
server.KeepAliveCount = opts.KeepAliveCount
|
||||
server.MaxHeaderBytes = opts.MaxHeaderBytes
|
||||
server.EnableFullDuplex = opts.EnableFullDuplex
|
||||
server.Protocols = opts.Protocols
|
||||
|
@ -316,6 +349,7 @@ func applyServerOptions(
|
|||
server.TrustedProxiesRaw = opts.TrustedProxiesRaw
|
||||
server.ClientIPHeaders = opts.ClientIPHeaders
|
||||
server.TrustedProxiesStrict = opts.TrustedProxiesStrict
|
||||
server.TrustedProxiesUnix = opts.TrustedProxiesUnix
|
||||
server.Metrics = opts.Metrics
|
||||
if opts.ShouldLogCredentials {
|
||||
if server.Logs == nil {
|
||||
|
|
|
@ -64,10 +64,13 @@ func placeholderShorthands() []string {
|
|||
"{orig_?query}", "{http.request.orig_uri.prefixed_query}",
|
||||
"{method}", "{http.request.method}",
|
||||
"{uri}", "{http.request.uri}",
|
||||
"{%uri}", "{http.request.uri_escaped}",
|
||||
"{path}", "{http.request.uri.path}",
|
||||
"{%path}", "{http.request.uri.path_escaped}",
|
||||
"{dir}", "{http.request.uri.path.dir}",
|
||||
"{file}", "{http.request.uri.path.file}",
|
||||
"{query}", "{http.request.uri.query}",
|
||||
"{%query}", "{http.request.uri.query_escaped}",
|
||||
"{?query}", "{http.request.uri.prefixed_query}",
|
||||
"{remote}", "{http.request.remote}",
|
||||
"{remote_host}", "{http.request.remote.host}",
|
||||
|
|
|
@ -92,11 +92,9 @@ func (st ServerType) buildTLSApp(
|
|||
tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, catchAllAP)
|
||||
}
|
||||
|
||||
// collect all hosts that have a wildcard in them, and arent HTTP
|
||||
wildcardHosts := []string{}
|
||||
// hosts that have been explicitly marked to be automated,
|
||||
// even if covered by another wildcard
|
||||
forcedAutomatedNames := make(map[string]struct{})
|
||||
var wildcardHosts []string // collect all hosts that have a wildcard in them, and aren't HTTP
|
||||
forcedAutomatedNames := make(map[string]struct{}) // explicitly configured to be automated, even if covered by a wildcard
|
||||
|
||||
for _, p := range pairings {
|
||||
var addresses []string
|
||||
for _, addressWithProtocols := range p.addressesWithProtocols {
|
||||
|
@ -153,7 +151,7 @@ func (st ServerType) buildTLSApp(
|
|||
ap.OnDemand = true
|
||||
}
|
||||
|
||||
// collect hosts that are forced to be automated
|
||||
// collect hosts that are forced to have certs automated for their specific name
|
||||
if _, ok := sblock.pile["tls.force_automate"]; ok {
|
||||
for _, host := range sblockHosts {
|
||||
forcedAutomatedNames[host] = struct{}{}
|
||||
|
@ -340,7 +338,7 @@ func (st ServerType) buildTLSApp(
|
|||
combined = reflect.New(reflect.TypeOf(cl)).Elem()
|
||||
}
|
||||
clVal := reflect.ValueOf(cl)
|
||||
for i := 0; i < clVal.Len(); i++ {
|
||||
for i := range clVal.Len() {
|
||||
combined = reflect.Append(combined, clVal.Index(i))
|
||||
}
|
||||
loadersByName[name] = combined.Interface().(caddytls.CertificateLoader)
|
||||
|
@ -375,7 +373,9 @@ func (st ServerType) buildTLSApp(
|
|||
return nil, warnings, err
|
||||
}
|
||||
for _, cfg := range ech.Configs {
|
||||
ap.SubjectsRaw = append(ap.SubjectsRaw, cfg.PublicName)
|
||||
if cfg.PublicName != "" {
|
||||
ap.SubjectsRaw = append(ap.SubjectsRaw, cfg.PublicName)
|
||||
}
|
||||
}
|
||||
if tlsApp.Automation == nil {
|
||||
tlsApp.Automation = new(caddytls.AutomationConfig)
|
||||
|
@ -464,12 +464,12 @@ func (st ServerType) buildTLSApp(
|
|||
globalEmail := options["email"]
|
||||
globalACMECA := options["acme_ca"]
|
||||
globalACMECARoot := options["acme_ca_root"]
|
||||
globalACMEDNS := options["acme_dns"]
|
||||
_, globalACMEDNS := options["acme_dns"] // can be set to nil (to use globally-defined "dns" value instead), but it is still set
|
||||
globalACMEEAB := options["acme_eab"]
|
||||
globalPreferredChains := options["preferred_chains"]
|
||||
hasGlobalACMEDefaults := globalEmail != nil || globalACMECA != nil || globalACMECARoot != nil || globalACMEDNS != nil || globalACMEEAB != nil || globalPreferredChains != nil
|
||||
hasGlobalACMEDefaults := globalEmail != nil || globalACMECA != nil || globalACMECARoot != nil || globalACMEDNS || globalACMEEAB != nil || globalPreferredChains != nil
|
||||
if hasGlobalACMEDefaults {
|
||||
for i := 0; i < len(tlsApp.Automation.Policies); i++ {
|
||||
for i := range tlsApp.Automation.Policies {
|
||||
ap := tlsApp.Automation.Policies[i]
|
||||
if len(ap.Issuers) == 0 && automationPolicyHasAllPublicNames(ap) {
|
||||
// for public names, create default issuers which will later be filled in with configured global defaults
|
||||
|
@ -549,11 +549,12 @@ func fillInGlobalACMEDefaults(issuer certmagic.Issuer, options map[string]any) e
|
|||
globalEmail := options["email"]
|
||||
globalACMECA := options["acme_ca"]
|
||||
globalACMECARoot := options["acme_ca_root"]
|
||||
globalACMEDNS := options["acme_dns"]
|
||||
globalACMEDNS, globalACMEDNSok := options["acme_dns"] // can be set to nil (to use globally-defined "dns" value instead), but it is still set
|
||||
globalACMEEAB := options["acme_eab"]
|
||||
globalPreferredChains := options["preferred_chains"]
|
||||
globalCertLifetime := options["cert_lifetime"]
|
||||
globalHTTPPort, globalHTTPSPort := options["http_port"], options["https_port"]
|
||||
globalDefaultBind := options["default_bind"]
|
||||
|
||||
if globalEmail != nil && acmeIssuer.Email == "" {
|
||||
acmeIssuer.Email = globalEmail.(string)
|
||||
|
@ -564,11 +565,21 @@ func fillInGlobalACMEDefaults(issuer certmagic.Issuer, options map[string]any) e
|
|||
if globalACMECARoot != nil && !slices.Contains(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string)) {
|
||||
acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string))
|
||||
}
|
||||
if globalACMEDNS != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.DNS == nil) {
|
||||
acmeIssuer.Challenges = &caddytls.ChallengesConfig{
|
||||
DNS: &caddytls.DNSChallengeConfig{
|
||||
ProviderRaw: caddyconfig.JSONModuleObject(globalACMEDNS, "name", globalACMEDNS.(caddy.Module).CaddyModule().ID.Name(), nil),
|
||||
},
|
||||
if globalACMEDNSok && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.DNS == nil || acmeIssuer.Challenges.DNS.ProviderRaw == nil) {
|
||||
globalDNS := options["dns"]
|
||||
if globalDNS == nil && globalACMEDNS == nil {
|
||||
return fmt.Errorf("acme_dns specified without DNS provider config, but no provider specified with 'dns' global option")
|
||||
}
|
||||
if acmeIssuer.Challenges == nil {
|
||||
acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
|
||||
}
|
||||
if acmeIssuer.Challenges.DNS == nil {
|
||||
acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
|
||||
}
|
||||
// If global `dns` is set, do NOT set provider in issuer, just set empty dns config
|
||||
if globalDNS == nil && acmeIssuer.Challenges.DNS.ProviderRaw == nil {
|
||||
// Set a global DNS provider if `acme_dns` is set and `dns` is NOT set
|
||||
acmeIssuer.Challenges.DNS.ProviderRaw = caddyconfig.JSONModuleObject(globalACMEDNS, "name", globalACMEDNS.(caddy.Module).CaddyModule().ID.Name(), nil)
|
||||
}
|
||||
}
|
||||
if globalACMEEAB != nil && acmeIssuer.ExternalAccount == nil {
|
||||
|
@ -596,6 +607,20 @@ func fillInGlobalACMEDefaults(issuer certmagic.Issuer, options map[string]any) e
|
|||
}
|
||||
acmeIssuer.Challenges.TLSALPN.AlternatePort = globalHTTPSPort.(int)
|
||||
}
|
||||
// If BindHost is still unset, fall back to the first default_bind address if set
|
||||
// This avoids binding the automation policy to the wildcard socket, which is unexpected behavior when a more selective socket is specified via default_bind
|
||||
// In BSD it is valid to bind to the wildcard socket even though a more selective socket is already open (still unexpected behavior by the caller though)
|
||||
// In Linux the same call will error with EADDRINUSE whenever the listener for the automation policy is opened
|
||||
if acmeIssuer.Challenges == nil || (acmeIssuer.Challenges.DNS == nil && acmeIssuer.Challenges.BindHost == "") {
|
||||
if defBinds, ok := globalDefaultBind.([]ConfigValue); ok && len(defBinds) > 0 {
|
||||
if abp, ok := defBinds[0].Value.(addressesWithProtocols); ok && len(abp.addresses) > 0 {
|
||||
if acmeIssuer.Challenges == nil {
|
||||
acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
|
||||
}
|
||||
acmeIssuer.Challenges.BindHost = abp.addresses[0]
|
||||
}
|
||||
}
|
||||
}
|
||||
if globalCertLifetime != nil && acmeIssuer.CertificateLifetime == 0 {
|
||||
acmeIssuer.CertificateLifetime = globalCertLifetime.(caddy.Duration)
|
||||
}
|
||||
|
@ -616,12 +641,18 @@ func newBaseAutomationPolicy(
|
|||
_, hasLocalCerts := options["local_certs"]
|
||||
keyType, hasKeyType := options["key_type"]
|
||||
ocspStapling, hasOCSPStapling := options["ocsp_stapling"]
|
||||
|
||||
hasGlobalAutomationOpts := hasIssuers || hasLocalCerts || hasKeyType || hasOCSPStapling
|
||||
|
||||
globalACMECA := options["acme_ca"]
|
||||
globalACMECARoot := options["acme_ca_root"]
|
||||
_, globalACMEDNS := options["acme_dns"] // can be set to nil (to use globally-defined "dns" value instead), but it is still set
|
||||
globalACMEEAB := options["acme_eab"]
|
||||
globalPreferredChains := options["preferred_chains"]
|
||||
hasGlobalACMEDefaults := globalACMECA != nil || globalACMECARoot != nil || globalACMEDNS || globalACMEEAB != nil || globalPreferredChains != nil
|
||||
|
||||
// if there are no global options related to automation policies
|
||||
// set, then we can just return right away
|
||||
if !hasGlobalAutomationOpts {
|
||||
if !hasGlobalAutomationOpts && !hasGlobalACMEDefaults {
|
||||
if always {
|
||||
return new(caddytls.AutomationPolicy), nil
|
||||
}
|
||||
|
@ -643,6 +674,14 @@ func newBaseAutomationPolicy(
|
|||
ap.Issuers = []certmagic.Issuer{new(caddytls.InternalIssuer)}
|
||||
}
|
||||
|
||||
if hasGlobalACMEDefaults {
|
||||
for i := range ap.Issuers {
|
||||
if err := fillInGlobalACMEDefaults(ap.Issuers[i], options); err != nil {
|
||||
return nil, fmt.Errorf("filling in global issuer defaults for issuer %d: %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if hasOCSPStapling {
|
||||
ocspConfig := ocspStapling.(certmagic.OCSPConfig)
|
||||
ap.DisableOCSPStapling = ocspConfig.DisableStapling
|
||||
|
|
|
@ -121,6 +121,13 @@ func (adminLoad) handleLoad(w http.ResponseWriter, r *http.Request) error {
|
|||
}
|
||||
}
|
||||
|
||||
// If this request changed the config, clear the last
|
||||
// config info we have stored, if it is different from
|
||||
// the original source.
|
||||
caddy.ClearLastConfigIfDifferent(
|
||||
r.Header.Get("Caddy-Config-Source-File"),
|
||||
r.Header.Get("Caddy-Config-Source-Adapter"))
|
||||
|
||||
caddy.Log().Named("admin.api").Info("load complete")
|
||||
|
||||
return nil
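
A sketch of how a client might supply those source headers when pushing config to the admin API; it assumes the standard POST /load admin endpoint on the default admin address, and the Caddyfile path shown is only an example.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"os"
)

func main() {
	body, err := os.ReadFile("/etc/caddy/Caddyfile") // example path
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodPost, "http://localhost:2019/load", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "text/caddyfile")
	// Tell the admin handler where this config came from so it can decide
	// whether to keep or clear the remembered last-config source.
	req.Header.Set("Caddy-Config-Source-File", "/etc/caddy/Caddyfile")
	req.Header.Set("Caddy-Config-Source-Adapter", "caddyfile")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```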
|
||||
|
|
|
@ -281,7 +281,7 @@ func validateTestPrerequisites(tc *Tester) error {
|
|||
tc.t.Cleanup(func() {
|
||||
os.Remove(f.Name())
|
||||
})
|
||||
if _, err := f.WriteString(fmt.Sprintf(initConfig, tc.config.AdminPort)); err != nil {
|
||||
if _, err := fmt.Fprintf(f, initConfig, tc.config.AdminPort); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -12,13 +12,14 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/caddyserver/caddy/v2/caddytest"
|
||||
"github.com/mholt/acmez/v3"
|
||||
"github.com/mholt/acmez/v3/acme"
|
||||
smallstepacme "github.com/smallstep/certificates/acme"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/exp/zapslog"
|
||||
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/caddyserver/caddy/v2/caddytest"
|
||||
)
|
||||
|
||||
const acmeChallengePort = 9081
|
||||
|
|
|
@ -9,11 +9,12 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/caddyserver/caddy/v2/caddytest"
|
||||
"github.com/mholt/acmez/v3"
|
||||
"github.com/mholt/acmez/v3/acme"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/exp/zapslog"
|
||||
|
||||
"github.com/caddyserver/caddy/v2/caddytest"
|
||||
)
|
||||
|
||||
func TestACMEServerDirectory(t *testing.T) {
|
||||
|
|
|
@ -0,0 +1,69 @@
|
|||
{
|
||||
acme_dns mock foo
|
||||
}
|
||||
|
||||
example.com {
|
||||
respond "Hello World"
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Hello World",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"tls": {
|
||||
"automation": {
|
||||
"policies": [
|
||||
{
|
||||
"issuers": [
|
||||
{
|
||||
"challenges": {
|
||||
"dns": {
|
||||
"provider": {
|
||||
"argument": "foo",
|
||||
"name": "mock"
|
||||
}
|
||||
}
|
||||
},
|
||||
"module": "acme"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,53 @@
|
|||
{
|
||||
dns mock
|
||||
acme_dns
|
||||
}
|
||||
|
||||
example.com {
|
||||
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"tls": {
|
||||
"automation": {
|
||||
"policies": [
|
||||
{
|
||||
"issuers": [
|
||||
{
|
||||
"challenges": {
|
||||
"dns": {}
|
||||
},
|
||||
"module": "acme"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"dns": {
|
||||
"name": "mock"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
acme_dns
|
||||
}
|
||||
|
||||
example.com {
|
||||
respond "Hello World"
|
||||
}
|
||||
----------
|
||||
acme_dns specified without DNS provider config, but no provider specified with 'dns' global option
|
|
@ -0,0 +1,72 @@
|
|||
{
|
||||
pki {
|
||||
ca custom-ca {
|
||||
name "Custom CA"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
acme.example.com {
|
||||
acme_server {
|
||||
ca custom-ca
|
||||
allow {
|
||||
domains host-1.internal.example.com host-2.internal.example.com
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"acme.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"ca": "custom-ca",
|
||||
"handler": "acme_server",
|
||||
"policy": {
|
||||
"allow": {
|
||||
"domains": [
|
||||
"host-1.internal.example.com",
|
||||
"host-2.internal.example.com"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"pki": {
|
||||
"certificate_authorities": {
|
||||
"custom-ca": {
|
||||
"name": "Custom CA"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,80 @@
|
|||
{
|
||||
pki {
|
||||
ca custom-ca {
|
||||
name "Custom CA"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
acme.example.com {
|
||||
acme_server {
|
||||
ca custom-ca
|
||||
allow {
|
||||
domains host-1.internal.example.com host-2.internal.example.com
|
||||
}
|
||||
deny {
|
||||
domains dc.internal.example.com
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"acme.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"ca": "custom-ca",
|
||||
"handler": "acme_server",
|
||||
"policy": {
|
||||
"allow": {
|
||||
"domains": [
|
||||
"host-1.internal.example.com",
|
||||
"host-2.internal.example.com"
|
||||
]
|
||||
},
|
||||
"deny": {
|
||||
"domains": [
|
||||
"dc.internal.example.com"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"pki": {
|
||||
"certificate_authorities": {
|
||||
"custom-ca": {
|
||||
"name": "Custom CA"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
{
|
||||
pki {
|
||||
ca custom-ca {
|
||||
name "Custom CA"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
acme.example.com {
|
||||
acme_server {
|
||||
ca custom-ca
|
||||
deny {
|
||||
domains dc.internal.example.com
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"acme.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"ca": "custom-ca",
|
||||
"handler": "acme_server",
|
||||
"policy": {
|
||||
"deny": {
|
||||
"domains": [
|
||||
"dc.internal.example.com"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"pki": {
|
||||
"certificate_authorities": {
|
||||
"custom-ca": {
|
||||
"name": "Custom CA"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,12 @@
|
|||
example.com
|
||||
handle {
|
||||
respond "one"
|
||||
}
|
||||
|
||||
example.com
|
||||
handle {
|
||||
respond "two"
|
||||
}
|
||||
----------
|
||||
Caddyfile:6: unrecognized directive: example.com
|
||||
Did you mean to define a second site? If so, you must use curly braces around each site to separate their configurations.
|
|
@ -0,0 +1,9 @@
|
|||
:8080 {
|
||||
respond "one"
|
||||
}
|
||||
|
||||
:8080 {
|
||||
respond "two"
|
||||
}
|
||||
----------
|
||||
ambiguous site definition: :8080
|
|
@ -1,109 +0,0 @@
|
|||
{
|
||||
auto_https prefer_wildcard
|
||||
}
|
||||
|
||||
*.example.com {
|
||||
tls {
|
||||
dns mock
|
||||
}
|
||||
respond "fallback"
|
||||
}
|
||||
|
||||
foo.example.com {
|
||||
respond "foo"
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"foo.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "foo",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
},
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"*.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "fallback",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"automatic_https": {
|
||||
"skip_certificates": [
|
||||
"foo.example.com"
|
||||
],
|
||||
"prefer_wildcard": true
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"tls": {
|
||||
"automation": {
|
||||
"policies": [
|
||||
{
|
||||
"subjects": [
|
||||
"*.example.com"
|
||||
],
|
||||
"issuers": [
|
||||
{
|
||||
"challenges": {
|
||||
"dns": {
|
||||
"provider": {
|
||||
"name": "mock"
|
||||
}
|
||||
}
|
||||
},
|
||||
"module": "acme"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,268 +0,0 @@
|
|||
{
|
||||
auto_https prefer_wildcard
|
||||
}
|
||||
|
||||
# Covers two domains
|
||||
*.one.example.com {
|
||||
tls {
|
||||
dns mock
|
||||
}
|
||||
respond "one fallback"
|
||||
}
|
||||
|
||||
# Is covered, should not get its own AP
|
||||
foo.one.example.com {
|
||||
respond "foo one"
|
||||
}
|
||||
|
||||
# This one has its own tls config so it doesn't get covered (escape hatch)
|
||||
bar.one.example.com {
|
||||
respond "bar one"
|
||||
tls bar@bar.com
|
||||
}
|
||||
|
||||
# Covers nothing but AP gets consolidated with the first
|
||||
*.two.example.com {
|
||||
tls {
|
||||
dns mock
|
||||
}
|
||||
respond "two fallback"
|
||||
}
|
||||
|
||||
# Is HTTP so it should not cover
|
||||
http://*.three.example.com {
|
||||
respond "three fallback"
|
||||
}
|
||||
|
||||
# Has no wildcard coverage so it gets an AP
|
||||
foo.three.example.com {
|
||||
respond "foo three"
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"foo.three.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "foo three",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
},
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"foo.one.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "foo one",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
},
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"bar.one.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "bar one",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
},
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"*.one.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "one fallback",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
},
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"*.two.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "two fallback",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"automatic_https": {
|
||||
"skip_certificates": [
|
||||
"foo.one.example.com",
|
||||
"bar.one.example.com"
|
||||
],
|
||||
"prefer_wildcard": true
|
||||
}
|
||||
},
|
||||
"srv1": {
|
||||
"listen": [
|
||||
":80"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"*.three.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "three fallback",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"automatic_https": {
|
||||
"prefer_wildcard": true
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"tls": {
|
||||
"automation": {
|
||||
"policies": [
|
||||
{
|
||||
"subjects": [
|
||||
"foo.three.example.com"
|
||||
]
|
||||
},
|
||||
{
|
||||
"subjects": [
|
||||
"bar.one.example.com"
|
||||
],
|
||||
"issuers": [
|
||||
{
|
||||
"email": "bar@bar.com",
|
||||
"module": "acme"
|
||||
},
|
||||
{
|
||||
"ca": "https://acme.zerossl.com/v2/DV90",
|
||||
"email": "bar@bar.com",
|
||||
"module": "acme"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"subjects": [
|
||||
"*.one.example.com",
|
||||
"*.two.example.com"
|
||||
],
|
||||
"issuers": [
|
||||
{
|
||||
"challenges": {
|
||||
"dns": {
|
||||
"provider": {
|
||||
"name": "mock"
|
||||
}
|
||||
}
|
||||
},
|
||||
"module": "acme"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,5 @@
|
|||
handle
|
||||
|
||||
respond "should not work"
|
||||
----------
|
||||
Caddyfile:1: parsed 'handle' as a site address, but it is a known directive; directives must appear in a site block
|
|
@@ -0,0 +1,12 @@
|
|||
{
|
||||
servers {
|
||||
srv0 {
|
||||
listen :8080
|
||||
}
|
||||
srv1 {
|
||||
listen :8080
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
parsing caddyfile tokens for 'servers': unrecognized servers option 'srv0', at Caddyfile:3
|
|
@@ -106,20 +106,29 @@ example.com {
|
|||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"group": "group0",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "rewrite",
|
||||
"uri": "/{http.error.status_code}.html"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "file_server",
|
||||
"hide": [
|
||||
"./Caddyfile"
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"group": "group0",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "rewrite",
|
||||
"uri": "/{http.error.status_code}.html"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "file_server",
|
||||
"hide": [
|
||||
"./Caddyfile"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
|
|
|
@@ -165,8 +165,17 @@ bar.localhost {
|
|||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "404 or 410 error",
|
||||
"handler": "static_response"
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "404 or 410 error",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
|
@@ -178,8 +187,17 @@ bar.localhost {
|
|||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error In range [500 .. 599]",
|
||||
"handler": "static_response"
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error In range [500 .. 599]",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
|
@@ -208,8 +226,17 @@ bar.localhost {
|
|||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "404 or 410 error from second site",
|
||||
"handler": "static_response"
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "404 or 410 error from second site",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
|
@@ -221,8 +248,17 @@ bar.localhost {
|
|||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error In range [500 .. 599] from second site",
|
||||
"handler": "static_response"
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error In range [500 .. 599] from second site",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
|
|
|
@@ -96,8 +96,17 @@ localhost:3010 {
|
|||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error in the [400 .. 499] range",
|
||||
"handler": "static_response"
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error in the [400 .. 499] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
|
|
|
@@ -116,8 +116,17 @@ localhost:2099 {
|
|||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error in the [400 .. 499] range",
|
||||
"handler": "static_response"
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error in the [400 .. 499] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
|
@@ -129,8 +138,17 @@ localhost:2099 {
|
|||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error code is equal to 500 or in the [300..399] range",
|
||||
"handler": "static_response"
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error code is equal to 500 or in the [300..399] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
|
|
|
@@ -96,8 +96,17 @@ localhost:3010 {
|
|||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "404 or 410 error",
|
||||
"handler": "static_response"
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "404 or 410 error",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
|
|
|
@@ -116,8 +116,17 @@ localhost:2099 {
|
|||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error in the [400 .. 499] range",
|
||||
"handler": "static_response"
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error in the [400 .. 499] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
|
@@ -129,8 +138,17 @@ localhost:2099 {
|
|||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Fallback route: code outside the [400..499] range",
|
||||
"handler": "static_response"
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Fallback route: code outside the [400..499] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,260 @@
|
|||
{
|
||||
http_port 2099
|
||||
}
|
||||
localhost:2099 {
|
||||
root * /var/www/
|
||||
file_server
|
||||
|
||||
handle_errors 404 {
|
||||
handle /en/* {
|
||||
respond "not found" 404
|
||||
}
|
||||
handle /es/* {
|
||||
respond "no encontrado"
|
||||
}
|
||||
handle {
|
||||
respond "default not found"
|
||||
}
|
||||
}
|
||||
handle_errors {
|
||||
handle /en/* {
|
||||
respond "English error"
|
||||
}
|
||||
handle /es/* {
|
||||
respond "Spanish error"
|
||||
}
|
||||
handle {
|
||||
respond "Default error"
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"http_port": 2099,
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":2099"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "vars",
|
||||
"root": "/var/www/"
|
||||
},
|
||||
{
|
||||
"handler": "file_server",
|
||||
"hide": [
|
||||
"./Caddyfile"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"errors": {
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"group": "group3",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "not found",
|
||||
"handler": "static_response",
|
||||
"status_code": 404
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
{
|
||||
"path": [
|
||||
"/en/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "group3",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "no encontrado",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
{
|
||||
"path": [
|
||||
"/es/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "group3",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "default not found",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
{
|
||||
"expression": "{http.error.status_code} in [404]"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"group": "group8",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "English error",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
{
|
||||
"path": [
|
||||
"/en/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "group8",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Spanish error",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
{
|
||||
"path": [
|
||||
"/es/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "group8",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Default error",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -31,9 +31,6 @@ example.com
|
|||
"automation": {
|
||||
"policies": [
|
||||
{
|
||||
"subjects": [
|
||||
"example.com"
|
||||
],
|
||||
"issuers": [
|
||||
{
|
||||
"module": "acme",
|
||||
|
|
|
@@ -18,6 +18,9 @@
|
|||
trusted_proxies static private_ranges
|
||||
client_ip_headers Custom-Real-Client-IP X-Forwarded-For
|
||||
client_ip_headers A-Third-One
|
||||
keepalive_interval 20s
|
||||
keepalive_idle 20s
|
||||
keepalive_count 10
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -45,6 +48,9 @@ foo.com {
|
|||
"read_header_timeout": 30000000000,
|
||||
"write_timeout": 30000000000,
|
||||
"idle_timeout": 30000000000,
|
||||
"keepalive_interval": 20000000000,
|
||||
"keepalive_idle": 20000000000,
|
||||
"keepalive_count": 10,
|
||||
"max_header_bytes": 100000000,
|
||||
"enable_full_duplex": true,
|
||||
"routes": [
|
||||
|
|
|
@@ -0,0 +1,64 @@
|
|||
:80 {
|
||||
header Test-Static ":443" "STATIC-WORKS"
|
||||
header Test-Dynamic ":{http.request.local.port}" "DYNAMIC-WORKS"
|
||||
header Test-Complex "port-{http.request.local.port}-end" "COMPLEX-{http.request.method}"
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":80"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "headers",
|
||||
"response": {
|
||||
"replace": {
|
||||
"Test-Static": [
|
||||
{
|
||||
"replace": "STATIC-WORKS",
|
||||
"search_regexp": ":443"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"handler": "headers",
|
||||
"response": {
|
||||
"replace": {
|
||||
"Test-Dynamic": [
|
||||
{
|
||||
"replace": "DYNAMIC-WORKS",
|
||||
"search_regexp": ":{http.request.local.port}"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"handler": "headers",
|
||||
"response": {
|
||||
"replace": {
|
||||
"Test-Complex": [
|
||||
{
|
||||
"replace": "COMPLEX-{http.request.method}",
|
||||
"search_regexp": "port-{http.request.local.port}-end"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,41 @@
|
|||
:80
|
||||
|
||||
handle {
|
||||
respond <<END
|
||||
line1
|
||||
line2
|
||||
END
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":80"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": " line1\n line2",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,9 @@
|
|||
:80
|
||||
|
||||
handle {
|
||||
respond <<EOF
|
||||
Hello
|
||||
# missing EOF marker
|
||||
}
|
||||
----------
|
||||
mismatched leading whitespace in heredoc <<EOF on line #5 [ Hello], expected whitespace [# missing ] to match the closing marker
|
|
@@ -0,0 +1,9 @@
|
|||
:80
|
||||
|
||||
handle {
|
||||
respond <<END!
|
||||
Hello
|
||||
END!
|
||||
}
|
||||
----------
|
||||
heredoc marker on line #4 must contain only alpha-numeric characters, dashes and underscores; got 'END!'
|
|
@@ -0,0 +1,10 @@
|
|||
:80
|
||||
|
||||
handle {
|
||||
respond <<END
|
||||
line1
|
||||
line2
|
||||
END
|
||||
}
|
||||
----------
|
||||
mismatched leading whitespace in heredoc <<END on line #5 [ line1], expected whitespace [ ] to match the closing marker
|
|
@@ -0,0 +1,9 @@
|
|||
:80
|
||||
|
||||
handle {
|
||||
respond <<
|
||||
Hello
|
||||
END
|
||||
}
|
||||
----------
|
||||
parsing caddyfile tokens for 'handle': unrecognized directive: Hello - are you sure your Caddyfile structure (nesting and braces) is correct?, at Caddyfile:7
|
|
@@ -0,0 +1,9 @@
|
|||
:80
|
||||
|
||||
handle {
|
||||
respond <<<END
|
||||
Hello
|
||||
END
|
||||
}
|
||||
----------
|
||||
too many '<' for heredoc on line #4; only use two, for example <<END
|
|
@@ -0,0 +1,13 @@
|
|||
(site) {
|
||||
http://{args[0]} https://{args[0]} {
|
||||
{block}
|
||||
}
|
||||
}
|
||||
import site test.domain {
|
||||
{
|
||||
header_up Host {host}
|
||||
header_up X-Real-IP {remote_host}
|
||||
}
|
||||
}
|
||||
----------
|
||||
anonymous blocks are not supported
|
|
@@ -0,0 +1,57 @@
|
|||
(snippet) {
|
||||
header {
|
||||
reverse_proxy localhost:3000
|
||||
{block}
|
||||
}
|
||||
}
|
||||
|
||||
example.com {
|
||||
import snippet
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "headers",
|
||||
"response": {
|
||||
"set": {
|
||||
"Reverse_proxy": [
|
||||
"localhost:3000"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,57 @@
|
|||
(snippet) {
|
||||
header {
|
||||
reverse_proxy localhost:3000
|
||||
{blocks.content_type}
|
||||
}
|
||||
}
|
||||
|
||||
example.com {
|
||||
import snippet
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "headers",
|
||||
"response": {
|
||||
"set": {
|
||||
"Reverse_proxy": [
|
||||
"localhost:3000"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,65 @@
|
|||
(site) {
|
||||
https://{args[0]} {
|
||||
{block}
|
||||
}
|
||||
}
|
||||
|
||||
import site test.domain {
|
||||
reverse_proxy http://192.168.1.1:8080 {
|
||||
header_up Host {host}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"test.domain"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "reverse_proxy",
|
||||
"headers": {
|
||||
"request": {
|
||||
"set": {
|
||||
"Host": [
|
||||
"{http.request.host}"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"upstreams": [
|
||||
{
|
||||
"dial": "192.168.1.1:8080"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,12 @@
|
|||
(import1) {
|
||||
import import2
|
||||
}
|
||||
|
||||
(import2) {
|
||||
import import1
|
||||
}
|
||||
|
||||
import import1
|
||||
|
||||
----------
|
||||
a cycle of imports exists between Caddyfile:import2 and Caddyfile:import1
|
|
@@ -0,0 +1,5 @@
|
|||
example.com {
|
||||
invoke foo
|
||||
}
|
||||
----------
|
||||
cannot invoke named route 'foo', which was not defined
|
|
@@ -0,0 +1,95 @@
|
|||
:80
|
||||
|
||||
log {
|
||||
output stdout
|
||||
format filter {
|
||||
wrap console
|
||||
|
||||
# Multiple regexp filters for the same field - this should work now!
|
||||
request>headers>Authorization regexp "Bearer\s+([A-Za-z0-9_-]+)" "Bearer [REDACTED]"
|
||||
request>headers>Authorization regexp "Basic\s+([A-Za-z0-9+/=]+)" "Basic [REDACTED]"
|
||||
request>headers>Authorization regexp "token=([^&\s]+)" "token=[REDACTED]"
|
||||
|
||||
# Single regexp filter - this should continue to work as before
|
||||
request>headers>Cookie regexp "sessionid=[^;]+" "sessionid=[REDACTED]"
|
||||
|
||||
# Mixed filters (non-regexp) - these should work normally
|
||||
request>headers>Server delete
|
||||
request>remote_ip ip_mask {
|
||||
ipv4 24
|
||||
ipv6 32
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"logging": {
|
||||
"logs": {
|
||||
"default": {
|
||||
"exclude": [
|
||||
"http.log.access.log0"
|
||||
]
|
||||
},
|
||||
"log0": {
|
||||
"writer": {
|
||||
"output": "stdout"
|
||||
},
|
||||
"encoder": {
|
||||
"fields": {
|
||||
"request\u003eheaders\u003eAuthorization": {
|
||||
"filter": "multi_regexp",
|
||||
"operations": [
|
||||
{
|
||||
"regexp": "Bearer\\s+([A-Za-z0-9_-]+)",
|
||||
"value": "Bearer [REDACTED]"
|
||||
},
|
||||
{
|
||||
"regexp": "Basic\\s+([A-Za-z0-9+/=]+)",
|
||||
"value": "Basic [REDACTED]"
|
||||
},
|
||||
{
|
||||
"regexp": "token=([^\u0026\\s]+)",
|
||||
"value": "token=[REDACTED]"
|
||||
}
|
||||
]
|
||||
},
|
||||
"request\u003eheaders\u003eCookie": {
|
||||
"filter": "regexp",
|
||||
"regexp": "sessionid=[^;]+",
|
||||
"value": "sessionid=[REDACTED]"
|
||||
},
|
||||
"request\u003eheaders\u003eServer": {
|
||||
"filter": "delete"
|
||||
},
|
||||
"request\u003eremote_ip": {
|
||||
"filter": "ip_mask",
|
||||
"ipv4_cidr": 24,
|
||||
"ipv6_cidr": 32
|
||||
}
|
||||
},
|
||||
"format": "filter",
|
||||
"wrap": {
|
||||
"format": "console"
|
||||
}
|
||||
},
|
||||
"include": [
|
||||
"http.log.access.log0"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":80"
|
||||
],
|
||||
"logs": {
|
||||
"default_logger_name": "log0"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,9 @@
|
|||
@foo {
|
||||
path /foo
|
||||
}
|
||||
|
||||
handle {
|
||||
respond "should not work"
|
||||
}
|
||||
----------
|
||||
request matchers may not be defined globally, they must be in a site block; found @foo, at Caddyfile:1
|
|
@@ -0,0 +1,41 @@
|
|||
:8884
|
||||
reverse_proxy 127.0.0.1:65535 {
|
||||
transport http {
|
||||
forward_proxy_url http://localhost:8080
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":8884"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "reverse_proxy",
|
||||
"transport": {
|
||||
"network_proxy": {
|
||||
"from": "url",
|
||||
"url": "http://localhost:8080"
|
||||
},
|
||||
"protocol": "http"
|
||||
},
|
||||
"upstreams": [
|
||||
{
|
||||
"dial": "127.0.0.1:65535"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,40 @@
|
|||
:8884
|
||||
reverse_proxy 127.0.0.1:65535 {
|
||||
transport http {
|
||||
network_proxy none
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":8884"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "reverse_proxy",
|
||||
"transport": {
|
||||
"network_proxy": {
|
||||
"from": "none"
|
||||
},
|
||||
"protocol": "http"
|
||||
},
|
||||
"upstreams": [
|
||||
{
|
||||
"dial": "127.0.0.1:65535"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,41 @@
|
|||
:8884
|
||||
reverse_proxy 127.0.0.1:65535 {
|
||||
transport http {
|
||||
network_proxy url http://localhost:8080
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":8884"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "reverse_proxy",
|
||||
"transport": {
|
||||
"network_proxy": {
|
||||
"from": "url",
|
||||
"url": "http://localhost:8080"
|
||||
},
|
||||
"protocol": "http"
|
||||
},
|
||||
"upstreams": [
|
||||
{
|
||||
"dial": "127.0.0.1:65535"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,59 @@
|
|||
{
|
||||
servers {
|
||||
trusted_proxies_unix
|
||||
}
|
||||
}
|
||||
|
||||
example.com {
|
||||
reverse_proxy https://local:8080
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "reverse_proxy",
|
||||
"transport": {
|
||||
"protocol": "http",
|
||||
"tls": {}
|
||||
},
|
||||
"upstreams": [
|
||||
{
|
||||
"dial": "local:8080"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"trusted_proxies_unix": true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,7 @@
|
|||
:70000
|
||||
|
||||
handle {
|
||||
respond "should not work"
|
||||
}
|
||||
----------
|
||||
port 70000 is out of range
|
|
@@ -0,0 +1,7 @@
|
|||
:-1
|
||||
|
||||
handle {
|
||||
respond "should not work"
|
||||
}
|
||||
----------
|
||||
port -1 is out of range
|
|
@@ -0,0 +1,7 @@
|
|||
foo://example.com
|
||||
|
||||
handle {
|
||||
respond "hello"
|
||||
}
|
||||
----------
|
||||
unsupported URL scheme foo://
|
|
@@ -0,0 +1,7 @@
|
|||
wss://example.com:70000
|
||||
|
||||
handle {
|
||||
respond "should not work"
|
||||
}
|
||||
----------
|
||||
port 70000 is out of range
|
|
@@ -0,0 +1,7 @@
|
|||
wss://example.com
|
||||
|
||||
handle {
|
||||
respond "hello"
|
||||
}
|
||||
----------
|
||||
the scheme wss:// is only supported in browsers; use https:// instead
|
|
@@ -131,13 +131,7 @@ shadowed.example.com {
|
|||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"automated1.example.com"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"automated1.example.com",
|
||||
"automated2.example.com"
|
||||
]
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,87 @@
|
|||
localhost
|
||||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
client_auth {
|
||||
mode request
|
||||
trust_pool inline {
|
||||
trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
|
||||
}
|
||||
verifier leaf {
|
||||
file ../caddy.ca.cer
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "hello from localhost",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"tls_connection_policies": [
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"localhost"
|
||||
]
|
||||
},
|
||||
"client_authentication": {
|
||||
"ca": {
|
||||
"provider": "inline",
|
||||
"trusted_ca_certs": [
|
||||
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
|
||||
]
|
||||
},
|
||||
"verifiers": [
|
||||
{
|
||||
"leaf_certs_loaders": [
|
||||
{
|
||||
"files": [
|
||||
"../caddy.ca.cer"
|
||||
],
|
||||
"loader": "file"
|
||||
}
|
||||
],
|
||||
"verifier": "leaf"
|
||||
}
|
||||
],
|
||||
"mode": "request"
|
||||
}
|
||||
},
|
||||
{}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,85 @@
|
|||
localhost
|
||||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
client_auth {
|
||||
mode request
|
||||
trust_pool inline {
|
||||
trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
|
||||
}
|
||||
verifier leaf file ../caddy.ca.cer
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "hello from localhost",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"tls_connection_policies": [
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"localhost"
|
||||
]
|
||||
},
|
||||
"client_authentication": {
|
||||
"ca": {
|
||||
"provider": "inline",
|
||||
"trusted_ca_certs": [
|
||||
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
|
||||
]
|
||||
},
|
||||
"verifiers": [
|
||||
{
|
||||
"leaf_certs_loaders": [
|
||||
{
|
||||
"files": [
|
||||
"../caddy.ca.cer"
|
||||
],
|
||||
"loader": "file"
|
||||
}
|
||||
],
|
||||
"verifier": "leaf"
|
||||
}
|
||||
],
|
||||
"mode": "request"
|
||||
}
|
||||
},
|
||||
{}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,94 @@
|
|||
localhost
|
||||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
client_auth {
|
||||
mode request
|
||||
trust_pool inline {
|
||||
trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
|
||||
}
|
||||
verifier leaf {
|
||||
file ../caddy.ca.cer
|
||||
file ../caddy.ca.cer
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "hello from localhost",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"tls_connection_policies": [
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"localhost"
|
||||
]
|
||||
},
|
||||
"client_authentication": {
|
||||
"ca": {
|
||||
"provider": "inline",
|
||||
"trusted_ca_certs": [
|
||||
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
|
||||
]
|
||||
},
|
||||
"verifiers": [
|
||||
{
|
||||
"leaf_certs_loaders": [
|
||||
{
|
||||
"files": [
|
||||
"../caddy.ca.cer"
|
||||
],
|
||||
"loader": "file"
|
||||
},
|
||||
{
|
||||
"files": [
|
||||
"../caddy.ca.cer"
|
||||
],
|
||||
"loader": "file"
|
||||
}
|
||||
],
|
||||
"verifier": "leaf"
|
||||
}
|
||||
],
|
||||
"mode": "request"
|
||||
}
|
||||
},
|
||||
{}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,87 @@
|
|||
localhost
|
||||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
client_auth {
|
||||
mode request
|
||||
trust_pool inline {
|
||||
trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
|
||||
}
|
||||
verifier leaf {
|
||||
folder ../
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "hello from localhost",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"tls_connection_policies": [
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"localhost"
|
||||
]
|
||||
},
|
||||
"client_authentication": {
|
||||
"ca": {
|
||||
"provider": "inline",
|
||||
"trusted_ca_certs": [
|
||||
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
|
||||
]
|
||||
},
|
||||
"verifiers": [
|
||||
{
|
||||
"leaf_certs_loaders": [
|
||||
{
|
||||
"folders": [
|
||||
"../"
|
||||
],
|
||||
"loader": "folder"
|
||||
}
|
||||
],
|
||||
"verifier": "leaf"
|
||||
}
|
||||
],
|
||||
"mode": "request"
|
||||
}
|
||||
},
|
||||
{}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,85 @@
|
|||
localhost
|
||||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
client_auth {
|
||||
mode request
|
||||
trust_pool inline {
|
||||
trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
|
||||
}
|
||||
verifier leaf folder ../
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "hello from localhost",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"tls_connection_policies": [
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"localhost"
|
||||
]
|
||||
},
|
||||
"client_authentication": {
|
||||
"ca": {
|
||||
"provider": "inline",
|
||||
"trusted_ca_certs": [
|
||||
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
|
||||
]
|
||||
},
|
||||
"verifiers": [
|
||||
{
|
||||
"leaf_certs_loaders": [
|
||||
{
|
||||
"folders": [
|
||||
"../"
|
||||
],
|
||||
"loader": "folder"
|
||||
}
|
||||
],
|
||||
"verifier": "leaf"
|
||||
}
|
||||
],
|
||||
"mode": "request"
|
||||
}
|
||||
},
|
||||
{}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,94 @@
|
|||
localhost
|
||||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
client_auth {
|
||||
mode request
|
||||
trust_pool inline {
|
||||
trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
|
||||
}
|
||||
verifier leaf {
|
||||
folder ../
|
||||
folder ../
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "hello from localhost",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"tls_connection_policies": [
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"localhost"
|
||||
]
|
||||
},
|
||||
"client_authentication": {
|
||||
"ca": {
|
||||
"provider": "inline",
|
||||
"trusted_ca_certs": [
|
||||
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
|
||||
]
|
||||
},
|
||||
"verifiers": [
|
||||
{
|
||||
"leaf_certs_loaders": [
|
||||
{
|
||||
"folders": [
|
||||
"../"
|
||||
],
|
||||
"loader": "folder"
|
||||
},
|
||||
{
|
||||
"folders": [
|
||||
"../"
|
||||
],
|
||||
"loader": "folder"
|
||||
}
|
||||
],
|
||||
"verifier": "leaf"
|
||||
}
|
||||
],
|
||||
"mode": "request"
|
||||
}
|
||||
},
|
||||
{}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,9 @@
|
|||
localhost
|
||||
|
||||
tls {
|
||||
propagation_delay 10s
|
||||
dns_ttl 5m
|
||||
}
|
||||
|
||||
----------
|
||||
parsing caddyfile tokens for 'tls': setting DNS challenge options [propagation_delay, dns_ttl] requires a DNS provider (set with the 'dns' subdirective or 'acme_dns' global option), at Caddyfile:6
|
|
@@ -0,0 +1,79 @@
|
|||
{
|
||||
acme_dns mock foo
|
||||
}
|
||||
|
||||
localhost {
|
||||
tls {
|
||||
dns mock bar
|
||||
resolvers 8.8.8.8 8.8.4.4
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"tls": {
|
||||
"automation": {
|
||||
"policies": [
|
||||
{
|
||||
"subjects": [
|
||||
"localhost"
|
||||
],
|
||||
"issuers": [
|
||||
{
|
||||
"challenges": {
|
||||
"dns": {
|
||||
"provider": {
|
||||
"argument": "bar",
|
||||
"name": "mock"
|
||||
},
|
||||
"resolvers": [
|
||||
"8.8.8.8",
|
||||
"8.8.4.4"
|
||||
]
|
||||
}
|
||||
},
|
||||
"module": "acme"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"issuers": [
|
||||
{
|
||||
"challenges": {
|
||||
"dns": {
|
||||
"provider": {
|
||||
"argument": "foo",
|
||||
"name": "mock"
|
||||
}
|
||||
}
|
||||
},
|
||||
"module": "acme"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,68 @@
|
|||
{
|
||||
dns mock foo
|
||||
}
|
||||
|
||||
localhost {
|
||||
tls {
|
||||
dns mock bar
|
||||
resolvers 8.8.8.8 8.8.4.4
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"tls": {
|
||||
"automation": {
|
||||
"policies": [
|
||||
{
|
||||
"subjects": [
|
||||
"localhost"
|
||||
],
|
||||
"issuers": [
|
||||
{
|
||||
"challenges": {
|
||||
"dns": {
|
||||
"provider": {
|
||||
"argument": "bar",
|
||||
"name": "mock"
|
||||
},
|
||||
"resolvers": [
|
||||
"8.8.8.8",
|
||||
"8.8.4.4"
|
||||
]
|
||||
}
|
||||
},
|
||||
"module": "acme"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"dns": {
|
||||
"argument": "foo",
|
||||
"name": "mock"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,7 @@
|
|||
:443 {
|
||||
tls {
|
||||
propagation_timeout 30s
|
||||
}
|
||||
}
|
||||
----------
|
||||
parsing caddyfile tokens for 'tls': setting DNS challenge options [propagation_timeout] requires a DNS provider (set with the 'dns' subdirective or 'acme_dns' global option), at Caddyfile:4
|
|
@@ -0,0 +1,7 @@
|
|||
:443 {
|
||||
tls {
|
||||
propagation_delay 30s
|
||||
}
|
||||
}
|
||||
----------
|
||||
parsing caddyfile tokens for 'tls': setting DNS challenge options [propagation_delay] requires a DNS provider (set with the 'dns' subdirective or 'acme_dns' global option), at Caddyfile:4
|
|
@@ -0,0 +1,76 @@
|
|||
{
|
||||
acme_dns mock
|
||||
}
|
||||
|
||||
localhost {
|
||||
tls {
|
||||
resolvers 8.8.8.8 8.8.4.4
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"tls": {
|
||||
"automation": {
|
||||
"policies": [
|
||||
{
|
||||
"subjects": [
|
||||
"localhost"
|
||||
],
|
||||
"issuers": [
|
||||
{
|
||||
"challenges": {
|
||||
"dns": {
|
||||
"provider": {
|
||||
"name": "mock"
|
||||
},
|
||||
"resolvers": [
|
||||
"8.8.8.8",
|
||||
"8.8.4.4"
|
||||
]
|
||||
}
|
||||
},
|
||||
"module": "acme"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"issuers": [
|
||||
{
|
||||
"challenges": {
|
||||
"dns": {
|
||||
"provider": {
|
||||
"name": "mock"
|
||||
}
|
||||
}
|
||||
},
|
||||
"module": "acme"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -2,6 +2,7 @@ localhost
|
|||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
dns mock
|
||||
dns_ttl 5m10s
|
||||
}
|
||||
----------
|
||||
|
@@ -54,6 +55,9 @@ tls {
|
|||
{
|
||||
"challenges": {
|
||||
"dns": {
|
||||
"provider": {
|
||||
"name": "mock"
|
||||
},
|
||||
"ttl": 310000000000
|
||||
}
|
||||
},
|
||||
|
|
|
@@ -2,6 +2,7 @@ localhost
|
|||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
dns mock
|
||||
propagation_delay 5m10s
|
||||
propagation_timeout 10m20s
|
||||
}
|
||||
|
@@ -56,7 +57,10 @@ tls {
|
|||
"challenges": {
|
||||
"dns": {
|
||||
"propagation_delay": 310000000000,
|
||||
"propagation_timeout": 620000000000
|
||||
"propagation_timeout": 620000000000,
|
||||
"provider": {
|
||||
"name": "mock"
|
||||
}
|
||||
}
|
||||
},
|
||||
"module": "acme"
|
||||
|
|
|
@@ -9,8 +9,8 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/caddyserver/caddy/v2/caddyconfig"
|
||||
"github.com/caddyserver/caddy/v2/caddytest"
|
||||
|
||||
_ "github.com/caddyserver/caddy/v2/internal/testmocks"
|
||||
)
|
||||
|
||||
|
@@ -28,30 +28,48 @@ func TestCaddyfileAdaptToJSON(t *testing.T) {
|
|||
if f.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
// read the test file
|
||||
filename := f.Name()
|
||||
data, err := os.ReadFile("./caddyfile_adapt/" + filename)
|
||||
if err != nil {
|
||||
t.Errorf("failed to read %s dir: %s", filename, err)
|
||||
}
|
||||
|
||||
// split the Caddyfile (first) and JSON (second) parts
|
||||
// (append newline to Caddyfile to match formatter expectations)
|
||||
parts := strings.Split(string(data), "----------")
|
||||
caddyfile, json := strings.TrimSpace(parts[0])+"\n", strings.TrimSpace(parts[1])
|
||||
// run each file as a subtest, so that we can see which one fails more easily
|
||||
t.Run(filename, func(t *testing.T) {
|
||||
// read the test file
|
||||
data, err := os.ReadFile("./caddyfile_adapt/" + filename)
|
||||
if err != nil {
|
||||
t.Errorf("failed to read %s dir: %s", filename, err)
|
||||
}
|
||||
|
||||
// replace windows newlines in the json with unix newlines
|
||||
json = winNewlines.ReplaceAllString(json, "\n")
|
||||
// split the Caddyfile (first) and JSON (second) parts
|
||||
// (append newline to Caddyfile to match formatter expectations)
|
||||
parts := strings.Split(string(data), "----------")
|
||||
caddyfile, expected := strings.TrimSpace(parts[0])+"\n", strings.TrimSpace(parts[1])
|
||||
|
||||
// replace os-specific default path for file_server's hide field
|
||||
replacePath, _ := jsonMod.Marshal(fmt.Sprint(".", string(filepath.Separator), "Caddyfile"))
|
||||
json = strings.ReplaceAll(json, `"./Caddyfile"`, string(replacePath))
|
||||
// replace windows newlines in the json with unix newlines
|
||||
expected = winNewlines.ReplaceAllString(expected, "\n")
|
||||
|
||||
// run the test
|
||||
ok := caddytest.CompareAdapt(t, filename, caddyfile, "caddyfile", json)
|
||||
if !ok {
|
||||
t.Errorf("failed to adapt %s", filename)
|
||||
}
|
||||
// replace os-specific default path for file_server's hide field
|
||||
replacePath, _ := jsonMod.Marshal(fmt.Sprint(".", string(filepath.Separator), "Caddyfile"))
|
||||
expected = strings.ReplaceAll(expected, `"./Caddyfile"`, string(replacePath))
|
||||
|
||||
// if the expected output is JSON, compare it
|
||||
if len(expected) > 0 && expected[0] == '{' {
|
||||
ok := caddytest.CompareAdapt(t, filename, caddyfile, "caddyfile", expected)
|
||||
if !ok {
|
||||
t.Errorf("failed to adapt %s", filename)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// otherwise, adapt the Caddyfile and check for errors
|
||||
cfgAdapter := caddyconfig.GetAdapter("caddyfile")
|
||||
_, _, err = cfgAdapter.Adapt([]byte(caddyfile), nil)
|
||||
if err == nil {
|
||||
t.Errorf("expected error for %s but got none", filename)
|
||||
} else {
|
||||
normalizedErr := winNewlines.ReplaceAllString(err.Error(), "\n")
|
||||
if !strings.Contains(normalizedErr, expected) {
|
||||
t.Errorf("expected error for %s to contain:\n%s\nbut got:\n%s", filename, expected, normalizedErr)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
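
The rewritten TestCaddyfileAdaptToJSON above runs each fixture as a subtest and, when the text after the "----------" separator is not JSON, treats it as the error message the caddyfile adapter is expected to return. Below is a minimal stand-alone sketch of that same adapt flow; it is not part of the diff, the fixture filename is hypothetical, and the httpcaddyfile import is assumed here to be what registers the "caddyfile" adapter.

package main

import (
    "fmt"
    "os"
    "strings"

    "github.com/caddyserver/caddy/v2/caddyconfig"
    // Assumption: this blank import is what registers the "caddyfile" adapter.
    _ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
)

func main() {
    // Hypothetical fixture path; the real fixtures live under ./caddyfile_adapt/.
    data, err := os.ReadFile("./caddyfile_adapt/example.caddyfiletest")
    if err != nil {
        panic(err)
    }

    // A fixture is the Caddyfile, a "----------" separator, then either the
    // expected adapted JSON or an expected error message.
    parts := strings.Split(string(data), "----------")
    caddyfile := strings.TrimSpace(parts[0]) + "\n"
    expected := strings.TrimSpace(parts[1])

    adapter := caddyconfig.GetAdapter("caddyfile")
    if adapter == nil {
        panic("caddyfile adapter not registered")
    }

    adaptedJSON, _, err := adapter.Adapt([]byte(caddyfile), nil)
    if err != nil {
        // Error fixtures compare the expected text against this message.
        fmt.Println("adapt error:", err)
        return
    }
    fmt.Println("expected:", expected)
    fmt.Println("got:", string(adaptedJSON))
}

The test itself goes further than this sketch, normalizing Windows newlines and the OS-specific "./Caddyfile" hide path before comparing, as shown in the hunk above.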
|
||||
|
|