Compare commits
61 Commits
main
...
postgres-s
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
11c79a6369 | ||
|
|
10a6939d8e | ||
|
|
9736973286 | ||
|
|
039566bcaf | ||
|
|
544ce112b5 | ||
|
|
c19377109e | ||
|
|
ccbd02331c | ||
|
|
542aa403d2 | ||
|
|
ebb48e217d | ||
|
|
7710ace184 | ||
|
|
5c26e70fe7 | ||
|
|
c66fa92341 | ||
|
|
a7d5a9c5d8 | ||
|
|
391cd2c920 | ||
|
|
9eec72adcc | ||
|
|
28a436c0d2 | ||
|
|
b02366b42b | ||
|
|
90d0eca14d | ||
|
|
811c7ae25a | ||
|
|
850a9d4cc4 | ||
|
|
43280fbc0a | ||
|
|
35a54407a8 | ||
|
|
f726cc768e | ||
|
|
5d301e7dce | ||
|
|
6375c2ce60 | ||
|
|
459c80ef9b | ||
|
|
b1eb90addc | ||
|
|
4b6979aa89 | ||
|
|
c76e39bb0e | ||
|
|
b82e1c3915 | ||
|
|
07e60ba041 | ||
|
|
7c69a76345 | ||
|
|
a28d8e7924 | ||
|
|
13a3062a7f | ||
|
|
eb6e1ac44a | ||
|
|
a4c836b531 | ||
|
|
e818b063f7 | ||
|
|
039d555689 | ||
|
|
209d5a4c62 | ||
|
|
bf265449ac | ||
|
|
4cbd80c68e | ||
|
|
305e3fc9af | ||
|
|
9e4a48b058 | ||
|
|
939b3d1117 | ||
|
|
9cc9891f49 | ||
|
|
0d1f3444f2 | ||
|
|
2716ede6e1 | ||
|
|
ae5e1fe8d8 | ||
|
|
e3a402ed95 | ||
|
|
1abc1005d0 | ||
|
|
909c3fe17b | ||
|
|
07c3e280bf | ||
|
|
b567b4e904 | ||
|
|
60fa50f0d5 | ||
|
|
82e15d84bd | ||
|
|
4e5f95ba0c | ||
|
|
869b972a50 | ||
|
|
bdd20197b3 | ||
|
|
a8dcecdb6d | ||
|
|
5331437664 | ||
|
|
e432bf2886 |
16
.github/workflows/release.yaml
vendored
16
.github/workflows/release.yaml
vendored
@@ -6,6 +6,22 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
release:
|
release:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
services:
|
||||||
|
postgres:
|
||||||
|
image: postgres:17
|
||||||
|
env:
|
||||||
|
POSTGRES_USER: ntfy
|
||||||
|
POSTGRES_PASSWORD: ntfy
|
||||||
|
POSTGRES_DB: ntfy_test
|
||||||
|
ports:
|
||||||
|
- 5432:5432
|
||||||
|
options: >-
|
||||||
|
--health-cmd "pg_isready -U ntfy"
|
||||||
|
--health-interval 10s
|
||||||
|
--health-timeout 5s
|
||||||
|
--health-retries 5
|
||||||
|
env:
|
||||||
|
NTFY_TEST_DATABASE_URL: "postgres://ntfy:ntfy@localhost:5432/ntfy_test?sslmode=disable"
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
|
|||||||
18
.github/workflows/test.yaml
vendored
18
.github/workflows/test.yaml
vendored
@@ -3,6 +3,22 @@ on: [ push, pull_request ]
|
|||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
services:
|
||||||
|
postgres:
|
||||||
|
image: postgres:17
|
||||||
|
env:
|
||||||
|
POSTGRES_USER: ntfy
|
||||||
|
POSTGRES_PASSWORD: ntfy
|
||||||
|
POSTGRES_DB: ntfy_test
|
||||||
|
ports:
|
||||||
|
- 5432:5432
|
||||||
|
options: >-
|
||||||
|
--health-cmd "pg_isready -U ntfy"
|
||||||
|
--health-interval 10s
|
||||||
|
--health-timeout 5s
|
||||||
|
--health-retries 5
|
||||||
|
env:
|
||||||
|
NTFY_TEST_DATABASE_URL: "postgres://ntfy:ntfy@localhost:5432/ntfy_test?sslmode=disable"
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@@ -23,7 +39,7 @@ jobs:
|
|||||||
- name: Build web app (required for tests)
|
- name: Build web app (required for tests)
|
||||||
run: make web
|
run: make web
|
||||||
- name: Run tests, formatting, vetting and linting
|
- name: Run tests, formatting, vetting and linting
|
||||||
run: make check
|
run: make checkv
|
||||||
- name: Run coverage
|
- name: Run coverage
|
||||||
run: make coverage
|
run: make coverage
|
||||||
- name: Upload coverage to codecov.io
|
- name: Upload coverage to codecov.io
|
||||||
|
|||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -7,6 +7,7 @@ build/
|
|||||||
server/docs/
|
server/docs/
|
||||||
server/site/
|
server/site/
|
||||||
tools/fbsend/fbsend
|
tools/fbsend/fbsend
|
||||||
|
tools/pgimport/pgimport
|
||||||
playground/
|
playground/
|
||||||
secrets/
|
secrets/
|
||||||
*.iml
|
*.iml
|
||||||
|
|||||||
6
Makefile
6
Makefile
@@ -265,11 +265,13 @@ cli-build-results:
|
|||||||
|
|
||||||
check: test web-fmt-check fmt-check vet web-lint lint staticcheck
|
check: test web-fmt-check fmt-check vet web-lint lint staticcheck
|
||||||
|
|
||||||
|
checkv: testv web-fmt-check fmt-check vet web-lint lint staticcheck
|
||||||
|
|
||||||
test: .PHONY
|
test: .PHONY
|
||||||
go test $(shell go list ./... | grep -vE 'ntfy/(test|examples|tools)')
|
go test -parallel 3 $(shell go list ./... | grep -vE 'ntfy/(test|examples|tools)')
|
||||||
|
|
||||||
testv: .PHONY
|
testv: .PHONY
|
||||||
go test -v $(shell go list ./... | grep -vE 'ntfy/(test|examples|tools)')
|
go test -v -parallel 3 $(shell go list ./... | grep -vE 'ntfy/(test|examples|tools)')
|
||||||
|
|
||||||
race: .PHONY
|
race: .PHONY
|
||||||
go test -v -race $(shell go list ./... | grep -vE 'ntfy/(test|examples|tools)')
|
go test -v -race $(shell go list ./... | grep -vE 'ntfy/(test|examples|tools)')
|
||||||
|
|||||||
36
cmd/serve.go
36
cmd/serve.go
@@ -39,6 +39,7 @@ var flagsServe = append(
|
|||||||
altsrc.NewStringFlag(&cli.StringFlag{Name: "key-file", Aliases: []string{"key_file", "K"}, EnvVars: []string{"NTFY_KEY_FILE"}, Usage: "private key file, if listen-https is set"}),
|
altsrc.NewStringFlag(&cli.StringFlag{Name: "key-file", Aliases: []string{"key_file", "K"}, EnvVars: []string{"NTFY_KEY_FILE"}, Usage: "private key file, if listen-https is set"}),
|
||||||
altsrc.NewStringFlag(&cli.StringFlag{Name: "cert-file", Aliases: []string{"cert_file", "E"}, EnvVars: []string{"NTFY_CERT_FILE"}, Usage: "certificate file, if listen-https is set"}),
|
altsrc.NewStringFlag(&cli.StringFlag{Name: "cert-file", Aliases: []string{"cert_file", "E"}, EnvVars: []string{"NTFY_CERT_FILE"}, Usage: "certificate file, if listen-https is set"}),
|
||||||
altsrc.NewStringFlag(&cli.StringFlag{Name: "firebase-key-file", Aliases: []string{"firebase_key_file", "F"}, EnvVars: []string{"NTFY_FIREBASE_KEY_FILE"}, Usage: "Firebase credentials file; if set additionally publish to FCM topic"}),
|
altsrc.NewStringFlag(&cli.StringFlag{Name: "firebase-key-file", Aliases: []string{"firebase_key_file", "F"}, EnvVars: []string{"NTFY_FIREBASE_KEY_FILE"}, Usage: "Firebase credentials file; if set additionally publish to FCM topic"}),
|
||||||
|
altsrc.NewStringFlag(&cli.StringFlag{Name: "database-url", Aliases: []string{"database_url"}, EnvVars: []string{"NTFY_DATABASE_URL"}, Usage: "PostgreSQL connection string for database-backed stores (e.g. postgres://user:pass@host:5432/ntfy)"}),
|
||||||
altsrc.NewStringFlag(&cli.StringFlag{Name: "cache-file", Aliases: []string{"cache_file", "C"}, EnvVars: []string{"NTFY_CACHE_FILE"}, Usage: "cache file used for message caching"}),
|
altsrc.NewStringFlag(&cli.StringFlag{Name: "cache-file", Aliases: []string{"cache_file", "C"}, EnvVars: []string{"NTFY_CACHE_FILE"}, Usage: "cache file used for message caching"}),
|
||||||
altsrc.NewStringFlag(&cli.StringFlag{Name: "cache-duration", Aliases: []string{"cache_duration", "b"}, EnvVars: []string{"NTFY_CACHE_DURATION"}, Value: util.FormatDuration(server.DefaultCacheDuration), Usage: "buffer messages for this time to allow `since` requests"}),
|
altsrc.NewStringFlag(&cli.StringFlag{Name: "cache-duration", Aliases: []string{"cache_duration", "b"}, EnvVars: []string{"NTFY_CACHE_DURATION"}, Value: util.FormatDuration(server.DefaultCacheDuration), Usage: "buffer messages for this time to allow `since` requests"}),
|
||||||
altsrc.NewIntFlag(&cli.IntFlag{Name: "cache-batch-size", Aliases: []string{"cache_batch_size"}, EnvVars: []string{"NTFY_BATCH_SIZE"}, Usage: "max size of messages to batch together when writing to message cache (if zero, writes are synchronous)"}),
|
altsrc.NewIntFlag(&cli.IntFlag{Name: "cache-batch-size", Aliases: []string{"cache_batch_size"}, EnvVars: []string{"NTFY_BATCH_SIZE"}, Usage: "max size of messages to batch together when writing to message cache (if zero, writes are synchronous)"}),
|
||||||
@@ -143,6 +144,7 @@ func execServe(c *cli.Context) error {
|
|||||||
keyFile := c.String("key-file")
|
keyFile := c.String("key-file")
|
||||||
certFile := c.String("cert-file")
|
certFile := c.String("cert-file")
|
||||||
firebaseKeyFile := c.String("firebase-key-file")
|
firebaseKeyFile := c.String("firebase-key-file")
|
||||||
|
databaseURL := c.String("database-url")
|
||||||
webPushPrivateKey := c.String("web-push-private-key")
|
webPushPrivateKey := c.String("web-push-private-key")
|
||||||
webPushPublicKey := c.String("web-push-public-key")
|
webPushPublicKey := c.String("web-push-public-key")
|
||||||
webPushFile := c.String("web-push-file")
|
webPushFile := c.String("web-push-file")
|
||||||
@@ -280,12 +282,14 @@ func execServe(c *cli.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Check values
|
// Check values
|
||||||
if firebaseKeyFile != "" && !util.FileExists(firebaseKeyFile) {
|
if databaseURL != "" && (authFile != "" || cacheFile != "" || webPushFile != "") {
|
||||||
|
return errors.New("if database-url is set, auth-file, cache-file, and web-push-file must not be set")
|
||||||
|
} else if firebaseKeyFile != "" && !util.FileExists(firebaseKeyFile) {
|
||||||
return errors.New("if set, FCM key file must exist")
|
return errors.New("if set, FCM key file must exist")
|
||||||
} else if firebaseKeyFile != "" && !server.FirebaseAvailable {
|
} else if firebaseKeyFile != "" && !server.FirebaseAvailable {
|
||||||
return errors.New("cannot set firebase-key-file, support for Firebase is not available (nofirebase)")
|
return errors.New("cannot set firebase-key-file, support for Firebase is not available (nofirebase)")
|
||||||
} else if webPushPublicKey != "" && (webPushPrivateKey == "" || webPushFile == "" || webPushEmailAddress == "" || baseURL == "") {
|
} else if webPushPublicKey != "" && (webPushPrivateKey == "" || (webPushFile == "" && databaseURL == "") || webPushEmailAddress == "" || baseURL == "") {
|
||||||
return errors.New("if web push is enabled, web-push-private-key, web-push-public-key, web-push-file, web-push-email-address, and base-url should be set. run 'ntfy webpush keys' to generate keys")
|
return errors.New("if web push is enabled, web-push-private-key, web-push-public-key, web-push-file (or database-url), web-push-email-address, and base-url should be set. run 'ntfy webpush keys' to generate keys")
|
||||||
} else if keepaliveInterval < 5*time.Second {
|
} else if keepaliveInterval < 5*time.Second {
|
||||||
return errors.New("keepalive interval cannot be lower than five seconds")
|
return errors.New("keepalive interval cannot be lower than five seconds")
|
||||||
} else if managerInterval < 5*time.Second {
|
} else if managerInterval < 5*time.Second {
|
||||||
@@ -321,8 +325,8 @@ func execServe(c *cli.Context) error {
|
|||||||
return errors.New("if upstream-base-url is set, base-url must also be set")
|
return errors.New("if upstream-base-url is set, base-url must also be set")
|
||||||
} else if upstreamBaseURL != "" && baseURL != "" && baseURL == upstreamBaseURL {
|
} else if upstreamBaseURL != "" && baseURL != "" && baseURL == upstreamBaseURL {
|
||||||
return errors.New("base-url and upstream-base-url cannot be identical, you'll likely want to set upstream-base-url to https://ntfy.sh, see https://ntfy.sh/docs/config/#ios-instant-notifications")
|
return errors.New("base-url and upstream-base-url cannot be identical, you'll likely want to set upstream-base-url to https://ntfy.sh, see https://ntfy.sh/docs/config/#ios-instant-notifications")
|
||||||
} else if authFile == "" && (enableSignup || enableLogin || requireLogin || enableReservations || stripeSecretKey != "") {
|
} else if authFile == "" && databaseURL == "" && (enableSignup || enableLogin || requireLogin || enableReservations || stripeSecretKey != "") {
|
||||||
return errors.New("cannot set enable-signup, enable-login, require-login, enable-reserve-topics, or stripe-secret-key if auth-file is not set")
|
return errors.New("cannot set enable-signup, enable-login, require-login, enable-reserve-topics, or stripe-secret-key if auth-file or database-url is not set")
|
||||||
} else if enableSignup && !enableLogin {
|
} else if enableSignup && !enableLogin {
|
||||||
return errors.New("cannot set enable-signup without also setting enable-login")
|
return errors.New("cannot set enable-signup without also setting enable-login")
|
||||||
} else if requireLogin && !enableLogin {
|
} else if requireLogin && !enableLogin {
|
||||||
@@ -331,8 +335,8 @@ func execServe(c *cli.Context) error {
|
|||||||
return errors.New("cannot set stripe-secret-key or stripe-webhook-key, support for payments is not available in this build (nopayments)")
|
return errors.New("cannot set stripe-secret-key or stripe-webhook-key, support for payments is not available in this build (nopayments)")
|
||||||
} else if stripeSecretKey != "" && (stripeWebhookKey == "" || baseURL == "") {
|
} else if stripeSecretKey != "" && (stripeWebhookKey == "" || baseURL == "") {
|
||||||
return errors.New("if stripe-secret-key is set, stripe-webhook-key and base-url must also be set")
|
return errors.New("if stripe-secret-key is set, stripe-webhook-key and base-url must also be set")
|
||||||
} else if twilioAccount != "" && (twilioAuthToken == "" || twilioPhoneNumber == "" || twilioVerifyService == "" || baseURL == "" || authFile == "") {
|
} else if twilioAccount != "" && (twilioAuthToken == "" || twilioPhoneNumber == "" || twilioVerifyService == "" || baseURL == "" || (authFile == "" && databaseURL == "")) {
|
||||||
return errors.New("if twilio-account is set, twilio-auth-token, twilio-phone-number, twilio-verify-service, base-url, and auth-file must also be set")
|
return errors.New("if twilio-account is set, twilio-auth-token, twilio-phone-number, twilio-verify-service, base-url, and auth-file (or database-url) must also be set")
|
||||||
} else if messageSizeLimit > server.DefaultMessageSizeLimit {
|
} else if messageSizeLimit > server.DefaultMessageSizeLimit {
|
||||||
log.Warn("message-size-limit is greater than 4K, this is not recommended and largely untested, and may lead to issues with some clients")
|
log.Warn("message-size-limit is greater than 4K, this is not recommended and largely untested, and may lead to issues with some clients")
|
||||||
if messageSizeLimit > 5*1024*1024 {
|
if messageSizeLimit > 5*1024*1024 {
|
||||||
@@ -412,6 +416,15 @@ func execServe(c *cli.Context) error {
|
|||||||
payments.Setup(stripeSecretKey)
|
payments.Setup(stripeSecretKey)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Parse Twilio template
|
||||||
|
var twilioCallFormatTemplate *template.Template
|
||||||
|
if twilioCallFormat != "" {
|
||||||
|
twilioCallFormatTemplate, err = template.New("").Parse(twilioCallFormat)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to parse twilio-call-format template: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Add default forbidden topics
|
// Add default forbidden topics
|
||||||
disallowedTopics = append(disallowedTopics, server.DefaultDisallowedTopics...)
|
disallowedTopics = append(disallowedTopics, server.DefaultDisallowedTopics...)
|
||||||
|
|
||||||
@@ -459,13 +472,7 @@ func execServe(c *cli.Context) error {
|
|||||||
conf.TwilioAuthToken = twilioAuthToken
|
conf.TwilioAuthToken = twilioAuthToken
|
||||||
conf.TwilioPhoneNumber = twilioPhoneNumber
|
conf.TwilioPhoneNumber = twilioPhoneNumber
|
||||||
conf.TwilioVerifyService = twilioVerifyService
|
conf.TwilioVerifyService = twilioVerifyService
|
||||||
if twilioCallFormat != "" {
|
conf.TwilioCallFormat = twilioCallFormatTemplate
|
||||||
tmpl, err := template.New("twiml").Parse(twilioCallFormat)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to parse twilio-call-format template: %w", err)
|
|
||||||
}
|
|
||||||
conf.TwilioCallFormat = tmpl
|
|
||||||
}
|
|
||||||
conf.MessageSizeLimit = int(messageSizeLimit)
|
conf.MessageSizeLimit = int(messageSizeLimit)
|
||||||
conf.MessageDelayMax = messageDelayLimit
|
conf.MessageDelayMax = messageDelayLimit
|
||||||
conf.TotalTopicLimit = totalTopicLimit
|
conf.TotalTopicLimit = totalTopicLimit
|
||||||
@@ -494,6 +501,7 @@ func execServe(c *cli.Context) error {
|
|||||||
conf.EnableMetrics = enableMetrics
|
conf.EnableMetrics = enableMetrics
|
||||||
conf.MetricsListenHTTP = metricsListenHTTP
|
conf.MetricsListenHTTP = metricsListenHTTP
|
||||||
conf.ProfileListenHTTP = profileListenHTTP
|
conf.ProfileListenHTTP = profileListenHTTP
|
||||||
|
conf.DatabaseURL = databaseURL
|
||||||
conf.WebPushPrivateKey = webPushPrivateKey
|
conf.WebPushPrivateKey = webPushPrivateKey
|
||||||
conf.WebPushPublicKey = webPushPublicKey
|
conf.WebPushPublicKey = webPushPublicKey
|
||||||
conf.WebPushFile = webPushFile
|
conf.WebPushFile = webPushFile
|
||||||
|
|||||||
28
cmd/user.go
28
cmd/user.go
@@ -6,13 +6,14 @@ import (
|
|||||||
"crypto/subtle"
|
"crypto/subtle"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"heckel.io/ntfy/v2/server"
|
|
||||||
"heckel.io/ntfy/v2/user"
|
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
"github.com/urfave/cli/v2/altsrc"
|
"github.com/urfave/cli/v2/altsrc"
|
||||||
|
"heckel.io/ntfy/v2/db"
|
||||||
|
"heckel.io/ntfy/v2/server"
|
||||||
|
"heckel.io/ntfy/v2/user"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -29,6 +30,7 @@ var flagsUser = append(
|
|||||||
&cli.StringFlag{Name: "config", Aliases: []string{"c"}, EnvVars: []string{"NTFY_CONFIG_FILE"}, Value: server.DefaultConfigFile, DefaultText: server.DefaultConfigFile, Usage: "config file"},
|
&cli.StringFlag{Name: "config", Aliases: []string{"c"}, EnvVars: []string{"NTFY_CONFIG_FILE"}, Value: server.DefaultConfigFile, DefaultText: server.DefaultConfigFile, Usage: "config file"},
|
||||||
altsrc.NewStringFlag(&cli.StringFlag{Name: "auth-file", Aliases: []string{"auth_file", "H"}, EnvVars: []string{"NTFY_AUTH_FILE"}, Usage: "auth database file used for access control"}),
|
altsrc.NewStringFlag(&cli.StringFlag{Name: "auth-file", Aliases: []string{"auth_file", "H"}, EnvVars: []string{"NTFY_AUTH_FILE"}, Usage: "auth database file used for access control"}),
|
||||||
altsrc.NewStringFlag(&cli.StringFlag{Name: "auth-default-access", Aliases: []string{"auth_default_access", "p"}, EnvVars: []string{"NTFY_AUTH_DEFAULT_ACCESS"}, Value: "read-write", Usage: "default permissions if no matching entries in the auth database are found"}),
|
altsrc.NewStringFlag(&cli.StringFlag{Name: "auth-default-access", Aliases: []string{"auth_default_access", "p"}, EnvVars: []string{"NTFY_AUTH_DEFAULT_ACCESS"}, Value: "read-write", Usage: "default permissions if no matching entries in the auth database are found"}),
|
||||||
|
altsrc.NewStringFlag(&cli.StringFlag{Name: "database-url", Aliases: []string{"database_url"}, EnvVars: []string{"NTFY_DATABASE_URL"}, Usage: "PostgreSQL connection string for database-backed stores"}),
|
||||||
)
|
)
|
||||||
|
|
||||||
var cmdUser = &cli.Command{
|
var cmdUser = &cli.Command{
|
||||||
@@ -365,24 +367,30 @@ func createUserManager(c *cli.Context) (*user.Manager, error) {
|
|||||||
authFile := c.String("auth-file")
|
authFile := c.String("auth-file")
|
||||||
authStartupQueries := c.String("auth-startup-queries")
|
authStartupQueries := c.String("auth-startup-queries")
|
||||||
authDefaultAccess := c.String("auth-default-access")
|
authDefaultAccess := c.String("auth-default-access")
|
||||||
if authFile == "" {
|
databaseURL := c.String("database-url")
|
||||||
return nil, errors.New("option auth-file not set; auth is unconfigured for this server")
|
|
||||||
} else if !util.FileExists(authFile) {
|
|
||||||
return nil, errors.New("auth-file does not exist; please start the server at least once to create it")
|
|
||||||
}
|
|
||||||
authDefault, err := user.ParsePermission(authDefaultAccess)
|
authDefault, err := user.ParsePermission(authDefaultAccess)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.New("if set, auth-default-access must start set to 'read-write', 'read-only', 'write-only' or 'deny-all'")
|
return nil, errors.New("if set, auth-default-access must start set to 'read-write', 'read-only', 'write-only' or 'deny-all'")
|
||||||
}
|
}
|
||||||
authConfig := &user.Config{
|
authConfig := &user.Config{
|
||||||
Filename: authFile,
|
|
||||||
StartupQueries: authStartupQueries,
|
|
||||||
DefaultAccess: authDefault,
|
DefaultAccess: authDefault,
|
||||||
ProvisionEnabled: false, // Hack: Do not re-provision users on manager initialization
|
ProvisionEnabled: false, // Hack: Do not re-provision users on manager initialization
|
||||||
BcryptCost: user.DefaultUserPasswordBcryptCost,
|
BcryptCost: user.DefaultUserPasswordBcryptCost,
|
||||||
QueueWriterInterval: user.DefaultUserStatsQueueWriterInterval,
|
QueueWriterInterval: user.DefaultUserStatsQueueWriterInterval,
|
||||||
}
|
}
|
||||||
return user.NewManager(authConfig)
|
if databaseURL != "" {
|
||||||
|
pool, dbErr := db.OpenPostgres(databaseURL)
|
||||||
|
if dbErr != nil {
|
||||||
|
return nil, dbErr
|
||||||
|
}
|
||||||
|
return user.NewPostgresManager(pool, authConfig)
|
||||||
|
} else if authFile != "" {
|
||||||
|
if !util.FileExists(authFile) {
|
||||||
|
return nil, errors.New("auth-file does not exist; please start the server at least once to create it")
|
||||||
|
}
|
||||||
|
return user.NewSQLiteManager(authFile, authStartupQueries, authConfig)
|
||||||
|
}
|
||||||
|
return nil, errors.New("option database-url or auth-file not set; auth is unconfigured for this server")
|
||||||
}
|
}
|
||||||
|
|
||||||
func readPasswordAndConfirm(c *cli.Context) (string, error) {
|
func readPasswordAndConfirm(c *cli.Context) (string, error) {
|
||||||
|
|||||||
93
db/db.go
Normal file
93
db/db.go
Normal file
@@ -0,0 +1,93 @@
|
|||||||
|
package db
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
_ "github.com/jackc/pgx/v5/stdlib" // PostgreSQL driver
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
paramMaxOpenConns = "pool_max_conns"
|
||||||
|
paramMaxIdleConns = "pool_max_idle_conns"
|
||||||
|
paramConnMaxLifetime = "pool_conn_max_lifetime"
|
||||||
|
paramConnMaxIdleTime = "pool_conn_max_idle_time"
|
||||||
|
|
||||||
|
defaultMaxOpenConns = 10
|
||||||
|
)
|
||||||
|
|
||||||
|
// OpenPostgres opens a PostgreSQL database connection pool from a DSN string. It supports custom
|
||||||
|
// query parameters for pool configuration: pool_max_conns (default 10), pool_max_idle_conns,
|
||||||
|
// pool_conn_max_lifetime, and pool_conn_max_idle_time. These parameters are stripped from
|
||||||
|
// the DSN before passing it to the driver.
|
||||||
|
func OpenPostgres(dsn string) (*sql.DB, error) {
|
||||||
|
u, err := url.Parse(dsn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid database URL: %w", err)
|
||||||
|
}
|
||||||
|
q := u.Query()
|
||||||
|
maxOpenConns, err := extractIntParam(q, paramMaxOpenConns, defaultMaxOpenConns)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
maxIdleConns, err := extractIntParam(q, paramMaxIdleConns, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
connMaxLifetime, err := extractDurationParam(q, paramConnMaxLifetime, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
connMaxIdleTime, err := extractDurationParam(q, paramConnMaxIdleTime, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
u.RawQuery = q.Encode()
|
||||||
|
db, err := sql.Open("pgx", u.String())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
db.SetMaxOpenConns(maxOpenConns)
|
||||||
|
if maxIdleConns > 0 {
|
||||||
|
db.SetMaxIdleConns(maxIdleConns)
|
||||||
|
}
|
||||||
|
if connMaxLifetime > 0 {
|
||||||
|
db.SetConnMaxLifetime(connMaxLifetime)
|
||||||
|
}
|
||||||
|
if connMaxIdleTime > 0 {
|
||||||
|
db.SetConnMaxIdleTime(connMaxIdleTime)
|
||||||
|
}
|
||||||
|
if err := db.Ping(); err != nil {
|
||||||
|
return nil, fmt.Errorf("ping failed: %w", err)
|
||||||
|
}
|
||||||
|
return db, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractIntParam(q url.Values, key string, defaultValue int) (int, error) {
|
||||||
|
s := q.Get(key)
|
||||||
|
if s == "" {
|
||||||
|
return defaultValue, nil
|
||||||
|
}
|
||||||
|
q.Del(key)
|
||||||
|
v, err := strconv.Atoi(s)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("invalid %s value %q: %w", key, s, err)
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractDurationParam(q url.Values, key string, defaultValue time.Duration) (time.Duration, error) {
|
||||||
|
s := q.Get(key)
|
||||||
|
if s == "" {
|
||||||
|
return defaultValue, nil
|
||||||
|
}
|
||||||
|
q.Del(key)
|
||||||
|
d, err := time.ParseDuration(s)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("invalid %s value %q: %w", key, s, err)
|
||||||
|
}
|
||||||
|
return d, nil
|
||||||
|
}
|
||||||
63
db/test/test.go
Normal file
63
db/test/test.go
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
package dbtest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"heckel.io/ntfy/v2/db"
|
||||||
|
"heckel.io/ntfy/v2/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
const testPoolMaxConns = "2"
|
||||||
|
|
||||||
|
// CreateTestPostgresSchema creates a temporary PostgreSQL schema and returns the DSN pointing to it.
|
||||||
|
// It registers a cleanup function to drop the schema when the test finishes.
|
||||||
|
// If NTFY_TEST_DATABASE_URL is not set, the test is skipped.
|
||||||
|
func CreateTestPostgresSchema(t *testing.T) string {
|
||||||
|
t.Helper()
|
||||||
|
dsn := os.Getenv("NTFY_TEST_DATABASE_URL")
|
||||||
|
if dsn == "" {
|
||||||
|
t.Skip("NTFY_TEST_DATABASE_URL not set")
|
||||||
|
}
|
||||||
|
schema := fmt.Sprintf("test_%s", util.RandomString(10))
|
||||||
|
u, err := url.Parse(dsn)
|
||||||
|
require.Nil(t, err)
|
||||||
|
q := u.Query()
|
||||||
|
q.Set("pool_max_conns", testPoolMaxConns)
|
||||||
|
u.RawQuery = q.Encode()
|
||||||
|
dsn = u.String()
|
||||||
|
setupDB, err := db.OpenPostgres(dsn)
|
||||||
|
require.Nil(t, err)
|
||||||
|
_, err = setupDB.Exec(fmt.Sprintf("CREATE SCHEMA %s", schema))
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Nil(t, setupDB.Close())
|
||||||
|
q.Set("search_path", schema)
|
||||||
|
u.RawQuery = q.Encode()
|
||||||
|
schemaDSN := u.String()
|
||||||
|
t.Cleanup(func() {
|
||||||
|
cleanDB, err := db.OpenPostgres(dsn)
|
||||||
|
if err == nil {
|
||||||
|
cleanDB.Exec(fmt.Sprintf("DROP SCHEMA %s CASCADE", schema))
|
||||||
|
cleanDB.Close()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return schemaDSN
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateTestPostgres creates a temporary PostgreSQL schema and returns an open *sql.DB connection to it.
|
||||||
|
// It registers cleanup functions to close the DB and drop the schema when the test finishes.
|
||||||
|
// If NTFY_TEST_DATABASE_URL is not set, the test is skipped.
|
||||||
|
func CreateTestPostgres(t *testing.T) *sql.DB {
|
||||||
|
t.Helper()
|
||||||
|
schemaDSN := CreateTestPostgresSchema(t)
|
||||||
|
testDB, err := db.OpenPostgres(schemaDSN)
|
||||||
|
require.Nil(t, err)
|
||||||
|
t.Cleanup(func() {
|
||||||
|
testDB.Close()
|
||||||
|
})
|
||||||
|
return testDB
|
||||||
|
}
|
||||||
@@ -53,6 +53,16 @@ Here are a few working sample configs using a `/etc/ntfy/server.yml` file:
|
|||||||
behind-proxy: true
|
behind-proxy: true
|
||||||
```
|
```
|
||||||
|
|
||||||
|
=== "server.yml (PostgreSQL, behind proxy)"
|
||||||
|
``` yaml
|
||||||
|
base-url: "https://ntfy.example.com"
|
||||||
|
listen-http: ":2586"
|
||||||
|
database-url: "postgres://ntfy:mypassword@db.example.com:5432/ntfy?sslmode=require"
|
||||||
|
attachment-cache-dir: "/var/cache/ntfy/attachments"
|
||||||
|
behind-proxy: true
|
||||||
|
auth-default-access: "deny-all"
|
||||||
|
```
|
||||||
|
|
||||||
=== "server.yml (ntfy.sh config)"
|
=== "server.yml (ntfy.sh config)"
|
||||||
``` yaml
|
``` yaml
|
||||||
# All the things: Behind a proxy, Firebase, cache, attachments,
|
# All the things: Behind a proxy, Firebase, cache, attachments,
|
||||||
@@ -125,16 +135,63 @@ using Docker Compose (i.e. `docker-compose.yml`):
|
|||||||
command: serve
|
command: serve
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Database options
|
||||||
|
ntfy uses a database for storing messages ([message cache](#message-cache)), users and [access control](#access-control), and [web push](#web-push) subscriptions.
|
||||||
|
You can choose between **SQLite** and **PostgreSQL** as the database backend.
|
||||||
|
|
||||||
|
### SQLite
|
||||||
|
By default, ntfy uses SQLite with separate database files for each store. This is the simplest setup and requires
|
||||||
|
no external dependencies:
|
||||||
|
|
||||||
|
* `cache-file`: Database file for the [message cache](#message-cache).
|
||||||
|
* `auth-file`: Database file for authentication and [access control](#access-control). If set, enables auth.
|
||||||
|
* `web-push-file`: Database file for [web push](#web-push) subscriptions.
|
||||||
|
|
||||||
|
### PostgreSQL (EXPERIMENTAL)
|
||||||
|
As an alternative, you can configure ntfy to use PostgreSQL for **all** database-backed stores by setting the
|
||||||
|
`database-url` option to a PostgreSQL connection string:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
database-url: "postgres://user:pass@host:5432/ntfy"
|
||||||
|
```
|
||||||
|
|
||||||
|
When `database-url` is set, ntfy will use PostgreSQL for the [message cache](#message-cache),
|
||||||
|
[access control](#access-control), and [web push](#web-push) subscriptions instead of SQLite. The `cache-file`,
|
||||||
|
`auth-file`, and `web-push-file` options **must not** be set in this case.
|
||||||
|
|
||||||
|
Note that setting `database-url` implicitly enables authentication and access control (equivalent to setting
|
||||||
|
`auth-file` with SQLite). The default access is `read-write`, so anonymous users can still read and write to all
|
||||||
|
topics. To restrict access, set `auth-default-access` to `deny-all` (see [access control](#access-control)).
|
||||||
|
|
||||||
|
You can also set this via the environment variable `NTFY_DATABASE_URL` or the command line flag `--database-url`.
|
||||||
|
|
||||||
|
The database URL supports the standard [PostgreSQL connection parameters](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS)
|
||||||
|
as query parameters, such as `sslmode`, `connect_timeout`, `sslcert`, `sslkey`, `sslrootcert`, and `application_name`.
|
||||||
|
See the [pgx driver documentation](https://pkg.go.dev/github.com/jackc/pgx/v5) for the full list of supported parameters.
|
||||||
|
|
||||||
|
In addition, ntfy supports the following custom query parameters to tune the connection pool:
|
||||||
|
|
||||||
|
| Parameter | Default | Description |
|
||||||
|
|---------------------------|---------|----------------------------------------------------------------------------------|
|
||||||
|
| `pool_max_conns` | 10 | Maximum number of open connections to the database |
|
||||||
|
| `pool_max_idle_conns` | - | Maximum number of idle connections in the pool |
|
||||||
|
| `pool_conn_max_lifetime` | - | Maximum amount of time a connection may be reused (Go duration, e.g. `5m`, `1h`) |
|
||||||
|
| `pool_conn_max_idle_time` | - | Maximum amount of time a connection may be idle (Go duration, e.g. `30s`, `5m`) |
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
database-url: "postgres://user:pass@host:5432/ntfy?sslmode=require&pool_max_conns=50&pool_conn_max_idle_time=5m"
|
||||||
|
```
|
||||||
|
|
||||||
## Message cache
|
## Message cache
|
||||||
If desired, ntfy can temporarily keep notifications in an in-memory or an on-disk cache. Caching messages for a short period
|
If desired, ntfy can temporarily keep notifications in an in-memory or an on-disk cache. Caching messages for a short period
|
||||||
of time is important to allow [phones](subscribe/phone.md) and other devices with brittle Internet connections to be able to retrieve
|
of time is important to allow [phones](subscribe/phone.md) and other devices with brittle Internet connections to be able to retrieve
|
||||||
notifications that they may have missed.
|
notifications that they may have missed.
|
||||||
|
|
||||||
By default, ntfy keeps messages **in-memory for 12 hours**, which means that **cached messages do not survive an application
|
By default, ntfy keeps messages **in-memory for 12 hours**, which means that **cached messages do not survive an application
|
||||||
restart**. You can override this behavior using the following config settings:
|
restart**. You can override this behavior by setting `cache-file` (SQLite) or `database-url` (PostgreSQL).
|
||||||
|
|
||||||
* `cache-file`: if set, ntfy will store messages in a SQLite based cache (default is empty, which means in-memory cache).
|
|
||||||
**This is required if you'd like messages to be retained across restarts**.
|
|
||||||
* `cache-duration`: defines the duration for which messages are stored in the cache (default is `12h`).
|
* `cache-duration`: defines the duration for which messages are stored in the cache (default is `12h`).
|
||||||
|
|
||||||
You can also entirely disable the cache by setting `cache-duration` to `0`. When the cache is disabled, messages are only
|
You can also entirely disable the cache by setting `cache-duration` to `0`. When the cache is disabled, messages are only
|
||||||
@@ -185,14 +242,15 @@ and `visitor-attachment-daily-bandwidth-limit`. Setting these conservatively is
|
|||||||
By default, the ntfy server is open for everyone, meaning **everyone can read and write to any topic** (this is how
|
By default, the ntfy server is open for everyone, meaning **everyone can read and write to any topic** (this is how
|
||||||
ntfy.sh is configured). To restrict access to your own server, you can optionally configure authentication and authorization.
|
ntfy.sh is configured). To restrict access to your own server, you can optionally configure authentication and authorization.
|
||||||
|
|
||||||
ntfy's auth is implemented with a simple [SQLite](https://www.sqlite.org/)-based backend. It implements two roles
|
ntfy's auth implements two roles (`user` and `admin`) and per-topic `read` and `write` permissions using an
|
||||||
(`user` and `admin`) and per-topic `read` and `write` permissions using an [access control list (ACL)](https://en.wikipedia.org/wiki/Access-control_list).
|
[access control list (ACL)](https://en.wikipedia.org/wiki/Access-control_list). Access control entries can be applied
|
||||||
Access control entries can be applied to users as well as the special everyone user (`*`), which represents anonymous API access.
|
to users as well as the special everyone user (`*`), which represents anonymous API access.
|
||||||
|
|
||||||
To set up auth, **configure the following options**:
|
To set up auth, **configure the following options**:
|
||||||
|
|
||||||
* `auth-file` is the user/access database; it is created automatically if it doesn't already exist; suggested
|
* `auth-file` is the user/access database (SQLite); it is created automatically if it doesn't already exist; suggested
|
||||||
location `/var/lib/ntfy/user.db` (easiest if deb/rpm package is used)
|
location `/var/lib/ntfy/user.db` (easiest if deb/rpm package is used). Alternatively, if `database-url` is set,
|
||||||
|
auth is automatically enabled using PostgreSQL (see [database options](#database-options)).
|
||||||
* `auth-default-access` defines the default/fallback access if no access control entry is found; it can be
|
* `auth-default-access` defines the default/fallback access if no access control entry is found; it can be
|
||||||
set to `read-write` (default), `read-only`, `write-only` or `deny-all`. **If you are setting up a private instance,
|
set to `read-write` (default), `read-only`, `write-only` or `deny-all`. **If you are setting up a private instance,
|
||||||
you'll want to set this to `deny-all`** (see [private instance example](#example-private-instance)).
|
you'll want to set this to `deny-all`** (see [private instance example](#example-private-instance)).
|
||||||
@@ -1141,12 +1199,15 @@ a database to keep track of the browser's subscriptions, and an admin email addr
|
|||||||
|
|
||||||
- `web-push-public-key` is the generated VAPID public key, e.g. AA1234BBCCddvveekaabcdfqwertyuiopasdfghjklzxcvbnm1234567890
|
- `web-push-public-key` is the generated VAPID public key, e.g. AA1234BBCCddvveekaabcdfqwertyuiopasdfghjklzxcvbnm1234567890
|
||||||
- `web-push-private-key` is the generated VAPID private key, e.g. AA2BB1234567890abcdefzxcvbnm1234567890
|
- `web-push-private-key` is the generated VAPID private key, e.g. AA2BB1234567890abcdefzxcvbnm1234567890
|
||||||
- `web-push-file` is a database file to keep track of browser subscription endpoints, e.g. `/var/cache/ntfy/webpush.db`
|
- `web-push-file` is a database file to keep track of browser subscription endpoints, e.g. `/var/cache/ntfy/webpush.db` (not required if `database-url` is set)
|
||||||
- `web-push-email-address` is the admin email address sent to the push provider, e.g. `sysadmin@example.com`
|
- `web-push-email-address` is the admin email address sent to the push provider, e.g. `sysadmin@example.com`
|
||||||
- `web-push-startup-queries` is an optional list of queries to run on startup
|
- `web-push-startup-queries` is an optional list of queries to run on startup
|
||||||
- `web-push-expiry-warning-duration` defines the duration after which unused subscriptions are sent a warning (default is `55d`)
|
- `web-push-expiry-warning-duration` defines the duration after which unused subscriptions are sent a warning (default is `55d`)
|
||||||
- `web-push-expiry-duration` defines the duration after which unused subscriptions will expire (default is `60d`)
|
- `web-push-expiry-duration` defines the duration after which unused subscriptions will expire (default is `60d`)
|
||||||
|
|
||||||
|
Alternatively, you can use PostgreSQL instead of SQLite by setting `database-url`
|
||||||
|
(see [PostgreSQL database](#postgresql-experimental)).
|
||||||
|
|
||||||
Limitations:
|
Limitations:
|
||||||
|
|
||||||
- Like foreground browser notifications, background push notifications require the web app to be served over HTTPS. A _valid_
|
- Like foreground browser notifications, background push notifications require the web app to be served over HTTPS. A _valid_
|
||||||
@@ -1172,9 +1233,10 @@ web-push-file: /var/cache/ntfy/webpush.db
|
|||||||
web-push-email-address: sysadmin@example.com
|
web-push-email-address: sysadmin@example.com
|
||||||
```
|
```
|
||||||
|
|
||||||
The `web-push-file` is used to store the push subscriptions. Unused subscriptions will send out a warning after 55 days,
|
The `web-push-file` is used to store the push subscriptions in a local SQLite database. Alternatively, if `database-url`
|
||||||
and will automatically expire after 60 days (default). If the gateway returns an error (e.g. 410 Gone when a user has unsubscribed),
|
is set, subscriptions are stored in PostgreSQL and `web-push-file` is not required. Unused subscriptions will send out
|
||||||
subscriptions are also removed automatically.
|
a warning after 55 days, and will automatically expire after 60 days (default). If the gateway returns an error
|
||||||
|
(e.g. 410 Gone when a user has unsubscribed), subscriptions are also removed automatically.
|
||||||
|
|
||||||
The web app refreshes subscriptions on start and regularly on an interval, but this file should be persisted across restarts. If the subscription
|
The web app refreshes subscriptions on start and regularly on an interval, but this file should be persisted across restarts. If the subscription
|
||||||
file is deleted or lost, any web apps that aren't open will not receive new web push notifications until you open them.
|
file is deleted or lost, any web apps that aren't open will not receive new web push notifications until you open them.
|
||||||
@@ -1755,12 +1817,13 @@ variable before running the `ntfy` command (e.g. `export NTFY_LISTEN_HTTP=:80`).
|
|||||||
| `key-file` | `NTFY_KEY_FILE` | *filename* | - | HTTPS/TLS private key file, only used if `listen-https` is set. |
|
| `key-file` | `NTFY_KEY_FILE` | *filename* | - | HTTPS/TLS private key file, only used if `listen-https` is set. |
|
||||||
| `cert-file` | `NTFY_CERT_FILE` | *filename* | - | HTTPS/TLS certificate file, only used if `listen-https` is set. |
|
| `cert-file` | `NTFY_CERT_FILE` | *filename* | - | HTTPS/TLS certificate file, only used if `listen-https` is set. |
|
||||||
| `firebase-key-file` | `NTFY_FIREBASE_KEY_FILE` | *filename* | - | If set, also publish messages to a Firebase Cloud Messaging (FCM) topic for your app. This is optional and only required to save battery when using the Android app. See [Firebase (FCM)](#firebase-fcm). |
|
| `firebase-key-file` | `NTFY_FIREBASE_KEY_FILE` | *filename* | - | If set, also publish messages to a Firebase Cloud Messaging (FCM) topic for your app. This is optional and only required to save battery when using the Android app. See [Firebase (FCM)](#firebase-fcm). |
|
||||||
|
| `database-url` | `NTFY_DATABASE_URL` | *string (connection URL)* | - | PostgreSQL connection string (e.g. `postgres://user:pass@host:5432/ntfy`). If set, uses PostgreSQL for all database-backed stores (message cache, user manager, web push) instead of SQLite. See [database options](#database-options). |
|
||||||
| `cache-file` | `NTFY_CACHE_FILE` | *filename* | - | If set, messages are cached in a local SQLite database instead of only in-memory. This allows for service restarts without losing messages in support of the since= parameter. See [message cache](#message-cache). |
|
| `cache-file` | `NTFY_CACHE_FILE` | *filename* | - | If set, messages are cached in a local SQLite database instead of only in-memory. This allows for service restarts without losing messages in support of the since= parameter. See [message cache](#message-cache). |
|
||||||
| `cache-duration` | `NTFY_CACHE_DURATION` | *duration* | 12h | Duration for which messages will be buffered before they are deleted. This is required to support the `since=...` and `poll=1` parameter. Set this to `0` to disable the cache entirely. |
|
| `cache-duration` | `NTFY_CACHE_DURATION` | *duration* | 12h | Duration for which messages will be buffered before they are deleted. This is required to support the `since=...` and `poll=1` parameter. Set this to `0` to disable the cache entirely. |
|
||||||
| `cache-startup-queries` | `NTFY_CACHE_STARTUP_QUERIES` | *string (SQL queries)* | - | SQL queries to run during database startup; this is useful for tuning and [enabling WAL mode](#message-cache) |
|
| `cache-startup-queries` | `NTFY_CACHE_STARTUP_QUERIES` | *string (SQL queries)* | - | SQL queries to run during database startup; this is useful for tuning and [enabling WAL mode](#message-cache) |
|
||||||
| `cache-batch-size` | `NTFY_CACHE_BATCH_SIZE` | *int* | 0 | Max size of messages to batch together when writing to message cache (if zero, writes are synchronous) |
|
| `cache-batch-size` | `NTFY_CACHE_BATCH_SIZE` | *int* | 0 | Max size of messages to batch together when writing to message cache (if zero, writes are synchronous) |
|
||||||
| `cache-batch-timeout` | `NTFY_CACHE_BATCH_TIMEOUT` | *duration* | 0s | Timeout for batched async writes to the message cache (if zero, writes are synchronous) |
|
| `cache-batch-timeout` | `NTFY_CACHE_BATCH_TIMEOUT` | *duration* | 0s | Timeout for batched async writes to the message cache (if zero, writes are synchronous) |
|
||||||
| `auth-file` | `NTFY_AUTH_FILE` | *filename* | - | Auth database file used for access control. If set, enables authentication and access control. See [access control](#access-control). |
|
| `auth-file` | `NTFY_AUTH_FILE` | *filename* | - | Auth database file used for access control (SQLite). If set, enables authentication and access control. Not required if `database-url` is set. See [access control](#access-control). |
|
||||||
| `auth-default-access` | `NTFY_AUTH_DEFAULT_ACCESS` | `read-write`, `read-only`, `write-only`, `deny-all` | `read-write` | Default permissions if no matching entries in the auth database are found. Default is `read-write`. |
|
| `auth-default-access` | `NTFY_AUTH_DEFAULT_ACCESS` | `read-write`, `read-only`, `write-only`, `deny-all` | `read-write` | Default permissions if no matching entries in the auth database are found. Default is `read-write`. |
|
||||||
| `behind-proxy` | `NTFY_BEHIND_PROXY` | *bool* | false | If set, use forwarded header (e.g. X-Forwarded-For, X-Client-IP) to determine visitor IP address (for rate limiting) |
|
| `behind-proxy` | `NTFY_BEHIND_PROXY` | *bool* | false | If set, use forwarded header (e.g. X-Forwarded-For, X-Client-IP) to determine visitor IP address (for rate limiting) |
|
||||||
| `proxy-forwarded-header` | `NTFY_PROXY_FORWARDED_HEADER` | *string* | `X-Forwarded-For` | Use specified header to determine visitor IP address (for rate limiting) |
|
| `proxy-forwarded-header` | `NTFY_PROXY_FORWARDED_HEADER` | *string* | `X-Forwarded-For` | Use specified header to determine visitor IP address (for rate limiting) |
|
||||||
|
|||||||
@@ -1721,6 +1721,16 @@ and the [ntfy Android app](https://github.com/binwiederhier/ntfy-android/release
|
|||||||
|
|
||||||
### ntfy server v2.18.x (UNRELEASED)
|
### ntfy server v2.18.x (UNRELEASED)
|
||||||
|
|
||||||
|
**Features:**
|
||||||
|
|
||||||
|
* Add experimental [PostgreSQL support](config.md#postgresql-experimental) as an alternative database backend (message cache, user manager, web push subscriptions) via `database-url` config option ([#1114](https://github.com/binwiederhier/ntfy/issues/1114), thanks to [@brettinternet](https://github.com/brettinternet) for reporting)
|
||||||
|
|
||||||
**Bug fixes + maintenance:**
|
**Bug fixes + maintenance:**
|
||||||
|
|
||||||
* Preserve `<br>` line breaks in HTML-only emails received via SMTP ([#690](https://github.com/binwiederhier/ntfy/issues/690), [#1620](https://github.com/binwiederhier/ntfy/pull/1620), thanks to [@uzkikh](https://github.com/uzkikh) for the fix and to [@teastrainer](https://github.com/teastrainer) for reporting)
|
* Preserve `<br>` line breaks in HTML-only emails received via SMTP ([#690](https://github.com/binwiederhier/ntfy/issues/690), [#1620](https://github.com/binwiederhier/ntfy/pull/1620), thanks to [@uzkikh](https://github.com/uzkikh) for the fix and to [@teastrainer](https://github.com/teastrainer) for reporting)
|
||||||
|
|
||||||
|
### ntfy Android v1.24.x (UNRELEASED)
|
||||||
|
|
||||||
|
**Bug fixes + maintenance:**
|
||||||
|
|
||||||
|
* Fix crash in settings when fragment is detached during backup/restore or log operations
|
||||||
|
|||||||
4
go.mod
4
go.mod
@@ -30,6 +30,7 @@ require github.com/pkg/errors v0.9.1 // indirect
|
|||||||
require (
|
require (
|
||||||
firebase.google.com/go/v4 v4.19.0
|
firebase.google.com/go/v4 v4.19.0
|
||||||
github.com/SherClockHolmes/webpush-go v1.4.0
|
github.com/SherClockHolmes/webpush-go v1.4.0
|
||||||
|
github.com/jackc/pgx/v5 v5.8.0
|
||||||
github.com/microcosm-cc/bluemonday v1.0.27
|
github.com/microcosm-cc/bluemonday v1.0.27
|
||||||
github.com/prometheus/client_golang v1.23.2
|
github.com/prometheus/client_golang v1.23.2
|
||||||
github.com/stripe/stripe-go/v74 v74.30.0
|
github.com/stripe/stripe-go/v74 v74.30.0
|
||||||
@@ -71,6 +72,9 @@ require (
|
|||||||
github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect
|
github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect
|
||||||
github.com/googleapis/gax-go/v2 v2.17.0 // indirect
|
github.com/googleapis/gax-go/v2 v2.17.0 // indirect
|
||||||
github.com/gorilla/css v1.0.1 // indirect
|
github.com/gorilla/css v1.0.1 // indirect
|
||||||
|
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||||
|
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||||
|
github.com/jackc/puddle/v2 v2.2.2 // indirect
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||||
|
|||||||
9
go.sum
9
go.sum
@@ -104,6 +104,14 @@ github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
|
|||||||
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
|
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
|
||||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
|
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||||
|
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||||
|
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||||
|
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||||
|
github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo=
|
||||||
|
github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=
|
||||||
|
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||||
|
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
@@ -144,6 +152,7 @@ github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xI
|
|||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||||
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||||
|
|||||||
606
message/cache.go
Normal file
606
message/cache.go
Normal file
@@ -0,0 +1,606 @@
|
|||||||
|
package message
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"net/netip"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"heckel.io/ntfy/v2/log"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
|
"heckel.io/ntfy/v2/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
tagMessageCache = "message_cache"
|
||||||
|
)
|
||||||
|
|
||||||
|
var errNoRows = errors.New("no rows found")
|
||||||
|
|
||||||
|
// queries holds the database-specific SQL queries (one set per backend flavor),
// so the Cache logic itself stays backend-agnostic. Each field is a complete SQL
// statement string, executed via database/sql with positional parameters.
type queries struct {
	// Message writes and deletions
	insertMessage                    string
	deleteMessage                    string
	selectScheduledMessageIDsBySeqID string
	deleteScheduledBySequenceID      string
	updateMessagesForTopicExpiry     string

	// Message reads (by ID, by since-marker, latest, due/expired)
	selectMessagesByID               string
	selectMessagesSinceTime          string
	selectMessagesSinceTimeScheduled string
	selectMessagesSinceID            string
	selectMessagesSinceIDScheduled   string
	selectMessagesLatest             string
	selectMessagesDue                string
	selectMessagesExpired            string
	updateMessagePublished           string
	selectMessagesCount              string
	selectTopics                     string

	// Attachment bookkeeping
	updateAttachmentDeleted       string
	selectAttachmentsExpired      string
	selectAttachmentsSizeBySender string
	selectAttachmentsSizeByUserID string

	// Stats and test helpers
	selectStats       string
	updateStats       string
	updateMessageTime string
}
|
||||||
|
|
||||||
|
// Cache stores published messages in a SQL database. Writes may be batched
// asynchronously (see queue) and are optionally serialized through a mutex.
type Cache struct {
	db      *sql.DB                              // underlying database handle
	queue   *util.BatchingQueue[*model.Message]  // non-nil only if async batched writes are enabled (batchSize/batchTimeout > 0)
	nop     bool                                 // if true, writes are silently discarded (cache disabled)
	mu      *sync.Mutex                          // nil for PostgreSQL (concurrent writes supported), set for SQLite (single writer)
	queries queries                              // backend-specific SQL statements
}
|
||||||
|
|
||||||
|
func newCache(db *sql.DB, queries queries, mu *sync.Mutex, batchSize int, batchTimeout time.Duration, nop bool) *Cache {
|
||||||
|
var queue *util.BatchingQueue[*model.Message]
|
||||||
|
if batchSize > 0 || batchTimeout > 0 {
|
||||||
|
queue = util.NewBatchingQueue[*model.Message](batchSize, batchTimeout)
|
||||||
|
}
|
||||||
|
c := &Cache{
|
||||||
|
db: db,
|
||||||
|
queue: queue,
|
||||||
|
nop: nop,
|
||||||
|
mu: mu,
|
||||||
|
queries: queries,
|
||||||
|
}
|
||||||
|
go c.processMessageBatches()
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) maybeLock() {
|
||||||
|
if c.mu != nil {
|
||||||
|
c.mu.Lock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) maybeUnlock() {
|
||||||
|
if c.mu != nil {
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMessage stores a message to the message cache synchronously, or queues it to be stored at a later date asynchronously.
|
||||||
|
// The message is queued only if "batchSize" or "batchTimeout" are passed to the constructor.
|
||||||
|
func (c *Cache) AddMessage(m *model.Message) error {
|
||||||
|
if c.queue != nil {
|
||||||
|
c.queue.Enqueue(m)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return c.addMessages([]*model.Message{m})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMessages synchronously stores a batch of messages to the message cache
|
||||||
|
func (c *Cache) AddMessages(ms []*model.Message) error {
|
||||||
|
return c.addMessages(ms)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) addMessages(ms []*model.Message) error {
|
||||||
|
c.maybeLock()
|
||||||
|
defer c.maybeUnlock()
|
||||||
|
if c.nop {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if len(ms) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
start := time.Now()
|
||||||
|
tx, err := c.db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
stmt, err := tx.Prepare(c.queries.insertMessage)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer stmt.Close()
|
||||||
|
for _, m := range ms {
|
||||||
|
if m.Event != model.MessageEvent && m.Event != model.MessageDeleteEvent && m.Event != model.MessageClearEvent {
|
||||||
|
return model.ErrUnexpectedMessageType
|
||||||
|
}
|
||||||
|
published := m.Time <= time.Now().Unix()
|
||||||
|
tags := strings.Join(m.Tags, ",")
|
||||||
|
var attachmentName, attachmentType, attachmentURL string
|
||||||
|
var attachmentSize, attachmentExpires int64
|
||||||
|
var attachmentDeleted bool
|
||||||
|
if m.Attachment != nil {
|
||||||
|
attachmentName = m.Attachment.Name
|
||||||
|
attachmentType = m.Attachment.Type
|
||||||
|
attachmentSize = m.Attachment.Size
|
||||||
|
attachmentExpires = m.Attachment.Expires
|
||||||
|
attachmentURL = m.Attachment.URL
|
||||||
|
}
|
||||||
|
var actionsStr string
|
||||||
|
if len(m.Actions) > 0 {
|
||||||
|
actionsBytes, err := json.Marshal(m.Actions)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
actionsStr = string(actionsBytes)
|
||||||
|
}
|
||||||
|
var sender string
|
||||||
|
if m.Sender.IsValid() {
|
||||||
|
sender = m.Sender.String()
|
||||||
|
}
|
||||||
|
_, err := stmt.Exec(
|
||||||
|
m.ID,
|
||||||
|
m.SequenceID,
|
||||||
|
m.Time,
|
||||||
|
m.Event,
|
||||||
|
m.Expires,
|
||||||
|
m.Topic,
|
||||||
|
m.Message,
|
||||||
|
m.Title,
|
||||||
|
m.Priority,
|
||||||
|
tags,
|
||||||
|
m.Click,
|
||||||
|
m.Icon,
|
||||||
|
actionsStr,
|
||||||
|
attachmentName,
|
||||||
|
attachmentType,
|
||||||
|
attachmentSize,
|
||||||
|
attachmentExpires,
|
||||||
|
attachmentURL,
|
||||||
|
attachmentDeleted, // Always zero
|
||||||
|
sender,
|
||||||
|
m.User,
|
||||||
|
m.ContentType,
|
||||||
|
m.Encoding,
|
||||||
|
published,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := tx.Commit(); err != nil {
|
||||||
|
log.Tag(tagMessageCache).Err(err).Error("Writing %d message(s) failed (took %v)", len(ms), time.Since(start))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Tag(tagMessageCache).Debug("Wrote %d message(s) in %v", len(ms), time.Since(start))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Messages returns messages for a topic since the given marker, optionally including scheduled messages
|
||||||
|
func (c *Cache) Messages(topic string, since model.SinceMarker, scheduled bool) ([]*model.Message, error) {
|
||||||
|
if since.IsNone() {
|
||||||
|
return make([]*model.Message, 0), nil
|
||||||
|
} else if since.IsLatest() {
|
||||||
|
return c.messagesLatest(topic)
|
||||||
|
} else if since.IsID() {
|
||||||
|
return c.messagesSinceID(topic, since, scheduled)
|
||||||
|
}
|
||||||
|
return c.messagesSinceTime(topic, since, scheduled)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) messagesSinceTime(topic string, since model.SinceMarker, scheduled bool) ([]*model.Message, error) {
|
||||||
|
var rows *sql.Rows
|
||||||
|
var err error
|
||||||
|
if scheduled {
|
||||||
|
rows, err = c.db.Query(c.queries.selectMessagesSinceTimeScheduled, topic, since.Time().Unix())
|
||||||
|
} else {
|
||||||
|
rows, err = c.db.Query(c.queries.selectMessagesSinceTime, topic, since.Time().Unix())
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return readMessages(rows)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) messagesSinceID(topic string, since model.SinceMarker, scheduled bool) ([]*model.Message, error) {
|
||||||
|
var rows *sql.Rows
|
||||||
|
var err error
|
||||||
|
if scheduled {
|
||||||
|
rows, err = c.db.Query(c.queries.selectMessagesSinceIDScheduled, topic, since.ID())
|
||||||
|
} else {
|
||||||
|
rows, err = c.db.Query(c.queries.selectMessagesSinceID, topic, since.ID())
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return readMessages(rows)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) messagesLatest(topic string) ([]*model.Message, error) {
|
||||||
|
rows, err := c.db.Query(c.queries.selectMessagesLatest, topic)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return readMessages(rows)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MessagesDue returns all messages that are due for publishing
|
||||||
|
func (c *Cache) MessagesDue() ([]*model.Message, error) {
|
||||||
|
rows, err := c.db.Query(c.queries.selectMessagesDue, time.Now().Unix())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return readMessages(rows)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MessagesExpired returns a list of IDs for messages that have expired (should be deleted)
|
||||||
|
func (c *Cache) MessagesExpired() ([]string, error) {
|
||||||
|
rows, err := c.db.Query(c.queries.selectMessagesExpired, time.Now().Unix())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
ids := make([]string, 0)
|
||||||
|
for rows.Next() {
|
||||||
|
var id string
|
||||||
|
if err := rows.Scan(&id); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ids = append(ids, id)
|
||||||
|
}
|
||||||
|
if err := rows.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Message returns the message with the given ID, or ErrMessageNotFound if not found
|
||||||
|
func (c *Cache) Message(id string) (*model.Message, error) {
|
||||||
|
rows, err := c.db.Query(c.queries.selectMessagesByID, id)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if !rows.Next() {
|
||||||
|
return nil, model.ErrMessageNotFound
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return readMessage(rows)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateMessageTime updates the time column for a message by ID. This is only used for testing.
|
||||||
|
func (c *Cache) UpdateMessageTime(messageID string, timestamp int64) error {
|
||||||
|
c.maybeLock()
|
||||||
|
defer c.maybeUnlock()
|
||||||
|
_, err := c.db.Exec(c.queries.updateMessageTime, timestamp, messageID)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarkPublished marks a message as published
|
||||||
|
func (c *Cache) MarkPublished(m *model.Message) error {
|
||||||
|
c.maybeLock()
|
||||||
|
defer c.maybeUnlock()
|
||||||
|
_, err := c.db.Exec(c.queries.updateMessagePublished, m.ID)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MessagesCount returns the total number of messages in the cache
|
||||||
|
func (c *Cache) MessagesCount() (int, error) {
|
||||||
|
rows, err := c.db.Query(c.queries.selectMessagesCount)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
if !rows.Next() {
|
||||||
|
return 0, errNoRows
|
||||||
|
}
|
||||||
|
var count int
|
||||||
|
if err := rows.Scan(&count); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return count, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Topics returns a list of all topics with messages in the cache
|
||||||
|
func (c *Cache) Topics() ([]string, error) {
|
||||||
|
rows, err := c.db.Query(c.queries.selectTopics)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
topics := make([]string, 0)
|
||||||
|
for rows.Next() {
|
||||||
|
var id string
|
||||||
|
if err := rows.Scan(&id); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
topics = append(topics, id)
|
||||||
|
}
|
||||||
|
if err := rows.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return topics, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteMessages deletes the messages with the given IDs
|
||||||
|
func (c *Cache) DeleteMessages(ids ...string) error {
|
||||||
|
c.maybeLock()
|
||||||
|
defer c.maybeUnlock()
|
||||||
|
tx, err := c.db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
for _, id := range ids {
|
||||||
|
if _, err := tx.Exec(c.queries.deleteMessage, id); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteScheduledBySequenceID deletes unpublished (scheduled) messages with the given topic and sequence ID.
// It returns the message IDs of the deleted messages, which can be used to clean up attachment files.
// Select and delete run in one transaction so the returned IDs match exactly what was deleted.
func (c *Cache) DeleteScheduledBySequenceID(topic, sequenceID string) ([]string, error) {
	c.maybeLock()
	defer c.maybeUnlock()
	tx, err := c.db.Begin()
	if err != nil {
		return nil, err
	}
	defer tx.Rollback() // no-op once the transaction is committed
	// First, get the message IDs of scheduled messages to be deleted
	rows, err := tx.Query(c.queries.selectScheduledMessageIDsBySeqID, topic, sequenceID)
	if err != nil {
		return nil, err
	}
	defer rows.Close() // safety net for the early-return paths; Close is idempotent
	ids := make([]string, 0)
	for rows.Next() {
		var id string
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	rows.Close() // Close rows before executing delete in same transaction
	// Then delete the messages
	if _, err := tx.Exec(c.queries.deleteScheduledBySequenceID, topic, sequenceID); err != nil {
		return nil, err
	}
	if err := tx.Commit(); err != nil {
		return nil, err
	}
	return ids, nil
}
|
||||||
|
|
||||||
|
// ExpireMessages marks messages in the given topics as expired
|
||||||
|
func (c *Cache) ExpireMessages(topics ...string) error {
|
||||||
|
c.maybeLock()
|
||||||
|
defer c.maybeUnlock()
|
||||||
|
tx, err := c.db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
for _, t := range topics {
|
||||||
|
if _, err := tx.Exec(c.queries.updateMessagesForTopicExpiry, time.Now().Unix()-1, t); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// AttachmentsExpired returns message IDs with expired attachments that have not been deleted
|
||||||
|
func (c *Cache) AttachmentsExpired() ([]string, error) {
|
||||||
|
rows, err := c.db.Query(c.queries.selectAttachmentsExpired, time.Now().Unix())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
ids := make([]string, 0)
|
||||||
|
for rows.Next() {
|
||||||
|
var id string
|
||||||
|
if err := rows.Scan(&id); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ids = append(ids, id)
|
||||||
|
}
|
||||||
|
if err := rows.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarkAttachmentsDeleted marks the attachments for the given message IDs as deleted
|
||||||
|
func (c *Cache) MarkAttachmentsDeleted(ids ...string) error {
|
||||||
|
c.maybeLock()
|
||||||
|
defer c.maybeUnlock()
|
||||||
|
tx, err := c.db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
for _, id := range ids {
|
||||||
|
if _, err := tx.Exec(c.queries.updateAttachmentDeleted, id); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// AttachmentBytesUsedBySender returns the total size of active attachments sent by the given sender.
// Only attachments whose expiry is in the future count; the query excludes rows with a user ID,
// so this covers anonymous (IP-identified) senders only.
func (c *Cache) AttachmentBytesUsedBySender(sender string) (int64, error) {
	rows, err := c.db.Query(c.queries.selectAttachmentsSizeBySender, sender, time.Now().Unix())
	if err != nil {
		return 0, err
	}
	// readAttachmentBytesUsed closes rows
	return c.readAttachmentBytesUsed(rows)
}
|
||||||
|
|
||||||
|
// AttachmentBytesUsedByUser returns the total size of active attachments for the given user.
// Only attachments whose expiry is in the future count.
func (c *Cache) AttachmentBytesUsedByUser(userID string) (int64, error) {
	rows, err := c.db.Query(c.queries.selectAttachmentsSizeByUserID, userID, time.Now().Unix())
	if err != nil {
		return 0, err
	}
	// readAttachmentBytesUsed closes rows
	return c.readAttachmentBytesUsed(rows)
}
|
||||||
|
|
||||||
|
func (c *Cache) readAttachmentBytesUsed(rows *sql.Rows) (int64, error) {
|
||||||
|
defer rows.Close()
|
||||||
|
var size int64
|
||||||
|
if !rows.Next() {
|
||||||
|
return 0, errors.New("no rows found")
|
||||||
|
}
|
||||||
|
if err := rows.Scan(&size); err != nil {
|
||||||
|
return 0, err
|
||||||
|
} else if err := rows.Err(); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return size, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateStats updates the total message count statistic.
// The value overwrites the stored counter; it is not incremented here.
func (c *Cache) UpdateStats(messages int64) error {
	c.maybeLock()
	defer c.maybeUnlock()
	_, err := c.db.Exec(c.queries.updateStats, messages)
	return err
}
|
||||||
|
|
||||||
|
// Stats returns the total message count statistic.
// The stats row is seeded at schema creation time, so an empty result set
// (errNoRows) indicates a broken or uninitialized database.
func (c *Cache) Stats() (messages int64, err error) {
	rows, err := c.db.Query(c.queries.selectStats)
	if err != nil {
		return 0, err
	}
	defer rows.Close()
	if !rows.Next() {
		return 0, errNoRows
	}
	if err := rows.Scan(&messages); err != nil {
		return 0, err
	}
	return messages, nil
}
|
||||||
|
|
||||||
|
// Close closes the underlying database connection.
// NOTE(review): it does not stop or drain the batch write queue — confirm that
// callers flush pending writes before closing.
func (c *Cache) Close() error {
	return c.db.Close()
}
|
||||||
|
|
||||||
|
// processMessageBatches continuously dequeues message batches from the write
// queue and persists them. It returns immediately if batching is disabled
// (nil queue) and otherwise runs until the Dequeue channel is closed.
// Write failures are logged and the batch is dropped; processing continues.
func (c *Cache) processMessageBatches() {
	if c.queue == nil {
		return
	}
	for messages := range c.queue.Dequeue() {
		if err := c.addMessages(messages); err != nil {
			log.Tag(tagMessageCache).Err(err).Error("Cannot write message batch")
		}
	}
}
|
||||||
|
|
||||||
|
func readMessages(rows *sql.Rows) ([]*model.Message, error) {
|
||||||
|
defer rows.Close()
|
||||||
|
messages := make([]*model.Message, 0)
|
||||||
|
for rows.Next() {
|
||||||
|
m, err := readMessage(rows)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
messages = append(messages, m)
|
||||||
|
}
|
||||||
|
if err := rows.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return messages, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readMessage reads a single message from the current row of the given result set.
// The Scan targets must match the column order of the selectMessages* queries
// exactly (mid, sequence_id, time, event, expires, topic, ...); do not reorder.
func readMessage(rows *sql.Rows) (*model.Message, error) {
	var timestamp, expires, attachmentSize, attachmentExpires int64
	var priority int
	var id, sequenceID, event, topic, msg, title, tagsStr, click, icon, actionsStr, attachmentName, attachmentType, attachmentURL, sender, user, contentType, encoding string
	err := rows.Scan(
		&id,
		&sequenceID,
		&timestamp,
		&event,
		&expires,
		&topic,
		&msg,
		&title,
		&priority,
		&tagsStr,
		&click,
		&icon,
		&actionsStr,
		&attachmentName,
		&attachmentType,
		&attachmentSize,
		&attachmentExpires,
		&attachmentURL,
		&sender,
		&user,
		&contentType,
		&encoding,
	)
	if err != nil {
		return nil, err
	}
	// Tags are stored as a single comma-separated string
	var tags []string
	if tagsStr != "" {
		tags = strings.Split(tagsStr, ",")
	}
	// Actions are stored as a JSON-encoded array
	var actions []*model.Action
	if actionsStr != "" {
		if err := json.Unmarshal([]byte(actionsStr), &actions); err != nil {
			return nil, err
		}
	}
	senderIP, err := netip.ParseAddr(sender)
	if err != nil {
		senderIP = netip.Addr{} // if no IP stored in database, return invalid address
	}
	// Only reconstruct the attachment if both name and URL are present
	var att *model.Attachment
	if attachmentName != "" && attachmentURL != "" {
		att = &model.Attachment{
			Name:    attachmentName,
			Type:    attachmentType,
			Size:    attachmentSize,
			Expires: attachmentExpires,
			URL:     attachmentURL,
		}
	}
	return &model.Message{
		ID:          id,
		SequenceID:  sequenceID,
		Time:        timestamp,
		Expires:     expires,
		Event:       event,
		Topic:       topic,
		Message:     msg,
		Title:       title,
		Priority:    priority,
		Tags:        tags,
		Click:       click,
		Icon:        icon,
		Actions:     actions,
		Attachment:  att,
		Sender:      senderIP,
		User:        user,
		ContentType: contentType,
		Encoding:    encoding,
	}, nil
}
|
||||||
110
message/cache_postgres.go
Normal file
110
message/cache_postgres.go
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
package message
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PostgreSQL runtime query constants.
//
// These mirror the SQLite queries but use positional placeholders ($1, $2, ...),
// native BOOLEAN literals for the published/attachment_deleted flags, the table
// name "message" (singular), and the column "user_id" instead of SQLite's "user".
const (
	postgresInsertMessageQuery = `
		INSERT INTO message (mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, attachment_deleted, sender, user_id, content_type, encoding, published)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24)
	`
	postgresDeleteMessageQuery                    = `DELETE FROM message WHERE mid = $1`
	postgresSelectScheduledMessageIDsBySeqIDQuery = `SELECT mid FROM message WHERE topic = $1 AND sequence_id = $2 AND published = FALSE`
	postgresDeleteScheduledBySequenceIDQuery      = `DELETE FROM message WHERE topic = $1 AND sequence_id = $2 AND published = FALSE`
	postgresUpdateMessagesForTopicExpiryQuery     = `UPDATE message SET expires = $1 WHERE topic = $2`
	postgresSelectMessagesByIDQuery               = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user_id, content_type, encoding
		FROM message
		WHERE mid = $1
	`
	postgresSelectMessagesSinceTimeQuery = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user_id, content_type, encoding
		FROM message
		WHERE topic = $1 AND time >= $2 AND published = TRUE
		ORDER BY time, id
	`
	postgresSelectMessagesSinceTimeIncludeScheduledQuery = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user_id, content_type, encoding
		FROM message
		WHERE topic = $1 AND time >= $2
		ORDER BY time, id
	`
	// "Since ID" queries resolve the client-supplied message ID (mid) to the
	// internal serial id via a COALESCE subquery; an unknown mid falls back to 0
	// (i.e. everything matches).
	postgresSelectMessagesSinceIDQuery = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user_id, content_type, encoding
		FROM message
		WHERE topic = $1
		AND id > COALESCE((SELECT id FROM message WHERE mid = $2), 0)
		AND published = TRUE
		ORDER BY time, id
	`
	postgresSelectMessagesSinceIDIncludeScheduledQuery = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user_id, content_type, encoding
		FROM message
		WHERE topic = $1
		AND (id > COALESCE((SELECT id FROM message WHERE mid = $2), 0) OR published = FALSE)
		ORDER BY time, id
	`
	postgresSelectMessagesLatestQuery = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user_id, content_type, encoding
		FROM message
		WHERE topic = $1 AND published = TRUE
		ORDER BY time DESC, id DESC
		LIMIT 1
	`
	postgresSelectMessagesDueQuery = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user_id, content_type, encoding
		FROM message
		WHERE time <= $1 AND published = FALSE
		ORDER BY time, id
	`
	postgresSelectMessagesExpiredQuery  = `SELECT mid FROM message WHERE expires <= $1 AND published = TRUE`
	postgresUpdateMessagePublishedQuery = `UPDATE message SET published = TRUE WHERE mid = $1`
	postgresSelectMessagesCountQuery    = `SELECT COUNT(*) FROM message`
	postgresSelectTopicsQuery           = `SELECT topic FROM message GROUP BY topic`

	// Attachment bookkeeping
	postgresUpdateAttachmentDeletedQuery       = `UPDATE message SET attachment_deleted = TRUE WHERE mid = $1`
	postgresSelectAttachmentsExpiredQuery      = `SELECT mid FROM message WHERE attachment_expires > 0 AND attachment_expires <= $1 AND attachment_deleted = FALSE`
	postgresSelectAttachmentsSizeBySenderQuery = `SELECT COALESCE(SUM(attachment_size), 0) FROM message WHERE user_id = '' AND sender = $1 AND attachment_expires >= $2`
	postgresSelectAttachmentsSizeByUserIDQuery = `SELECT COALESCE(SUM(attachment_size), 0) FROM message WHERE user_id = $1 AND attachment_expires >= $2`

	// Stats and scheduling
	postgresSelectStatsQuery       = `SELECT value FROM message_stats WHERE key = 'messages'`
	postgresUpdateStatsQuery       = `UPDATE message_stats SET value = $1 WHERE key = 'messages'`
	postgresUpdateMessageTimeQuery = `UPDATE message SET time = $1 WHERE mid = $2`
)
|
||||||
|
|
||||||
|
// pgQueries maps the shared queries struct to the PostgreSQL dialect. The field
// set must stay in sync with sqliteQueries so the Cache methods behave identically
// on either backend.
var pgQueries = queries{
	insertMessage:                    postgresInsertMessageQuery,
	deleteMessage:                    postgresDeleteMessageQuery,
	selectScheduledMessageIDsBySeqID: postgresSelectScheduledMessageIDsBySeqIDQuery,
	deleteScheduledBySequenceID:      postgresDeleteScheduledBySequenceIDQuery,
	updateMessagesForTopicExpiry:     postgresUpdateMessagesForTopicExpiryQuery,
	selectMessagesByID:               postgresSelectMessagesByIDQuery,
	selectMessagesSinceTime:          postgresSelectMessagesSinceTimeQuery,
	selectMessagesSinceTimeScheduled: postgresSelectMessagesSinceTimeIncludeScheduledQuery,
	selectMessagesSinceID:            postgresSelectMessagesSinceIDQuery,
	selectMessagesSinceIDScheduled:   postgresSelectMessagesSinceIDIncludeScheduledQuery,
	selectMessagesLatest:             postgresSelectMessagesLatestQuery,
	selectMessagesDue:                postgresSelectMessagesDueQuery,
	selectMessagesExpired:            postgresSelectMessagesExpiredQuery,
	updateMessagePublished:           postgresUpdateMessagePublishedQuery,
	selectMessagesCount:              postgresSelectMessagesCountQuery,
	selectTopics:                     postgresSelectTopicsQuery,
	updateAttachmentDeleted:          postgresUpdateAttachmentDeletedQuery,
	selectAttachmentsExpired:         postgresSelectAttachmentsExpiredQuery,
	selectAttachmentsSizeBySender:    postgresSelectAttachmentsSizeBySenderQuery,
	selectAttachmentsSizeByUserID:    postgresSelectAttachmentsSizeByUserIDQuery,
	selectStats:                      postgresSelectStatsQuery,
	updateStats:                      postgresUpdateStatsQuery,
	updateMessageTime:                postgresUpdateMessageTimeQuery,
}
|
||||||
|
|
||||||
|
// NewPostgresStore creates a new PostgreSQL-backed message cache store using an existing database connection pool.
// It creates or verifies the schema via setupPostgres. Note that the returned Cache's Close method closes db.
func NewPostgresStore(db *sql.DB, batchSize int, batchTimeout time.Duration) (*Cache, error) {
	if err := setupPostgres(db); err != nil {
		return nil, err
	}
	// nil mutex: unlike the SQLite store, writes are not serialized here
	return newCache(db, pgQueries, nil, batchSize, batchTimeout, false), nil
}
|
||||||
88
message/cache_postgres_schema.go
Normal file
88
message/cache_postgres_schema.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
package message
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Initial PostgreSQL schema.
//
// Every statement is idempotent so that setup can safely be re-run against a
// partially initialized database: tables and indexes use IF NOT EXISTS, and the
// stats seed row uses ON CONFLICT DO NOTHING (a plain INSERT would fail with a
// duplicate-key error on the second run).
const (
	postgresCreateTablesQuery = `
		CREATE TABLE IF NOT EXISTS message (
			id BIGSERIAL PRIMARY KEY,
			mid TEXT NOT NULL,
			sequence_id TEXT NOT NULL,
			time BIGINT NOT NULL,
			event TEXT NOT NULL,
			expires BIGINT NOT NULL,
			topic TEXT NOT NULL,
			message TEXT NOT NULL,
			title TEXT NOT NULL,
			priority INT NOT NULL,
			tags TEXT NOT NULL,
			click TEXT NOT NULL,
			icon TEXT NOT NULL,
			actions TEXT NOT NULL,
			attachment_name TEXT NOT NULL,
			attachment_type TEXT NOT NULL,
			attachment_size BIGINT NOT NULL,
			attachment_expires BIGINT NOT NULL,
			attachment_url TEXT NOT NULL,
			attachment_deleted BOOLEAN NOT NULL DEFAULT FALSE,
			sender TEXT NOT NULL,
			user_id TEXT NOT NULL,
			content_type TEXT NOT NULL,
			encoding TEXT NOT NULL,
			published BOOLEAN NOT NULL DEFAULT FALSE
		);
		CREATE INDEX IF NOT EXISTS idx_message_mid ON message (mid);
		CREATE INDEX IF NOT EXISTS idx_message_sequence_id ON message (sequence_id);
		CREATE INDEX IF NOT EXISTS idx_message_topic_published_time ON message (topic, published, time, id);
		CREATE INDEX IF NOT EXISTS idx_message_published_expires ON message (published, expires);
		CREATE INDEX IF NOT EXISTS idx_message_sender_attachment_expires ON message (sender, attachment_expires) WHERE user_id = '';
		CREATE INDEX IF NOT EXISTS idx_message_user_id_attachment_expires ON message (user_id, attachment_expires);
		CREATE TABLE IF NOT EXISTS message_stats (
			key TEXT PRIMARY KEY,
			value BIGINT
		);
		INSERT INTO message_stats (key, value) VALUES ('messages', 0) ON CONFLICT (key) DO NOTHING;
		CREATE TABLE IF NOT EXISTS schema_version (
			store TEXT PRIMARY KEY,
			version INT NOT NULL
		);
	`
)
|
||||||
|
|
||||||
|
// PostgreSQL schema management queries.
// The schema_version table is shared across stores; this package uses the
// 'message' row. Version 14 is the initial PostgreSQL schema version.
const (
	pgCurrentSchemaVersion           = 14
	postgresInsertSchemaVersionQuery = `INSERT INTO schema_version (store, version) VALUES ('message', $1)`
	postgresSelectSchemaVersionQuery = `SELECT version FROM schema_version WHERE store = 'message'`
)
|
||||||
|
|
||||||
|
func setupPostgres(db *sql.DB) error {
|
||||||
|
var schemaVersion int
|
||||||
|
err := db.QueryRow(postgresSelectSchemaVersionQuery).Scan(&schemaVersion)
|
||||||
|
if err != nil {
|
||||||
|
return setupNewPostgresDB(db)
|
||||||
|
}
|
||||||
|
if schemaVersion > pgCurrentSchemaVersion {
|
||||||
|
return fmt.Errorf("unexpected schema version: version %d is higher than current version %d", schemaVersion, pgCurrentSchemaVersion)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// setupNewPostgresDB creates the initial schema and records the schema version,
// all in a single transaction so a failed setup leaves no partial state behind.
func setupNewPostgresDB(db *sql.DB) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback() // no-op once the transaction is committed
	if _, err := tx.Exec(postgresCreateTablesQuery); err != nil {
		return err
	}
	if _, err := tx.Exec(postgresInsertSchemaVersionQuery, pgCurrentSchemaVersion); err != nil {
		return err
	}
	return tx.Commit()
}
|
||||||
142
message/cache_sqlite.go
Normal file
142
message/cache_sqlite.go
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
package message
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
_ "github.com/mattn/go-sqlite3" // SQLite driver
|
||||||
|
"heckel.io/ntfy/v2/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SQLite runtime query constants.
//
// These mirror the PostgreSQL queries but use ? placeholders, integer 0/1 for
// the published/attachment_deleted flags, the table name "messages" (plural),
// and the column "user" instead of PostgreSQL's "user_id".
const (
	sqliteInsertMessageQuery = `
		INSERT INTO messages (mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, attachment_deleted, sender, user, content_type, encoding, published)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
	`
	sqliteDeleteMessageQuery                    = `DELETE FROM messages WHERE mid = ?`
	sqliteSelectScheduledMessageIDsBySeqIDQuery = `SELECT mid FROM messages WHERE topic = ? AND sequence_id = ? AND published = 0`
	sqliteDeleteScheduledBySequenceIDQuery      = `DELETE FROM messages WHERE topic = ? AND sequence_id = ? AND published = 0`
	sqliteUpdateMessagesForTopicExpiryQuery     = `UPDATE messages SET expires = ? WHERE topic = ?`
	sqliteSelectMessagesByIDQuery               = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user, content_type, encoding
		FROM messages
		WHERE mid = ?
	`
	sqliteSelectMessagesSinceTimeQuery = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user, content_type, encoding
		FROM messages
		WHERE topic = ? AND time >= ? AND published = 1
		ORDER BY time, id
	`
	sqliteSelectMessagesSinceTimeIncludeScheduledQuery = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user, content_type, encoding
		FROM messages
		WHERE topic = ? AND time >= ?
		ORDER BY time, id
	`
	// "Since ID" queries resolve the client-supplied message ID (mid) to the
	// internal rowid via a COALESCE subquery; an unknown mid falls back to 0.
	sqliteSelectMessagesSinceIDQuery = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user, content_type, encoding
		FROM messages
		WHERE topic = ? AND id > COALESCE((SELECT id FROM messages WHERE mid = ?), 0) AND published = 1
		ORDER BY time, id
	`
	sqliteSelectMessagesSinceIDIncludeScheduledQuery = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user, content_type, encoding
		FROM messages
		WHERE topic = ? AND (id > COALESCE((SELECT id FROM messages WHERE mid = ?), 0) OR published = 0)
		ORDER BY time, id
	`
	sqliteSelectMessagesLatestQuery = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user, content_type, encoding
		FROM messages
		WHERE topic = ? AND published = 1
		ORDER BY time DESC, id DESC
		LIMIT 1
	`
	sqliteSelectMessagesDueQuery = `
		SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, user, content_type, encoding
		FROM messages
		WHERE time <= ? AND published = 0
		ORDER BY time, id
	`
	sqliteSelectMessagesExpiredQuery  = `SELECT mid FROM messages WHERE expires <= ? AND published = 1`
	sqliteUpdateMessagePublishedQuery = `UPDATE messages SET published = 1 WHERE mid = ?`
	sqliteSelectMessagesCountQuery    = `SELECT COUNT(*) FROM messages`
	sqliteSelectTopicsQuery           = `SELECT topic FROM messages GROUP BY topic`

	// Attachment bookkeeping
	sqliteUpdateAttachmentDeletedQuery       = `UPDATE messages SET attachment_deleted = 1 WHERE mid = ?`
	sqliteSelectAttachmentsExpiredQuery      = `SELECT mid FROM messages WHERE attachment_expires > 0 AND attachment_expires <= ? AND attachment_deleted = 0`
	sqliteSelectAttachmentsSizeBySenderQuery = `SELECT IFNULL(SUM(attachment_size), 0) FROM messages WHERE user = '' AND sender = ? AND attachment_expires >= ?`
	sqliteSelectAttachmentsSizeByUserIDQuery = `SELECT IFNULL(SUM(attachment_size), 0) FROM messages WHERE user = ? AND attachment_expires >= ?`

	// Stats and scheduling
	sqliteSelectStatsQuery       = `SELECT value FROM stats WHERE key = 'messages'`
	sqliteUpdateStatsQuery       = `UPDATE stats SET value = ? WHERE key = 'messages'`
	sqliteUpdateMessageTimeQuery = `UPDATE messages SET time = ? WHERE mid = ?`
)
|
||||||
|
|
||||||
|
// sqliteQueries maps the shared queries struct to the SQLite dialect. The field
// set must stay in sync with pgQueries so the Cache methods behave identically
// on either backend.
var sqliteQueries = queries{
	insertMessage:                    sqliteInsertMessageQuery,
	deleteMessage:                    sqliteDeleteMessageQuery,
	selectScheduledMessageIDsBySeqID: sqliteSelectScheduledMessageIDsBySeqIDQuery,
	deleteScheduledBySequenceID:      sqliteDeleteScheduledBySequenceIDQuery,
	updateMessagesForTopicExpiry:     sqliteUpdateMessagesForTopicExpiryQuery,
	selectMessagesByID:               sqliteSelectMessagesByIDQuery,
	selectMessagesSinceTime:          sqliteSelectMessagesSinceTimeQuery,
	selectMessagesSinceTimeScheduled: sqliteSelectMessagesSinceTimeIncludeScheduledQuery,
	selectMessagesSinceID:            sqliteSelectMessagesSinceIDQuery,
	selectMessagesSinceIDScheduled:   sqliteSelectMessagesSinceIDIncludeScheduledQuery,
	selectMessagesLatest:             sqliteSelectMessagesLatestQuery,
	selectMessagesDue:                sqliteSelectMessagesDueQuery,
	selectMessagesExpired:            sqliteSelectMessagesExpiredQuery,
	updateMessagePublished:           sqliteUpdateMessagePublishedQuery,
	selectMessagesCount:              sqliteSelectMessagesCountQuery,
	selectTopics:                     sqliteSelectTopicsQuery,
	updateAttachmentDeleted:          sqliteUpdateAttachmentDeletedQuery,
	selectAttachmentsExpired:         sqliteSelectAttachmentsExpiredQuery,
	selectAttachmentsSizeBySender:    sqliteSelectAttachmentsSizeBySenderQuery,
	selectAttachmentsSizeByUserID:    sqliteSelectAttachmentsSizeByUserIDQuery,
	selectStats:                      sqliteSelectStatsQuery,
	updateStats:                      sqliteUpdateStatsQuery,
	updateMessageTime:                sqliteUpdateMessageTimeQuery,
}
|
||||||
|
|
||||||
|
// NewSQLiteStore creates a SQLite file-backed cache
|
||||||
|
func NewSQLiteStore(filename, startupQueries string, cacheDuration time.Duration, batchSize int, batchTimeout time.Duration, nop bool) (*Cache, error) {
|
||||||
|
parentDir := filepath.Dir(filename)
|
||||||
|
if !util.FileExists(parentDir) {
|
||||||
|
return nil, fmt.Errorf("cache database directory %s does not exist or is not accessible", parentDir)
|
||||||
|
}
|
||||||
|
db, err := sql.Open("sqlite3", filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := setupSQLite(db, startupQueries, cacheDuration); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newCache(db, sqliteQueries, &sync.Mutex{}, batchSize, batchTimeout, nop), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMemStore creates an in-memory cache.
// It is backed by a uniquely named in-memory SQLite database with batching
// disabled (batchSize and batchTimeout zero).
func NewMemStore() (*Cache, error) {
	return NewSQLiteStore(createMemoryFilename(), "", 0, 0, 0, false)
}
|
||||||
|
|
||||||
|
// NewNopStore creates an in-memory cache that discards all messages;
// it is always empty and can be used if caching is entirely disabled.
func NewNopStore() (*Cache, error) {
	return NewSQLiteStore(createMemoryFilename(), "", 0, 0, 0, true) // nop=true
}
|
||||||
|
|
||||||
|
// createMemoryFilename creates a unique memory filename to use for the SQLite backend.
|
||||||
|
// From mattn/go-sqlite3: "Each connection to ":memory:" opens a brand new in-memory
|
||||||
|
// sql database, so if the stdlib's sql engine happens to open another connection and
|
||||||
|
// you've only specified ":memory:", that connection will see a brand new database.
|
||||||
|
// A workaround is to use "file::memory:?cache=shared" (or "file:foobar?mode=memory&cache=shared").
|
||||||
|
// Every connection to this string will point to the same in-memory database."
|
||||||
|
func createMemoryFilename() string {
|
||||||
|
return fmt.Sprintf("file:%s?mode=memory&cache=shared", util.RandomString(10))
|
||||||
|
}
|
||||||
466
message/cache_sqlite_schema.go
Normal file
466
message/cache_sqlite_schema.go
Normal file
@@ -0,0 +1,466 @@
|
|||||||
|
package message
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"heckel.io/ntfy/v2/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Initial SQLite schema.
// Uses an explicit BEGIN/COMMIT because the whole script is executed as one Exec.
// Note the plural table name "messages" and the "user" column, which differ from
// the PostgreSQL schema ("message", "user_id").
const (
	sqliteCreateTablesQuery = `
		BEGIN;
		CREATE TABLE IF NOT EXISTS messages (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			mid TEXT NOT NULL,
			sequence_id TEXT NOT NULL,
			time INT NOT NULL,
			event TEXT NOT NULL,
			expires INT NOT NULL,
			topic TEXT NOT NULL,
			message TEXT NOT NULL,
			title TEXT NOT NULL,
			priority INT NOT NULL,
			tags TEXT NOT NULL,
			click TEXT NOT NULL,
			icon TEXT NOT NULL,
			actions TEXT NOT NULL,
			attachment_name TEXT NOT NULL,
			attachment_type TEXT NOT NULL,
			attachment_size INT NOT NULL,
			attachment_expires INT NOT NULL,
			attachment_url TEXT NOT NULL,
			attachment_deleted INT NOT NULL,
			sender TEXT NOT NULL,
			user TEXT NOT NULL,
			content_type TEXT NOT NULL,
			encoding TEXT NOT NULL,
			published INT NOT NULL
		);
		CREATE INDEX IF NOT EXISTS idx_mid ON messages (mid);
		CREATE INDEX IF NOT EXISTS idx_sequence_id ON messages (sequence_id);
		CREATE INDEX IF NOT EXISTS idx_time ON messages (time);
		CREATE INDEX IF NOT EXISTS idx_topic ON messages (topic);
		CREATE INDEX IF NOT EXISTS idx_expires ON messages (expires);
		CREATE INDEX IF NOT EXISTS idx_sender ON messages (sender);
		CREATE INDEX IF NOT EXISTS idx_user ON messages (user);
		CREATE INDEX IF NOT EXISTS idx_attachment_expires ON messages (attachment_expires);
		CREATE TABLE IF NOT EXISTS stats (
			key TEXT PRIMARY KEY,
			value INT
		);
		INSERT INTO stats (key, value) VALUES ('messages', 0);
		COMMIT;
	`
)
|
||||||
|
|
||||||
|
// Schema version management for SQLite.
// The schemaVersion table holds a single row with id = 1; version 14 is the
// current schema version (matching pgCurrentSchemaVersion).
const (
	sqliteCurrentSchemaVersion          = 14
	sqliteCreateSchemaVersionTableQuery = `
		CREATE TABLE IF NOT EXISTS schemaVersion (
			id INT PRIMARY KEY,
			version INT NOT NULL
		);
	`
	sqliteInsertSchemaVersionQuery = `INSERT INTO schemaVersion VALUES (1, ?)`
	sqliteUpdateSchemaVersionQuery = `UPDATE schemaVersion SET version = ? WHERE id = 1`
	sqliteSelectSchemaVersionQuery = `SELECT version FROM schemaVersion WHERE id = 1`
)
|
||||||
|
|
||||||
|
// Schema migrations for SQLite. Each sqliteMigrateNToM* query upgrades the
// on-disk schema by exactly one version; they are applied in order by
// setupSQLite via the sqliteMigrations map.
const (
	// 0 -> 1: add title, priority and tags columns
	sqliteMigrate0To1AlterMessagesTableQuery = `
		BEGIN;
		ALTER TABLE messages ADD COLUMN title TEXT NOT NULL DEFAULT('');
		ALTER TABLE messages ADD COLUMN priority INT NOT NULL DEFAULT(0);
		ALTER TABLE messages ADD COLUMN tags TEXT NOT NULL DEFAULT('');
		COMMIT;
	`

	// 1 -> 2: add "published" flag (0 = scheduled/delayed delivery)
	sqliteMigrate1To2AlterMessagesTableQuery = `
		ALTER TABLE messages ADD COLUMN published INT NOT NULL DEFAULT(1);
	`

	// 2 -> 3: add click URL and attachment columns
	sqliteMigrate2To3AlterMessagesTableQuery = `
		BEGIN;
		ALTER TABLE messages ADD COLUMN click TEXT NOT NULL DEFAULT('');
		ALTER TABLE messages ADD COLUMN attachment_name TEXT NOT NULL DEFAULT('');
		ALTER TABLE messages ADD COLUMN attachment_type TEXT NOT NULL DEFAULT('');
		ALTER TABLE messages ADD COLUMN attachment_size INT NOT NULL DEFAULT('0');
		ALTER TABLE messages ADD COLUMN attachment_expires INT NOT NULL DEFAULT('0');
		ALTER TABLE messages ADD COLUMN attachment_owner TEXT NOT NULL DEFAULT('');
		ALTER TABLE messages ADD COLUMN attachment_url TEXT NOT NULL DEFAULT('');
		COMMIT;
	`
	// 3 -> 4: add message encoding column
	sqliteMigrate3To4AlterMessagesTableQuery = `
		ALTER TABLE messages ADD COLUMN encoding TEXT NOT NULL DEFAULT('');
	`

	// 4 -> 5: rebuild the messages table with an AUTOINCREMENT integer
	// primary key; the old string primary key "id" becomes the new "mid"
	// column (note: SELECT reads "id" into "mid" deliberately).
	sqliteMigrate4To5AlterMessagesTableQuery = `
		BEGIN;
		CREATE TABLE IF NOT EXISTS messages_new (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			mid TEXT NOT NULL,
			time INT NOT NULL,
			topic TEXT NOT NULL,
			message TEXT NOT NULL,
			title TEXT NOT NULL,
			priority INT NOT NULL,
			tags TEXT NOT NULL,
			click TEXT NOT NULL,
			attachment_name TEXT NOT NULL,
			attachment_type TEXT NOT NULL,
			attachment_size INT NOT NULL,
			attachment_expires INT NOT NULL,
			attachment_url TEXT NOT NULL,
			attachment_owner TEXT NOT NULL,
			encoding TEXT NOT NULL,
			published INT NOT NULL
		);
		CREATE INDEX IF NOT EXISTS idx_mid ON messages_new (mid);
		CREATE INDEX IF NOT EXISTS idx_topic ON messages_new (topic);
		INSERT
		INTO messages_new (
			mid, time, topic, message, title, priority, tags, click, attachment_name, attachment_type,
			attachment_size, attachment_expires, attachment_url, attachment_owner, encoding, published)
		SELECT
			id, time, topic, message, title, priority, tags, click, attachment_name, attachment_type,
			attachment_size, attachment_expires, attachment_url, attachment_owner, encoding, published
		FROM messages;
		DROP TABLE messages;
		ALTER TABLE messages_new RENAME TO messages;
		COMMIT;
	`

	// 5 -> 6: add actions column
	sqliteMigrate5To6AlterMessagesTableQuery = `
		ALTER TABLE messages ADD COLUMN actions TEXT NOT NULL DEFAULT('');
	`

	// 6 -> 7: attachment_owner is renamed to sender
	sqliteMigrate6To7AlterMessagesTableQuery = `
		ALTER TABLE messages RENAME COLUMN attachment_owner TO sender;
	`

	// 7 -> 8: add icon column
	sqliteMigrate7To8AlterMessagesTableQuery = `
		ALTER TABLE messages ADD COLUMN icon TEXT NOT NULL DEFAULT('');
	`

	// 8 -> 9: add index on "time"
	sqliteMigrate8To9AlterMessagesTableQuery = `
		CREATE INDEX IF NOT EXISTS idx_time ON messages (time);
	`

	// 9 -> 10: add user/attachment_deleted/expires columns and indexes;
	// the migration function additionally backfills "expires" for existing
	// rows using the configured cache duration (see the update query below).
	sqliteMigrate9To10AlterMessagesTableQuery = `
		ALTER TABLE messages ADD COLUMN user TEXT NOT NULL DEFAULT('');
		ALTER TABLE messages ADD COLUMN attachment_deleted INT NOT NULL DEFAULT('0');
		ALTER TABLE messages ADD COLUMN expires INT NOT NULL DEFAULT('0');
		CREATE INDEX IF NOT EXISTS idx_expires ON messages (expires);
		CREATE INDEX IF NOT EXISTS idx_sender ON messages (sender);
		CREATE INDEX IF NOT EXISTS idx_user ON messages (user);
		CREATE INDEX IF NOT EXISTS idx_attachment_expires ON messages (attachment_expires);
	`
	// Backfill: expires = time + cache duration (in seconds)
	sqliteMigrate9To10UpdateMessageExpiryQuery = `UPDATE messages SET expires = time + ?`

	// 10 -> 11: add stats table with an initial "messages" counter row
	sqliteMigrate10To11AlterMessagesTableQuery = `
		CREATE TABLE IF NOT EXISTS stats (
			key TEXT PRIMARY KEY,
			value INT
		);
		INSERT INTO stats (key, value) VALUES ('messages', 0);
	`

	// 11 -> 12: add content_type column
	sqliteMigrate11To12AlterMessagesTableQuery = `
		ALTER TABLE messages ADD COLUMN content_type TEXT NOT NULL DEFAULT('');
	`

	// 12 -> 13: (re-)create topic index
	sqliteMigrate12To13AlterMessagesTableQuery = `
		CREATE INDEX IF NOT EXISTS idx_topic ON messages (topic);
	`

	// 13 -> 14: add sequence_id and event columns plus sequence_id index
	sqliteMigrate13To14AlterMessagesTableQuery = `
		ALTER TABLE messages ADD COLUMN sequence_id TEXT NOT NULL DEFAULT('');
		ALTER TABLE messages ADD COLUMN event TEXT NOT NULL DEFAULT('message');
		CREATE INDEX IF NOT EXISTS idx_sequence_id ON messages (sequence_id);
	`
)
|
||||||
|
|
||||||
|
var (
	// sqliteMigrations maps a schema version N to the function that upgrades
	// the database from N to N+1. setupSQLite walks this map step by step
	// until sqliteCurrentSchemaVersion is reached. The cacheDuration argument
	// is only used by the 9 -> 10 step (to backfill the "expires" column).
	sqliteMigrations = map[int]func(db *sql.DB, cacheDuration time.Duration) error{
		0:  sqliteMigrateFrom0,
		1:  sqliteMigrateFrom1,
		2:  sqliteMigrateFrom2,
		3:  sqliteMigrateFrom3,
		4:  sqliteMigrateFrom4,
		5:  sqliteMigrateFrom5,
		6:  sqliteMigrateFrom6,
		7:  sqliteMigrateFrom7,
		8:  sqliteMigrateFrom8,
		9:  sqliteMigrateFrom9,
		10: sqliteMigrateFrom10,
		11: sqliteMigrateFrom11,
		12: sqliteMigrateFrom12,
		13: sqliteMigrateFrom13,
	}
)
|
||||||
|
|
||||||
|
func setupSQLite(db *sql.DB, startupQueries string, cacheDuration time.Duration) error {
|
||||||
|
if err := runSQLiteStartupQueries(db, startupQueries); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// If 'messages' table does not exist, this must be a new database
|
||||||
|
rowsMC, err := db.Query(sqliteSelectMessagesCountQuery)
|
||||||
|
if err != nil {
|
||||||
|
return setupNewSQLite(db)
|
||||||
|
}
|
||||||
|
rowsMC.Close()
|
||||||
|
// If 'messages' table exists, check 'schemaVersion' table
|
||||||
|
schemaVersion := 0
|
||||||
|
rowsSV, err := db.Query(sqliteSelectSchemaVersionQuery)
|
||||||
|
if err == nil {
|
||||||
|
defer rowsSV.Close()
|
||||||
|
if !rowsSV.Next() {
|
||||||
|
return fmt.Errorf("cannot determine schema version: cache file may be corrupt")
|
||||||
|
}
|
||||||
|
if err := rowsSV.Scan(&schemaVersion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rowsSV.Close()
|
||||||
|
}
|
||||||
|
// Do migrations
|
||||||
|
if schemaVersion == sqliteCurrentSchemaVersion {
|
||||||
|
return nil
|
||||||
|
} else if schemaVersion > sqliteCurrentSchemaVersion {
|
||||||
|
return fmt.Errorf("unexpected schema version: version %d is higher than current version %d", schemaVersion, sqliteCurrentSchemaVersion)
|
||||||
|
}
|
||||||
|
for i := schemaVersion; i < sqliteCurrentSchemaVersion; i++ {
|
||||||
|
fn, ok := sqliteMigrations[i]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("cannot find migration step from schema version %d to %d", i, i+1)
|
||||||
|
} else if err := fn(db, cacheDuration); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupNewSQLite(db *sql.DB) error {
|
||||||
|
if _, err := db.Exec(sqliteCreateTablesQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteCreateSchemaVersionTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteInsertSchemaVersionQuery, sqliteCurrentSchemaVersion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// runSQLiteStartupQueries executes the user-configured startup queries (such
// as WAL pragmas) verbatim; an empty string is a no-op.
func runSQLiteStartupQueries(db *sql.DB, startupQueries string) error {
	if startupQueries == "" {
		return nil
	}
	_, err := db.Exec(startupQueries)
	return err
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom0(db *sql.DB, _ time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 0 to 1")
|
||||||
|
if _, err := db.Exec(sqliteMigrate0To1AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteCreateSchemaVersionTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteInsertSchemaVersionQuery, 1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom1(db *sql.DB, _ time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 1 to 2")
|
||||||
|
if _, err := db.Exec(sqliteMigrate1To2AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteUpdateSchemaVersionQuery, 2); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom2(db *sql.DB, _ time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 2 to 3")
|
||||||
|
if _, err := db.Exec(sqliteMigrate2To3AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteUpdateSchemaVersionQuery, 3); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom3(db *sql.DB, _ time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 3 to 4")
|
||||||
|
if _, err := db.Exec(sqliteMigrate3To4AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteUpdateSchemaVersionQuery, 4); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom4(db *sql.DB, _ time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 4 to 5")
|
||||||
|
if _, err := db.Exec(sqliteMigrate4To5AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteUpdateSchemaVersionQuery, 5); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom5(db *sql.DB, _ time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 5 to 6")
|
||||||
|
if _, err := db.Exec(sqliteMigrate5To6AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteUpdateSchemaVersionQuery, 6); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom6(db *sql.DB, _ time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 6 to 7")
|
||||||
|
if _, err := db.Exec(sqliteMigrate6To7AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteUpdateSchemaVersionQuery, 7); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom7(db *sql.DB, _ time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 7 to 8")
|
||||||
|
if _, err := db.Exec(sqliteMigrate7To8AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteUpdateSchemaVersionQuery, 8); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom8(db *sql.DB, _ time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 8 to 9")
|
||||||
|
if _, err := db.Exec(sqliteMigrate8To9AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteUpdateSchemaVersionQuery, 9); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom9(db *sql.DB, cacheDuration time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 9 to 10")
|
||||||
|
tx, err := db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
if _, err := tx.Exec(sqliteMigrate9To10AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := tx.Exec(sqliteMigrate9To10UpdateMessageExpiryQuery, int64(cacheDuration.Seconds())); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := tx.Exec(sqliteUpdateSchemaVersionQuery, 10); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom10(db *sql.DB, _ time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 10 to 11")
|
||||||
|
tx, err := db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
if _, err := tx.Exec(sqliteMigrate10To11AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := tx.Exec(sqliteUpdateSchemaVersionQuery, 11); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom11(db *sql.DB, _ time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 11 to 12")
|
||||||
|
tx, err := db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
if _, err := tx.Exec(sqliteMigrate11To12AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := tx.Exec(sqliteUpdateSchemaVersionQuery, 12); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom12(db *sql.DB, _ time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 12 to 13")
|
||||||
|
tx, err := db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
if _, err := tx.Exec(sqliteMigrate12To13AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := tx.Exec(sqliteUpdateSchemaVersionQuery, 13); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom13(db *sql.DB, _ time.Duration) error {
|
||||||
|
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 13 to 14")
|
||||||
|
tx, err := db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
if _, err := tx.Exec(sqliteMigrate13To14AlterMessagesTableQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := tx.Exec(sqliteUpdateSchemaVersionQuery, 14); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
292
message/cache_sqlite_test.go
Normal file
292
message/cache_sqlite_test.go
Normal file
@@ -0,0 +1,292 @@
|
|||||||
|
package message_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
_ "github.com/mattn/go-sqlite3" // SQLite driver
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"heckel.io/ntfy/v2/message"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestSqliteStore_Migration_From0 verifies that a database using the oldest,
// pre-versioned ("version 0") schema is migrated all the way to the current
// schema version, and that existing rows survive with sensible defaults for
// the columns that did not exist back then (title, tags, priority).
func TestSqliteStore_Migration_From0(t *testing.T) {
	filename := newSqliteTestStoreFile(t)
	db, err := sql.Open("sqlite3", filename)
	require.Nil(t, err)

	// Create "version 0" schema (no schemaVersion table at all)
	_, err = db.Exec(`
		BEGIN;
		CREATE TABLE IF NOT EXISTS messages (
			id VARCHAR(20) PRIMARY KEY,
			time INT NOT NULL,
			topic VARCHAR(64) NOT NULL,
			message VARCHAR(1024) NOT NULL
		);
		CREATE INDEX IF NOT EXISTS idx_topic ON messages (topic);
		COMMIT;
	`)
	require.Nil(t, err)

	// Insert a bunch of messages
	for i := 0; i < 10; i++ {
		_, err = db.Exec(`INSERT INTO messages (id, time, topic, message) VALUES (?, ?, ?, ?)`,
			fmt.Sprintf("abcd%d", i), time.Now().Unix(), "mytopic", fmt.Sprintf("some message %d", i))
		require.Nil(t, err)
	}
	require.Nil(t, db.Close())

	// Create store to trigger migration; verify the version afterwards
	s := newSqliteTestStoreFromFile(t, filename, "")
	checkSqliteSchemaVersion(t, filename)

	// All 10 rows must survive, with zero-value defaults for the new columns
	messages, err := s.Messages("mytopic", model.SinceAllMessages, false)
	require.Nil(t, err)
	require.Equal(t, 10, len(messages))
	require.Equal(t, "some message 5", messages[5].Message)
	require.Equal(t, "", messages[5].Title)
	require.Nil(t, messages[5].Tags)
	require.Equal(t, 0, messages[5].Priority)
}
|
||||||
|
|
||||||
|
// TestSqliteStore_Migration_From1 verifies migration from the "version 1"
// schema (which already has a schemaVersion table) to the current version,
// that scheduled/delayed messages behave correctly afterwards, and that the
// "idx_topic" index exists in the migrated database.
func TestSqliteStore_Migration_From1(t *testing.T) {
	filename := newSqliteTestStoreFile(t)
	db, err := sql.Open("sqlite3", filename)
	require.Nil(t, err)

	// Create "version 1" schema
	_, err = db.Exec(`
		CREATE TABLE IF NOT EXISTS messages (
			id VARCHAR(20) PRIMARY KEY,
			time INT NOT NULL,
			topic VARCHAR(64) NOT NULL,
			message VARCHAR(512) NOT NULL,
			title VARCHAR(256) NOT NULL,
			priority INT NOT NULL,
			tags VARCHAR(256) NOT NULL
		);
		CREATE INDEX IF NOT EXISTS idx_topic ON messages (topic);
		CREATE TABLE IF NOT EXISTS schemaVersion (
			id INT PRIMARY KEY,
			version INT NOT NULL
		);
		INSERT INTO schemaVersion (id, version) VALUES (1, 1);
	`)
	require.Nil(t, err)

	// Insert a bunch of messages
	for i := 0; i < 10; i++ {
		_, err = db.Exec(`INSERT INTO messages (id, time, topic, message, title, priority, tags) VALUES (?, ?, ?, ?, ?, ?, ?)`,
			fmt.Sprintf("abcd%d", i), time.Now().Unix(), "mytopic", fmt.Sprintf("some message %d", i), "", 0, "")
		require.Nil(t, err)
	}
	require.Nil(t, db.Close())

	// Create store to trigger migration
	s := newSqliteTestStoreFromFile(t, filename, "")
	checkSqliteSchemaVersion(t, filename)

	// Add delayed message (due one minute in the future)
	delayedMessage := model.NewDefaultMessage("mytopic", "some delayed message")
	delayedMessage.Time = time.Now().Add(time.Minute).Unix()
	require.Nil(t, s.AddMessage(delayedMessage))

	// 10, not 11! — the delayed message is excluded without the scheduled flag
	messages, err := s.Messages("mytopic", model.SinceAllMessages, false)
	require.Nil(t, err)
	require.Equal(t, 10, len(messages))

	// 11! — including scheduled messages also returns the delayed one
	messages, err = s.Messages("mytopic", model.SinceAllMessages, true)
	require.Nil(t, err)
	require.Equal(t, 11, len(messages))

	// Check that index "idx_topic" exists in the migrated database
	verifyDB, err := sql.Open("sqlite3", filename)
	require.Nil(t, err)
	defer verifyDB.Close()
	rows, err := verifyDB.Query(`SELECT name FROM sqlite_master WHERE type='index' AND name='idx_topic'`)
	require.Nil(t, err)
	require.True(t, rows.Next())
	var indexName string
	require.Nil(t, rows.Scan(&indexName))
	require.Equal(t, "idx_topic", indexName)
	require.Nil(t, rows.Close())
}
|
||||||
|
|
||||||
|
// TestSqliteStore_Migration_From9 verifies migration from schema version 9 to
// the current version.
func TestSqliteStore_Migration_From9(t *testing.T) {
	// This primarily tests the awkward migration that introduces the "expires" column.
	// The migration logic has to update the column, using the existing "cache-duration" value.

	filename := newSqliteTestStoreFile(t)
	db, err := sql.Open("sqlite3", filename)
	require.Nil(t, err)

	// Create "version 9" schema
	_, err = db.Exec(`
		BEGIN;
		CREATE TABLE IF NOT EXISTS messages (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			mid TEXT NOT NULL,
			time INT NOT NULL,
			topic TEXT NOT NULL,
			message TEXT NOT NULL,
			title TEXT NOT NULL,
			priority INT NOT NULL,
			tags TEXT NOT NULL,
			click TEXT NOT NULL,
			icon TEXT NOT NULL,
			actions TEXT NOT NULL,
			attachment_name TEXT NOT NULL,
			attachment_type TEXT NOT NULL,
			attachment_size INT NOT NULL,
			attachment_expires INT NOT NULL,
			attachment_url TEXT NOT NULL,
			sender TEXT NOT NULL,
			encoding TEXT NOT NULL,
			published INT NOT NULL
		);
		CREATE INDEX IF NOT EXISTS idx_mid ON messages (mid);
		CREATE INDEX IF NOT EXISTS idx_time ON messages (time);
		CREATE INDEX IF NOT EXISTS idx_topic ON messages (topic);
		CREATE TABLE IF NOT EXISTS schemaVersion (
			id INT PRIMARY KEY,
			version INT NOT NULL
		);
		INSERT INTO schemaVersion (id, version) VALUES (1, 9);
		COMMIT;
	`)
	require.Nil(t, err)

	// Insert a bunch of messages
	insertQuery := `
		INSERT INTO messages (mid, time, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, encoding, published)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
	`
	for i := 0; i < 10; i++ {
		_, err = db.Exec(
			insertQuery,
			fmt.Sprintf("abcd%d", i),
			time.Now().Unix(),
			"mytopic",
			fmt.Sprintf("some message %d", i),
			"",        // title
			0,         // priority
			"",        // tags
			"",        // click
			"",        // icon
			"",        // actions
			"",        // attachment_name
			"",        // attachment_type
			0,         // attachment_size
			0,         // attachment_expires
			"",        // attachment_url
			"9.9.9.9", // sender
			"",        // encoding
			1,         // published
		)
		require.Nil(t, err)
	}
	require.Nil(t, db.Close())

	// Create store to trigger migration; the cache duration drives the
	// backfill of the new "expires" column.
	cacheDuration := 17 * time.Hour
	s, err := message.NewSQLiteStore(filename, "", cacheDuration, 0, 0, false)
	require.Nil(t, err)
	t.Cleanup(func() { s.Close() })
	checkSqliteSchemaVersion(t, filename)

	// Check version: the migration chain must land on the current version (14)
	verifyDB, err := sql.Open("sqlite3", filename)
	require.Nil(t, err)
	defer verifyDB.Close()
	rows, err := verifyDB.Query(`SELECT version FROM schemaVersion WHERE id = 1`)
	require.Nil(t, err)
	require.True(t, rows.Next())
	var version int
	require.Nil(t, rows.Scan(&version))
	require.Equal(t, 14, version)
	require.Nil(t, rows.Close())

	// All rows survive; "expires" must be roughly time + cacheDuration
	// (±5s tolerance against clock drift while the test runs).
	messages, err := s.Messages("mytopic", model.SinceAllMessages, false)
	require.Nil(t, err)
	require.Equal(t, 10, len(messages))
	for _, m := range messages {
		require.True(t, m.Expires > time.Now().Add(cacheDuration-5*time.Second).Unix())
		require.True(t, m.Expires < time.Now().Add(cacheDuration+5*time.Second).Unix())
	}
}
|
||||||
|
|
||||||
|
func TestSqliteStore_StartupQueries_WAL(t *testing.T) {
|
||||||
|
filename := newSqliteTestStoreFile(t)
|
||||||
|
startupQueries := `pragma journal_mode = WAL;
|
||||||
|
pragma synchronous = normal;
|
||||||
|
pragma temp_store = memory;`
|
||||||
|
s, err := message.NewSQLiteStore(filename, startupQueries, time.Hour, 0, 0, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
t.Cleanup(func() { s.Close() })
|
||||||
|
require.Nil(t, s.AddMessage(model.NewDefaultMessage("mytopic", "some message")))
|
||||||
|
require.FileExists(t, filename)
|
||||||
|
require.FileExists(t, filename+"-wal")
|
||||||
|
require.FileExists(t, filename+"-shm")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSqliteStore_StartupQueries_None(t *testing.T) {
|
||||||
|
filename := newSqliteTestStoreFile(t)
|
||||||
|
s, err := message.NewSQLiteStore(filename, "", time.Hour, 0, 0, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
t.Cleanup(func() { s.Close() })
|
||||||
|
require.Nil(t, s.AddMessage(model.NewDefaultMessage("mytopic", "some message")))
|
||||||
|
require.FileExists(t, filename)
|
||||||
|
require.NoFileExists(t, filename+"-wal")
|
||||||
|
require.NoFileExists(t, filename+"-shm")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSqliteStore_StartupQueries_Fail(t *testing.T) {
|
||||||
|
filename := newSqliteTestStoreFile(t)
|
||||||
|
_, err := message.NewSQLiteStore(filename, `xx error`, time.Hour, 0, 0, false)
|
||||||
|
require.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNopStore(t *testing.T) {
|
||||||
|
s, err := message.NewNopStore()
|
||||||
|
require.Nil(t, err)
|
||||||
|
t.Cleanup(func() { s.Close() })
|
||||||
|
require.Nil(t, s.AddMessage(model.NewDefaultMessage("mytopic", "my message")))
|
||||||
|
|
||||||
|
messages, err := s.Messages("mytopic", model.SinceAllMessages, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Empty(t, messages)
|
||||||
|
|
||||||
|
topics, err := s.Topics()
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Empty(t, topics)
|
||||||
|
}
|
||||||
|
|
||||||
|
// newSqliteTestStoreFile returns the path of a throwaway SQLite cache file in
// the test's temp directory (removed automatically after the test).
func newSqliteTestStoreFile(t *testing.T) string {
	dir := t.TempDir()
	return filepath.Join(dir, "cache.db")
}
|
||||||
|
|
||||||
|
func newSqliteTestStoreFromFile(t *testing.T, filename, startupQueries string) *message.Cache {
|
||||||
|
s, err := message.NewSQLiteStore(filename, startupQueries, time.Hour, 0, 0, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
t.Cleanup(func() { s.Close() })
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkSqliteSchemaVersion(t *testing.T, filename string) {
|
||||||
|
db, err := sql.Open("sqlite3", filename)
|
||||||
|
require.Nil(t, err)
|
||||||
|
defer db.Close()
|
||||||
|
rows, err := db.Query(`SELECT version FROM schemaVersion`)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.True(t, rows.Next())
|
||||||
|
var schemaVersion int
|
||||||
|
require.Nil(t, rows.Scan(&schemaVersion))
|
||||||
|
require.Equal(t, 14, schemaVersion)
|
||||||
|
require.Nil(t, rows.Close())
|
||||||
|
}
|
||||||
829
message/cache_test.go
Normal file
829
message/cache_test.go
Normal file
@@ -0,0 +1,829 @@
|
|||||||
|
package message_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/netip"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
dbtest "heckel.io/ntfy/v2/db/test"
|
||||||
|
"heckel.io/ntfy/v2/message"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
func newSqliteTestStore(t *testing.T) *message.Cache {
|
||||||
|
filename := filepath.Join(t.TempDir(), "cache.db")
|
||||||
|
s, err := message.NewSQLiteStore(filename, "", time.Hour, 0, 0, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
t.Cleanup(func() { s.Close() })
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMemTestStore(t *testing.T) *message.Cache {
|
||||||
|
s, err := message.NewMemStore()
|
||||||
|
require.Nil(t, err)
|
||||||
|
t.Cleanup(func() { s.Close() })
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTestPostgresStore(t *testing.T) *message.Cache {
|
||||||
|
testDB := dbtest.CreateTestPostgres(t)
|
||||||
|
store, err := message.NewPostgresStore(testDB, 0, 0)
|
||||||
|
require.Nil(t, err)
|
||||||
|
return store
|
||||||
|
}
|
||||||
|
|
||||||
|
func forEachBackend(t *testing.T, f func(t *testing.T, s *message.Cache)) {
|
||||||
|
t.Run("sqlite", func(t *testing.T) {
|
||||||
|
f(t, newSqliteTestStore(t))
|
||||||
|
})
|
||||||
|
t.Run("mem", func(t *testing.T) {
|
||||||
|
f(t, newMemTestStore(t))
|
||||||
|
})
|
||||||
|
t.Run("postgres", func(t *testing.T) {
|
||||||
|
f(t, newTestPostgresStore(t))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStore_Messages exercises the core read path of every cache backend:
// adding messages, rejecting non-message events, counting, and the various
// "since" query modes (all, none, by ID, by time, latest).
func TestStore_Messages(t *testing.T) {
	forEachBackend(t, func(t *testing.T, s *message.Cache) {
		m1 := model.NewDefaultMessage("mytopic", "my message")
		m1.Time = 1

		m2 := model.NewDefaultMessage("mytopic", "my other message")
		m2.Time = 2

		require.Nil(t, s.AddMessage(m1))
		require.Nil(t, s.AddMessage(model.NewDefaultMessage("example", "my example message")))
		require.Nil(t, s.AddMessage(m2))

		// Adding invalid event types must be rejected, not stored
		require.Equal(t, model.ErrUnexpectedMessageType, s.AddMessage(model.NewKeepaliveMessage("mytopic"))) // These should not be added!
		require.Equal(t, model.ErrUnexpectedMessageType, s.AddMessage(model.NewOpenMessage("example")))      // These should not be added!

		// count: 3 across both topics (rejected events not counted)
		count, err := s.MessagesCount()
		require.Nil(t, err)
		require.Equal(t, 3, count)

		// mytopic: since all — both messages, in time order, default fields zeroed
		messages, _ := s.Messages("mytopic", model.SinceAllMessages, false)
		require.Equal(t, 2, len(messages))
		require.Equal(t, "my message", messages[0].Message)
		require.Equal(t, "mytopic", messages[0].Topic)
		require.Equal(t, model.MessageEvent, messages[0].Event)
		require.Equal(t, "", messages[0].Title)
		require.Equal(t, 0, messages[0].Priority)
		require.Nil(t, messages[0].Tags)
		require.Equal(t, "my other message", messages[1].Message)

		// mytopic: since none — nothing
		messages, _ = s.Messages("mytopic", model.SinceNoMessages, false)
		require.Empty(t, messages)

		// mytopic: since m1 (by ID) — only what came after m1
		messages, _ = s.Messages("mytopic", model.NewSinceID(m1.ID), false)
		require.Equal(t, 1, len(messages))
		require.Equal(t, m2.ID, messages[0].ID)
		require.Equal(t, "my other message", messages[0].Message)
		require.Equal(t, "mytopic", messages[0].Topic)

		// mytopic: since time 2 — only m2
		messages, _ = s.Messages("mytopic", model.NewSinceTime(2), false)
		require.Equal(t, 1, len(messages))
		require.Equal(t, "my other message", messages[0].Message)

		// mytopic: latest — the newest message only
		messages, _ = s.Messages("mytopic", model.SinceLatestMessage, false)
		require.Equal(t, 1, len(messages))
		require.Equal(t, "my other message", messages[0].Message)

		// example: since all — topics are isolated from each other
		messages, _ = s.Messages("example", model.SinceAllMessages, false)
		require.Equal(t, "my example message", messages[0].Message)

		// non-existing topic: since all — empty, not an error
		messages, _ = s.Messages("doesnotexist", model.SinceAllMessages, false)
		require.Empty(t, messages)
	})
}
|
||||||
|
|
||||||
|
func TestStore_MessagesLock(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, s *message.Cache) {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for i := 0; i < 5000; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
assert.Nil(t, s.AddMessage(model.NewDefaultMessage("mytopic", "test message")))
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStore_MessagesScheduled(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, s *message.Cache) {
|
||||||
|
m1 := model.NewDefaultMessage("mytopic", "message 1")
|
||||||
|
m2 := model.NewDefaultMessage("mytopic", "message 2")
|
||||||
|
m2.Time = time.Now().Add(time.Hour).Unix()
|
||||||
|
m3 := model.NewDefaultMessage("mytopic", "message 3")
|
||||||
|
m3.Time = time.Now().Add(time.Minute).Unix() // earlier than m2!
|
||||||
|
m4 := model.NewDefaultMessage("mytopic2", "message 4")
|
||||||
|
m4.Time = time.Now().Add(time.Minute).Unix()
|
||||||
|
require.Nil(t, s.AddMessage(m1))
|
||||||
|
require.Nil(t, s.AddMessage(m2))
|
||||||
|
require.Nil(t, s.AddMessage(m3))
|
||||||
|
|
||||||
|
messages, _ := s.Messages("mytopic", model.SinceAllMessages, false) // exclude scheduled
|
||||||
|
require.Equal(t, 1, len(messages))
|
||||||
|
require.Equal(t, "message 1", messages[0].Message)
|
||||||
|
|
||||||
|
messages, _ = s.Messages("mytopic", model.SinceAllMessages, true) // include scheduled
|
||||||
|
require.Equal(t, 3, len(messages))
|
||||||
|
require.Equal(t, "message 1", messages[0].Message)
|
||||||
|
require.Equal(t, "message 3", messages[1].Message) // Order!
|
||||||
|
require.Equal(t, "message 2", messages[2].Message)
|
||||||
|
|
||||||
|
messages, _ = s.MessagesDue()
|
||||||
|
require.Empty(t, messages)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStore_Topics(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, s *message.Cache) {
|
||||||
|
require.Nil(t, s.AddMessage(model.NewDefaultMessage("topic1", "my example message")))
|
||||||
|
require.Nil(t, s.AddMessage(model.NewDefaultMessage("topic2", "message 1")))
|
||||||
|
require.Nil(t, s.AddMessage(model.NewDefaultMessage("topic2", "message 2")))
|
||||||
|
require.Nil(t, s.AddMessage(model.NewDefaultMessage("topic2", "message 3")))
|
||||||
|
|
||||||
|
topics, err := s.Topics()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
require.Equal(t, 2, len(topics))
|
||||||
|
require.Contains(t, topics, "topic1")
|
||||||
|
require.Contains(t, topics, "topic2")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStore_MessagesTagsPrioAndTitle(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, s *message.Cache) {
|
||||||
|
m := model.NewDefaultMessage("mytopic", "some message")
|
||||||
|
m.Tags = []string{"tag1", "tag2"}
|
||||||
|
m.Priority = 5
|
||||||
|
m.Title = "some title"
|
||||||
|
require.Nil(t, s.AddMessage(m))
|
||||||
|
|
||||||
|
messages, _ := s.Messages("mytopic", model.SinceAllMessages, false)
|
||||||
|
require.Equal(t, []string{"tag1", "tag2"}, messages[0].Tags)
|
||||||
|
require.Equal(t, 5, messages[0].Priority)
|
||||||
|
require.Equal(t, "some title", messages[0].Title)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStore_MessagesSinceID verifies the "since=<ID>" query semantics of
// Messages(): only messages newer than the given ID are returned, scheduled
// (future-time) messages are excluded or appended in time order depending on
// the scheduled flag, and an unknown ID falls back to returning all messages.
func TestStore_MessagesSinceID(t *testing.T) {
	forEachBackend(t, func(t *testing.T, s *message.Cache) {
		// Published messages use fixed past timestamps; m3 and m5 are scheduled
		m1 := model.NewDefaultMessage("mytopic", "message 1")
		m1.Time = 100
		m2 := model.NewDefaultMessage("mytopic", "message 2")
		m2.Time = 200
		m3 := model.NewDefaultMessage("mytopic", "message 3")
		m3.Time = time.Now().Add(time.Hour).Unix() // Scheduled, in the future, later than m7 and m5
		m4 := model.NewDefaultMessage("mytopic", "message 4")
		m4.Time = 400
		m5 := model.NewDefaultMessage("mytopic", "message 5")
		m5.Time = time.Now().Add(time.Minute).Unix() // Scheduled, in the future, later than m7
		m6 := model.NewDefaultMessage("mytopic", "message 6")
		m6.Time = 600
		m7 := model.NewDefaultMessage("mytopic", "message 7")
		m7.Time = 700

		require.Nil(t, s.AddMessage(m1))
		require.Nil(t, s.AddMessage(m2))
		require.Nil(t, s.AddMessage(m3))
		require.Nil(t, s.AddMessage(m4))
		require.Nil(t, s.AddMessage(m5))
		require.Nil(t, s.AddMessage(m6))
		require.Nil(t, s.AddMessage(m7))

		// Case 1: Since ID exists, exclude scheduled
		messages, _ := s.Messages("mytopic", model.NewSinceID(m2.ID), false)
		require.Equal(t, 3, len(messages))
		require.Equal(t, "message 4", messages[0].Message)
		require.Equal(t, "message 6", messages[1].Message) // Not scheduled m3/m5!
		require.Equal(t, "message 7", messages[2].Message)

		// Case 2: Since ID exists, include scheduled; scheduled messages come
		// after the published ones, sorted by time
		messages, _ = s.Messages("mytopic", model.NewSinceID(m2.ID), true)
		require.Equal(t, 5, len(messages))
		require.Equal(t, "message 4", messages[0].Message)
		require.Equal(t, "message 6", messages[1].Message)
		require.Equal(t, "message 7", messages[2].Message)
		require.Equal(t, "message 5", messages[3].Message) // Order!
		require.Equal(t, "message 3", messages[4].Message) // Order!

		// Case 3: Since ID does not exist (-> Return all messages), include scheduled
		messages, _ = s.Messages("mytopic", model.NewSinceID("doesntexist"), true)
		require.Equal(t, 7, len(messages))
		require.Equal(t, "message 1", messages[0].Message)
		require.Equal(t, "message 2", messages[1].Message)
		require.Equal(t, "message 4", messages[2].Message)
		require.Equal(t, "message 6", messages[3].Message)
		require.Equal(t, "message 7", messages[4].Message)
		require.Equal(t, "message 5", messages[5].Message) // Order!
		require.Equal(t, "message 3", messages[6].Message) // Order!

		// Case 4: Since ID exists and is last message (-> Return no messages), exclude scheduled
		messages, _ = s.Messages("mytopic", model.NewSinceID(m7.ID), false)
		require.Equal(t, 0, len(messages))

		// Case 5: Since ID is the last *published* message; including scheduled
		// messages still returns the two future ones (m5, m3), in time order
		messages, _ = s.Messages("mytopic", model.NewSinceID(m7.ID), true)
		require.Equal(t, 2, len(messages))
		require.Equal(t, "message 5", messages[0].Message)
		require.Equal(t, "message 3", messages[1].Message)
	})
}
|
||||||
|
|
||||||
|
func TestStore_Prune(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, s *message.Cache) {
|
||||||
|
now := time.Now().Unix()
|
||||||
|
|
||||||
|
m1 := model.NewDefaultMessage("mytopic", "my message")
|
||||||
|
m1.Time = now - 10
|
||||||
|
m1.Expires = now - 5
|
||||||
|
|
||||||
|
m2 := model.NewDefaultMessage("mytopic", "my other message")
|
||||||
|
m2.Time = now - 5
|
||||||
|
m2.Expires = now + 5 // In the future
|
||||||
|
|
||||||
|
m3 := model.NewDefaultMessage("another_topic", "and another one")
|
||||||
|
m3.Time = now - 12
|
||||||
|
m3.Expires = now - 2
|
||||||
|
|
||||||
|
require.Nil(t, s.AddMessage(m1))
|
||||||
|
require.Nil(t, s.AddMessage(m2))
|
||||||
|
require.Nil(t, s.AddMessage(m3))
|
||||||
|
|
||||||
|
count, err := s.MessagesCount()
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 3, count)
|
||||||
|
|
||||||
|
expiredMessageIDs, err := s.MessagesExpired()
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Nil(t, s.DeleteMessages(expiredMessageIDs...))
|
||||||
|
|
||||||
|
count, err = s.MessagesCount()
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 1, count)
|
||||||
|
|
||||||
|
messages, err := s.Messages("mytopic", model.SinceAllMessages, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 1, len(messages))
|
||||||
|
require.Equal(t, "my other message", messages[0].Message)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStore_Attachments verifies that attachment metadata (name, type, size,
// expiry, URL) round-trips through the cache, and that attachment byte usage
// is accounted to the user ID when present and to the sender IP otherwise.
func TestStore_Attachments(t *testing.T) {
	forEachBackend(t, func(t *testing.T, s *message.Cache) {
		// m1: attachment that expired 4 hours ago, accounted to sender IP 1.2.3.4
		expires1 := time.Now().Add(-4 * time.Hour).Unix() // Expired
		m := model.NewDefaultMessage("mytopic", "flower for you")
		m.ID = "m1"
		m.SequenceID = "m1"
		m.Sender = netip.MustParseAddr("1.2.3.4")
		m.Attachment = &model.Attachment{
			Name:    "flower.jpg",
			Type:    "image/jpeg",
			Size:    5000,
			Expires: expires1,
			URL:     "https://ntfy.sh/file/AbDeFgJhal.jpg",
		}
		require.Nil(t, s.AddMessage(m))

		// m2: attachment expiring in the future, same sender IP
		expires2 := time.Now().Add(2 * time.Hour).Unix() // Future
		m = model.NewDefaultMessage("mytopic", "sending you a car")
		m.ID = "m2"
		m.SequenceID = "m2"
		m.Sender = netip.MustParseAddr("1.2.3.4")
		m.Attachment = &model.Attachment{
			Name:    "car.jpg",
			Type:    "image/jpeg",
			Size:    10000,
			Expires: expires2,
			URL:     "https://ntfy.sh/file/aCaRURL.jpg",
		}
		require.Nil(t, s.AddMessage(m))

		// m3: different topic, uploaded by a known user (u_BAsbaAa) from 5.6.7.8
		expires3 := time.Now().Add(1 * time.Hour).Unix() // Future
		m = model.NewDefaultMessage("another-topic", "sending you another car")
		m.ID = "m3"
		m.SequenceID = "m3"
		m.User = "u_BAsbaAa"
		m.Sender = netip.MustParseAddr("5.6.7.8")
		m.Attachment = &model.Attachment{
			Name:    "another-car.jpg",
			Type:    "image/jpeg",
			Size:    20000,
			Expires: expires3,
			URL:     "https://ntfy.sh/file/zakaDHFW.jpg",
		}
		require.Nil(t, s.AddMessage(m))

		// Attachment fields round-trip for both "mytopic" messages
		messages, err := s.Messages("mytopic", model.SinceAllMessages, false)
		require.Nil(t, err)
		require.Equal(t, 2, len(messages))

		require.Equal(t, "flower for you", messages[0].Message)
		require.Equal(t, "flower.jpg", messages[0].Attachment.Name)
		require.Equal(t, "image/jpeg", messages[0].Attachment.Type)
		require.Equal(t, int64(5000), messages[0].Attachment.Size)
		require.Equal(t, expires1, messages[0].Attachment.Expires)
		require.Equal(t, "https://ntfy.sh/file/AbDeFgJhal.jpg", messages[0].Attachment.URL)
		require.Equal(t, "1.2.3.4", messages[0].Sender.String())

		require.Equal(t, "sending you a car", messages[1].Message)
		require.Equal(t, "car.jpg", messages[1].Attachment.Name)
		require.Equal(t, "image/jpeg", messages[1].Attachment.Type)
		require.Equal(t, int64(10000), messages[1].Attachment.Size)
		require.Equal(t, expires2, messages[1].Attachment.Expires)
		require.Equal(t, "https://ntfy.sh/file/aCaRURL.jpg", messages[1].Attachment.URL)
		require.Equal(t, "1.2.3.4", messages[1].Sender.String())

		// m1's attachment has already expired, so only m2's 10000 bytes count
		// toward the sender's usage
		size, err := s.AttachmentBytesUsedBySender("1.2.3.4")
		require.Nil(t, err)
		require.Equal(t, int64(10000), size)

		size, err = s.AttachmentBytesUsedBySender("5.6.7.8")
		require.Nil(t, err)
		require.Equal(t, int64(0), size) // Accounted to the user, not the IP!

		size, err = s.AttachmentBytesUsedByUser("u_BAsbaAa")
		require.Nil(t, err)
		require.Equal(t, int64(20000), size)
	})
}
|
||||||
|
|
||||||
|
// TestStore_AttachmentsExpired verifies that AttachmentsExpired reports only
// messages whose attachment expiry has passed — skipping messages without an
// attachment, attachments that are still valid, and external attachments
// with an unknown (zero) expiry.
func TestStore_AttachmentsExpired(t *testing.T) {
	forEachBackend(t, func(t *testing.T, s *message.Cache) {
		// m1: no attachment at all
		m := model.NewDefaultMessage("mytopic", "flower for you")
		m.ID = "m1"
		m.SequenceID = "m1"
		m.Expires = time.Now().Add(time.Hour).Unix()
		require.Nil(t, s.AddMessage(m))

		// m2: attachment still valid for 2 hours
		m = model.NewDefaultMessage("mytopic", "message with attachment")
		m.ID = "m2"
		m.SequenceID = "m2"
		m.Expires = time.Now().Add(2 * time.Hour).Unix()
		m.Attachment = &model.Attachment{
			Name:    "car.jpg",
			Type:    "image/jpeg",
			Size:    10000,
			Expires: time.Now().Add(2 * time.Hour).Unix(),
			URL:     "https://ntfy.sh/file/aCaRURL.jpg",
		}
		require.Nil(t, s.AddMessage(m))

		// m3: externally hosted attachment with no tracked expiry
		m = model.NewDefaultMessage("mytopic", "message with external attachment")
		m.ID = "m3"
		m.SequenceID = "m3"
		m.Expires = time.Now().Add(2 * time.Hour).Unix()
		m.Attachment = &model.Attachment{
			Name:    "car.jpg",
			Type:    "image/jpeg",
			Expires: 0, // Unknown!
			URL:     "https://somedomain.com/car.jpg",
		}
		require.Nil(t, s.AddMessage(m))

		// m4: attachment expired an hour ago — the only one that should be reported
		m = model.NewDefaultMessage("mytopic2", "message with expired attachment")
		m.ID = "m4"
		m.SequenceID = "m4"
		m.Expires = time.Now().Add(2 * time.Hour).Unix()
		m.Attachment = &model.Attachment{
			Name:    "expired-car.jpg",
			Type:    "image/jpeg",
			Size:    20000,
			Expires: time.Now().Add(-1 * time.Hour).Unix(),
			URL:     "https://ntfy.sh/file/aCaRURL.jpg",
		}
		require.Nil(t, s.AddMessage(m))

		ids, err := s.AttachmentsExpired()
		require.Nil(t, err)
		require.Equal(t, 1, len(ids))
		require.Equal(t, "m4", ids[0])
	})
}
|
||||||
|
|
||||||
|
func TestStore_Sender(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, s *message.Cache) {
|
||||||
|
m1 := model.NewDefaultMessage("mytopic", "mymessage")
|
||||||
|
m1.Sender = netip.MustParseAddr("1.2.3.4")
|
||||||
|
require.Nil(t, s.AddMessage(m1))
|
||||||
|
|
||||||
|
m2 := model.NewDefaultMessage("mytopic", "mymessage without sender")
|
||||||
|
require.Nil(t, s.AddMessage(m2))
|
||||||
|
|
||||||
|
messages, err := s.Messages("mytopic", model.SinceAllMessages, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 2, len(messages))
|
||||||
|
require.Equal(t, messages[0].Sender, netip.MustParseAddr("1.2.3.4"))
|
||||||
|
require.Equal(t, messages[1].Sender, netip.Addr{})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStore_DeleteScheduledBySequenceID verifies that deletion by sequence ID
// removes only scheduled (unpublished) messages, is scoped to a single topic,
// and returns the IDs of exactly the messages it deleted.
func TestStore_DeleteScheduledBySequenceID(t *testing.T) {
	forEachBackend(t, func(t *testing.T, s *message.Cache) {
		// Create a scheduled (unpublished) message
		scheduledMsg := model.NewDefaultMessage("mytopic", "scheduled message")
		scheduledMsg.ID = "scheduled1"
		scheduledMsg.SequenceID = "seq123"
		scheduledMsg.Time = time.Now().Add(time.Hour).Unix() // Future time makes it scheduled
		require.Nil(t, s.AddMessage(scheduledMsg))

		// Create a published message with different sequence ID
		publishedMsg := model.NewDefaultMessage("mytopic", "published message")
		publishedMsg.ID = "published1"
		publishedMsg.SequenceID = "seq456"
		publishedMsg.Time = time.Now().Add(-time.Hour).Unix() // Past time makes it published
		require.Nil(t, s.AddMessage(publishedMsg))

		// Create a scheduled message in a different topic
		otherTopicMsg := model.NewDefaultMessage("othertopic", "other scheduled")
		otherTopicMsg.ID = "other1"
		otherTopicMsg.SequenceID = "seq123" // Same sequence ID as scheduledMsg
		otherTopicMsg.Time = time.Now().Add(time.Hour).Unix()
		require.Nil(t, s.AddMessage(otherTopicMsg))

		// Verify all messages exist (including scheduled)
		messages, err := s.Messages("mytopic", model.SinceAllMessages, true)
		require.Nil(t, err)
		require.Equal(t, 2, len(messages))

		messages, err = s.Messages("othertopic", model.SinceAllMessages, true)
		require.Nil(t, err)
		require.Equal(t, 1, len(messages))

		// Delete scheduled message by sequence ID and verify returned IDs
		deletedIDs, err := s.DeleteScheduledBySequenceID("mytopic", "seq123")
		require.Nil(t, err)
		require.Equal(t, 1, len(deletedIDs))
		require.Equal(t, "scheduled1", deletedIDs[0])

		// Verify scheduled message is deleted
		messages, err = s.Messages("mytopic", model.SinceAllMessages, true)
		require.Nil(t, err)
		require.Equal(t, 1, len(messages))
		require.Equal(t, "published message", messages[0].Message)

		// Verify other topic's message still exists (topic-scoped deletion),
		// even though it shares sequence ID "seq123"
		messages, err = s.Messages("othertopic", model.SinceAllMessages, true)
		require.Nil(t, err)
		require.Equal(t, 1, len(messages))
		require.Equal(t, "other scheduled", messages[0].Message)

		// Deleting non-existent sequence ID should return empty list
		deletedIDs, err = s.DeleteScheduledBySequenceID("mytopic", "nonexistent")
		require.Nil(t, err)
		require.Empty(t, deletedIDs)

		// Deleting published message should not affect it (only deletes unpublished)
		deletedIDs, err = s.DeleteScheduledBySequenceID("mytopic", "seq456")
		require.Nil(t, err)
		require.Empty(t, deletedIDs)

		messages, err = s.Messages("mytopic", model.SinceAllMessages, true)
		require.Nil(t, err)
		require.Equal(t, 1, len(messages))
		require.Equal(t, "published message", messages[0].Message)
	})
}
|
||||||
|
|
||||||
|
func TestStore_MessageByID(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, s *message.Cache) {
|
||||||
|
// Add a message
|
||||||
|
m := model.NewDefaultMessage("mytopic", "some message")
|
||||||
|
m.Title = "some title"
|
||||||
|
m.Priority = 4
|
||||||
|
m.Tags = []string{"tag1", "tag2"}
|
||||||
|
require.Nil(t, s.AddMessage(m))
|
||||||
|
|
||||||
|
// Retrieve by ID
|
||||||
|
retrieved, err := s.Message(m.ID)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, m.ID, retrieved.ID)
|
||||||
|
require.Equal(t, "mytopic", retrieved.Topic)
|
||||||
|
require.Equal(t, "some message", retrieved.Message)
|
||||||
|
require.Equal(t, "some title", retrieved.Title)
|
||||||
|
require.Equal(t, 4, retrieved.Priority)
|
||||||
|
require.Equal(t, []string{"tag1", "tag2"}, retrieved.Tags)
|
||||||
|
|
||||||
|
// Non-existent ID returns ErrMessageNotFound
|
||||||
|
_, err = s.Message("doesnotexist")
|
||||||
|
require.Equal(t, model.ErrMessageNotFound, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStore_MarkPublished(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, s *message.Cache) {
|
||||||
|
// Add a scheduled message (future time -> unpublished)
|
||||||
|
m := model.NewDefaultMessage("mytopic", "scheduled message")
|
||||||
|
m.Time = time.Now().Add(time.Hour).Unix()
|
||||||
|
require.Nil(t, s.AddMessage(m))
|
||||||
|
|
||||||
|
// Verify it does not appear in non-scheduled queries
|
||||||
|
messages, err := s.Messages("mytopic", model.SinceAllMessages, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 0, len(messages))
|
||||||
|
|
||||||
|
// Verify it does appear in scheduled queries
|
||||||
|
messages, err = s.Messages("mytopic", model.SinceAllMessages, true)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 1, len(messages))
|
||||||
|
|
||||||
|
// Mark as published
|
||||||
|
require.Nil(t, s.MarkPublished(m))
|
||||||
|
|
||||||
|
// Now it should appear in non-scheduled queries too
|
||||||
|
messages, err = s.Messages("mytopic", model.SinceAllMessages, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 1, len(messages))
|
||||||
|
require.Equal(t, "scheduled message", messages[0].Message)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStore_ExpireMessages(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, s *message.Cache) {
|
||||||
|
// Add messages to two topics
|
||||||
|
m1 := model.NewDefaultMessage("topic1", "message 1")
|
||||||
|
m1.Expires = time.Now().Add(time.Hour).Unix()
|
||||||
|
m2 := model.NewDefaultMessage("topic1", "message 2")
|
||||||
|
m2.Expires = time.Now().Add(time.Hour).Unix()
|
||||||
|
m3 := model.NewDefaultMessage("topic2", "message 3")
|
||||||
|
m3.Expires = time.Now().Add(time.Hour).Unix()
|
||||||
|
require.Nil(t, s.AddMessage(m1))
|
||||||
|
require.Nil(t, s.AddMessage(m2))
|
||||||
|
require.Nil(t, s.AddMessage(m3))
|
||||||
|
|
||||||
|
// Verify all messages exist
|
||||||
|
messages, err := s.Messages("topic1", model.SinceAllMessages, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 2, len(messages))
|
||||||
|
messages, err = s.Messages("topic2", model.SinceAllMessages, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 1, len(messages))
|
||||||
|
|
||||||
|
// Expire topic1 messages
|
||||||
|
require.Nil(t, s.ExpireMessages("topic1"))
|
||||||
|
|
||||||
|
// topic1 messages should now be expired (expires set to past)
|
||||||
|
expiredIDs, err := s.MessagesExpired()
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 2, len(expiredIDs))
|
||||||
|
sort.Strings(expiredIDs)
|
||||||
|
expectedIDs := []string{m1.ID, m2.ID}
|
||||||
|
sort.Strings(expectedIDs)
|
||||||
|
require.Equal(t, expectedIDs, expiredIDs)
|
||||||
|
|
||||||
|
// topic2 should be unaffected
|
||||||
|
messages, err = s.Messages("topic2", model.SinceAllMessages, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 1, len(messages))
|
||||||
|
require.Equal(t, "message 3", messages[0].Message)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStore_MarkAttachmentsDeleted verifies that marking a message's expired
// attachment as deleted removes it from the AttachmentsExpired cleanup list
// while leaving the message itself in the cache.
func TestStore_MarkAttachmentsDeleted(t *testing.T) {
	forEachBackend(t, func(t *testing.T, s *message.Cache) {
		// Add a message with an expired attachment (file needs cleanup)
		m1 := model.NewDefaultMessage("mytopic", "old file")
		m1.ID = "msg1"
		m1.SequenceID = "msg1"
		m1.Expires = time.Now().Add(time.Hour).Unix()
		m1.Attachment = &model.Attachment{
			Name:    "old.pdf",
			Type:    "application/pdf",
			Size:    50000,
			Expires: time.Now().Add(-time.Hour).Unix(), // Expired
			URL:     "https://ntfy.sh/file/old.pdf",
		}
		require.Nil(t, s.AddMessage(m1))

		// Add a message with another expired attachment
		m2 := model.NewDefaultMessage("mytopic", "another old file")
		m2.ID = "msg2"
		m2.SequenceID = "msg2"
		m2.Expires = time.Now().Add(time.Hour).Unix()
		m2.Attachment = &model.Attachment{
			Name:    "another.pdf",
			Type:    "application/pdf",
			Size:    30000,
			Expires: time.Now().Add(-time.Hour).Unix(), // Expired
			URL:     "https://ntfy.sh/file/another.pdf",
		}
		require.Nil(t, s.AddMessage(m2))

		// Both should show as expired attachments needing cleanup
		ids, err := s.AttachmentsExpired()
		require.Nil(t, err)
		require.Equal(t, 2, len(ids))

		// Mark msg1's attachment as deleted (file cleaned up)
		require.Nil(t, s.MarkAttachmentsDeleted("msg1"))

		// Now only msg2 should show as needing cleanup
		ids, err = s.AttachmentsExpired()
		require.Nil(t, err)
		require.Equal(t, 1, len(ids))
		require.Equal(t, "msg2", ids[0])

		// Mark msg2 too
		require.Nil(t, s.MarkAttachmentsDeleted("msg2"))

		// No more expired attachments to clean up
		ids, err = s.AttachmentsExpired()
		require.Nil(t, err)
		require.Equal(t, 0, len(ids))

		// Messages themselves still exist; only the attachment files are gone
		messages, err := s.Messages("mytopic", model.SinceAllMessages, false)
		require.Nil(t, err)
		require.Equal(t, 2, len(messages))
	})
}
|
||||||
|
|
||||||
|
func TestStore_Stats(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, s *message.Cache) {
|
||||||
|
// Initial stats should be zero
|
||||||
|
messages, err := s.Stats()
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, int64(0), messages)
|
||||||
|
|
||||||
|
// Update stats
|
||||||
|
require.Nil(t, s.UpdateStats(42))
|
||||||
|
messages, err = s.Stats()
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, int64(42), messages)
|
||||||
|
|
||||||
|
// Update again (overwrites)
|
||||||
|
require.Nil(t, s.UpdateStats(100))
|
||||||
|
messages, err = s.Stats()
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, int64(100), messages)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStore_AddMessages(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, s *message.Cache) {
|
||||||
|
// Batch add multiple messages
|
||||||
|
msgs := []*model.Message{
|
||||||
|
model.NewDefaultMessage("mytopic", "batch 1"),
|
||||||
|
model.NewDefaultMessage("mytopic", "batch 2"),
|
||||||
|
model.NewDefaultMessage("othertopic", "batch 3"),
|
||||||
|
}
|
||||||
|
require.Nil(t, s.AddMessages(msgs))
|
||||||
|
|
||||||
|
// Verify all were inserted
|
||||||
|
messages, err := s.Messages("mytopic", model.SinceAllMessages, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 2, len(messages))
|
||||||
|
|
||||||
|
messages, err = s.Messages("othertopic", model.SinceAllMessages, false)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 1, len(messages))
|
||||||
|
require.Equal(t, "batch 3", messages[0].Message)
|
||||||
|
|
||||||
|
// Empty batch should succeed
|
||||||
|
require.Nil(t, s.AddMessages([]*model.Message{}))
|
||||||
|
|
||||||
|
// Batch with invalid event type should fail
|
||||||
|
badMsgs := []*model.Message{
|
||||||
|
model.NewKeepaliveMessage("mytopic"),
|
||||||
|
}
|
||||||
|
require.NotNil(t, s.AddMessages(badMsgs))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStore_MessagesDue(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, s *message.Cache) {
|
||||||
|
// Add a message scheduled in the past (i.e. it's due now)
|
||||||
|
m1 := model.NewDefaultMessage("mytopic", "due message")
|
||||||
|
m1.Time = time.Now().Add(-time.Second).Unix()
|
||||||
|
// Set expires in the future so it doesn't get pruned
|
||||||
|
m1.Expires = time.Now().Add(time.Hour).Unix()
|
||||||
|
require.Nil(t, s.AddMessage(m1))
|
||||||
|
|
||||||
|
// Add a message scheduled in the future (not due)
|
||||||
|
m2 := model.NewDefaultMessage("mytopic", "future message")
|
||||||
|
m2.Time = time.Now().Add(time.Hour).Unix()
|
||||||
|
require.Nil(t, s.AddMessage(m2))
|
||||||
|
|
||||||
|
// Mark m1 as published so it won't be "due"
|
||||||
|
// (MessagesDue returns unpublished messages whose time <= now)
|
||||||
|
// m1 is auto-published (time <= now), so it should not be due
|
||||||
|
// m2 is unpublished (time in future), not due yet
|
||||||
|
due, err := s.MessagesDue()
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 0, len(due))
|
||||||
|
|
||||||
|
// Add a message that was explicitly scheduled in the past but time has "arrived"
|
||||||
|
// We need to manipulate the database to create a truly "due" message:
|
||||||
|
// a message with published=false and time <= now
|
||||||
|
m3 := model.NewDefaultMessage("mytopic", "truly due message")
|
||||||
|
m3.Time = time.Now().Add(2 * time.Second).Unix() // 2 seconds from now
|
||||||
|
require.Nil(t, s.AddMessage(m3))
|
||||||
|
|
||||||
|
// Not due yet
|
||||||
|
due, err = s.MessagesDue()
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 0, len(due))
|
||||||
|
|
||||||
|
// Wait for it to become due
|
||||||
|
time.Sleep(3 * time.Second)
|
||||||
|
|
||||||
|
due, err = s.MessagesDue()
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, 1, len(due))
|
||||||
|
require.Equal(t, "truly due message", due[0].Message)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStore_MessageFieldRoundTrip populates every persisted field of a
// message — including nested actions with headers and bodies — stores it,
// reads it back by ID, and asserts each field survived unchanged.
func TestStore_MessageFieldRoundTrip(t *testing.T) {
	forEachBackend(t, func(t *testing.T, s *message.Cache) {
		// Create a message with all fields populated
		m := model.NewDefaultMessage("mytopic", "hello world")
		m.SequenceID = "custom_seq_id"
		m.Title = "A Title"
		m.Priority = 4
		m.Tags = []string{"warning", "srv01"}
		m.Click = "https://example.com/click"
		m.Icon = "https://example.com/icon.png"
		// Two actions: a simple "view" action and an "http" action with
		// method, headers, and body, to exercise every action field
		m.Actions = []*model.Action{
			{
				ID:     "action1",
				Action: "view",
				Label:  "Open Site",
				URL:    "https://example.com",
				Clear:  true,
			},
			{
				ID:      "action2",
				Action:  "http",
				Label:   "Call Webhook",
				URL:     "https://example.com/hook",
				Method:  "PUT",
				Headers: map[string]string{"X-Token": "secret"},
				Body:    `{"key":"value"}`,
			},
		}
		m.ContentType = "text/markdown"
		m.Encoding = "base64"
		m.Sender = netip.MustParseAddr("9.8.7.6")
		m.User = "u_TestUser123"
		require.Nil(t, s.AddMessage(m))

		// Retrieve and verify every field
		retrieved, err := s.Message(m.ID)
		require.Nil(t, err)

		require.Equal(t, m.ID, retrieved.ID)
		require.Equal(t, "custom_seq_id", retrieved.SequenceID)
		require.Equal(t, m.Time, retrieved.Time)
		require.Equal(t, m.Expires, retrieved.Expires)
		require.Equal(t, model.MessageEvent, retrieved.Event)
		require.Equal(t, "mytopic", retrieved.Topic)
		require.Equal(t, "hello world", retrieved.Message)
		require.Equal(t, "A Title", retrieved.Title)
		require.Equal(t, 4, retrieved.Priority)
		require.Equal(t, []string{"warning", "srv01"}, retrieved.Tags)
		require.Equal(t, "https://example.com/click", retrieved.Click)
		require.Equal(t, "https://example.com/icon.png", retrieved.Icon)
		require.Equal(t, "text/markdown", retrieved.ContentType)
		require.Equal(t, "base64", retrieved.Encoding)
		require.Equal(t, netip.MustParseAddr("9.8.7.6"), retrieved.Sender)
		require.Equal(t, "u_TestUser123", retrieved.User)

		// Verify actions round-trip
		require.Equal(t, 2, len(retrieved.Actions))

		require.Equal(t, "action1", retrieved.Actions[0].ID)
		require.Equal(t, "view", retrieved.Actions[0].Action)
		require.Equal(t, "Open Site", retrieved.Actions[0].Label)
		require.Equal(t, "https://example.com", retrieved.Actions[0].URL)
		require.Equal(t, true, retrieved.Actions[0].Clear)

		require.Equal(t, "action2", retrieved.Actions[1].ID)
		require.Equal(t, "http", retrieved.Actions[1].Action)
		require.Equal(t, "Call Webhook", retrieved.Actions[1].Label)
		require.Equal(t, "https://example.com/hook", retrieved.Actions[1].URL)
		require.Equal(t, "PUT", retrieved.Actions[1].Method)
		require.Equal(t, "secret", retrieved.Actions[1].Headers["X-Token"])
		require.Equal(t, `{"key":"value"}`, retrieved.Actions[1].Body)
	})
}
|
||||||
212
model/model.go
Normal file
212
model/model.go
Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
package model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"net/netip"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"heckel.io/ntfy/v2/log"
|
||||||
|
"heckel.io/ntfy/v2/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// List of possible events. These are the valid values for Message.Event.
// Note that the cache's Add methods reject non-message events (see the store
// tests), so events like keepalive/open are transport-only.
const (
	OpenEvent          = "open"
	KeepaliveEvent     = "keepalive"
	MessageEvent       = "message"
	MessageDeleteEvent = "message_delete"
	MessageClearEvent  = "message_clear"
	PollRequestEvent   = "poll_request"
)
|
||||||
|
|
||||||
|
// MessageIDLength is the length of a randomly generated message ID
|
||||||
|
const MessageIDLength = 12
|
||||||
|
|
||||||
|
// Errors for message operations
|
||||||
|
var (
|
||||||
|
ErrUnexpectedMessageType = errors.New("unexpected message type")
|
||||||
|
ErrMessageNotFound = errors.New("message not found")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Message represents a message published to a topic
|
||||||
|
type Message struct {
|
||||||
|
ID string `json:"id"` // Random message ID
|
||||||
|
SequenceID string `json:"sequence_id,omitempty"` // Message sequence ID for updating message contents (omitted if same as ID)
|
||||||
|
Time int64 `json:"time"` // Unix time in seconds
|
||||||
|
Expires int64 `json:"expires,omitempty"` // Unix time in seconds (not required for open/keepalive)
|
||||||
|
Event string `json:"event"` // One of the above
|
||||||
|
Topic string `json:"topic"`
|
||||||
|
Title string `json:"title,omitempty"`
|
||||||
|
Message string `json:"message,omitempty"`
|
||||||
|
Priority int `json:"priority,omitempty"`
|
||||||
|
Tags []string `json:"tags,omitempty"`
|
||||||
|
Click string `json:"click,omitempty"`
|
||||||
|
Icon string `json:"icon,omitempty"`
|
||||||
|
Actions []*Action `json:"actions,omitempty"`
|
||||||
|
Attachment *Attachment `json:"attachment,omitempty"`
|
||||||
|
PollID string `json:"poll_id,omitempty"`
|
||||||
|
ContentType string `json:"content_type,omitempty"` // text/plain by default (if empty), or text/markdown
|
||||||
|
Encoding string `json:"encoding,omitempty"` // Empty for raw UTF-8, or "base64" for encoded bytes
|
||||||
|
Sender netip.Addr `json:"-"` // IP address of uploader, used for rate limiting
|
||||||
|
User string `json:"-"` // UserID of the uploader, used to associated attachments
|
||||||
|
}
|
||||||
|
|
||||||
|
// Context returns a log context for the message
|
||||||
|
func (m *Message) Context() log.Context {
|
||||||
|
fields := map[string]any{
|
||||||
|
"topic": m.Topic,
|
||||||
|
"message_id": m.ID,
|
||||||
|
"message_sequence_id": m.SequenceID,
|
||||||
|
"message_time": m.Time,
|
||||||
|
"message_event": m.Event,
|
||||||
|
"message_body_size": len(m.Message),
|
||||||
|
}
|
||||||
|
if m.Sender.IsValid() {
|
||||||
|
fields["message_sender"] = m.Sender.String()
|
||||||
|
}
|
||||||
|
if m.User != "" {
|
||||||
|
fields["message_user"] = m.User
|
||||||
|
}
|
||||||
|
return fields
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForJSON returns a copy of the message suitable for JSON output.
|
||||||
|
// It clears the SequenceID if it equals the ID to reduce redundancy.
|
||||||
|
func (m *Message) ForJSON() *Message {
|
||||||
|
if m.SequenceID == m.ID {
|
||||||
|
clone := *m
|
||||||
|
clone.SequenceID = ""
|
||||||
|
return &clone
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attachment describes a file attached to a message. URL is where the file
// can be downloaded; Expires (Unix seconds) is zero for externally hosted
// attachments whose lifetime is unknown.
type Attachment struct {
	Name    string `json:"name"`              // file name
	Type    string `json:"type,omitempty"`    // MIME type, e.g. "image/jpeg"
	Size    int64  `json:"size,omitempty"`    // size in bytes, if known
	Expires int64  `json:"expires,omitempty"` // expiry, Unix seconds; 0 if unknown
	URL     string `json:"url"`               // download URL
}
|
||||||
|
|
||||||
|
// Action is a user-defined action button attached to a message. Which
// fields apply depends on the Action kind: "view" and "http" use URL;
// "http" additionally uses Method, Headers and Body; "broadcast" uses
// Intent and Extras; "copy" uses Value.
type Action struct {
	ID      string            `json:"id"`                // unique action ID
	Action  string            `json:"action"`            // "view", "broadcast", "http", or "copy"
	Label   string            `json:"label"`             // button label shown to the user
	Clear   bool              `json:"clear"`             // clear the notification after the action succeeds
	URL     string            `json:"url,omitempty"`     // target URL ("view", "http")
	Method  string            `json:"method,omitempty"`  // HTTP method ("http"); note: default is POST, not GET
	Headers map[string]string `json:"headers,omitempty"` // request headers ("http")
	Body    string            `json:"body,omitempty"`    // request body ("http")
	Intent  string            `json:"intent,omitempty"`  // intent name ("broadcast")
	Extras  map[string]string `json:"extras,omitempty"`  // intent extras ("broadcast")
	Value   string            `json:"value,omitempty"`   // clipboard value ("copy")
}

// NewAction returns an Action with its Headers and Extras maps allocated,
// so callers can assign entries without a nil-map panic.
func NewAction() *Action {
	return &Action{
		Headers: map[string]string{},
		Extras:  map[string]string{},
	}
}
|
||||||
|
|
||||||
|
// NewMessage creates a new message with the current timestamp
|
||||||
|
func NewMessage(event, topic, msg string) *Message {
|
||||||
|
return &Message{
|
||||||
|
ID: util.RandomString(MessageIDLength),
|
||||||
|
Time: time.Now().Unix(),
|
||||||
|
Event: event,
|
||||||
|
Topic: topic,
|
||||||
|
Message: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewOpenMessage is a convenience method to create an open message
|
||||||
|
func NewOpenMessage(topic string) *Message {
|
||||||
|
return NewMessage(OpenEvent, topic, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewKeepaliveMessage is a convenience method to create a keepalive message
|
||||||
|
func NewKeepaliveMessage(topic string) *Message {
|
||||||
|
return NewMessage(KeepaliveEvent, topic, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDefaultMessage is a convenience method to create a notification message
|
||||||
|
func NewDefaultMessage(topic, msg string) *Message {
|
||||||
|
return NewMessage(MessageEvent, topic, msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewActionMessage creates a new action message (message_delete or message_clear)
|
||||||
|
func NewActionMessage(event, topic, sequenceID string) *Message {
|
||||||
|
m := NewMessage(event, topic, "")
|
||||||
|
m.SequenceID = sequenceID
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPollRequestMessage is a convenience method to create a poll request message
|
||||||
|
func NewPollRequestMessage(topic, pollID string) *Message {
|
||||||
|
m := NewMessage(PollRequestEvent, topic, "New message")
|
||||||
|
m.PollID = pollID
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidMessageID returns true if the given string is a valid message ID
|
||||||
|
func ValidMessageID(s string) bool {
|
||||||
|
return util.ValidRandomString(s, MessageIDLength)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SinceMarker represents a point from which to replay cached messages:
// either a point in time, a concrete message ID, or one of the special
// markers declared below.
type SinceMarker struct {
	time time.Time
	id   string
}

// NewSinceTime returns a marker selecting messages after the given Unix timestamp.
func NewSinceTime(timestamp int64) SinceMarker {
	return SinceMarker{time: time.Unix(timestamp, 0)}
}

// NewSinceID returns a marker selecting messages after the message with the given ID.
func NewSinceID(id string) SinceMarker {
	return SinceMarker{time: time.Unix(0, 0), id: id}
}

// IsAll reports whether the marker requests the entire message history.
func (t SinceMarker) IsAll() bool {
	return t == SinceAllMessages
}

// IsNone reports whether the marker requests no historical messages.
func (t SinceMarker) IsNone() bool {
	return t == SinceNoMessages
}

// IsLatest reports whether the marker requests only the most recent message.
func (t SinceMarker) IsLatest() bool {
	return t == SinceLatestMessage
}

// IsID reports whether the marker references a concrete message ID
// (as opposed to being empty or the special "latest" marker).
func (t SinceMarker) IsID() bool {
	return t.id != "" && t.id != SinceLatestMessage.id
}

// Time returns the marker's time component.
func (t SinceMarker) Time() time.Time {
	return t.time
}

// ID returns the marker's message ID component ("" for time-based markers).
func (t SinceMarker) ID() string {
	return t.id
}

// Special markers understood by subscription handlers.
var (
	SinceAllMessages   = SinceMarker{time.Unix(0, 0), ""}       // replay the full history
	SinceNoMessages    = SinceMarker{time.Unix(1, 0), ""}       // replay nothing
	SinceLatestMessage = SinceMarker{time.Unix(0, 0), "latest"} // replay only the newest message
)
|
||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -39,7 +40,7 @@ type actionParser struct {
|
|||||||
// parseActions parses the actions string as described in https://ntfy.sh/docs/publish/#action-buttons.
|
// parseActions parses the actions string as described in https://ntfy.sh/docs/publish/#action-buttons.
|
||||||
// It supports both a JSON representation (if the string begins with "[", see parseActionsFromJSON),
|
// It supports both a JSON representation (if the string begins with "[", see parseActionsFromJSON),
|
||||||
// and the "simple" format, which is more human-readable, but harder to parse (see parseActionsFromSimple).
|
// and the "simple" format, which is more human-readable, but harder to parse (see parseActionsFromSimple).
|
||||||
func parseActions(s string) (actions []*action, err error) {
|
func parseActions(s string) (actions []*model.Action, err error) {
|
||||||
// Parse JSON or simple format
|
// Parse JSON or simple format
|
||||||
s = strings.TrimSpace(s)
|
s = strings.TrimSpace(s)
|
||||||
if strings.HasPrefix(s, "[") {
|
if strings.HasPrefix(s, "[") {
|
||||||
@@ -80,8 +81,8 @@ func parseActions(s string) (actions []*action, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// parseActionsFromJSON converts a JSON array into an array of actions
|
// parseActionsFromJSON converts a JSON array into an array of actions
|
||||||
func parseActionsFromJSON(s string) ([]*action, error) {
|
func parseActionsFromJSON(s string) ([]*model.Action, error) {
|
||||||
actions := make([]*action, 0)
|
actions := make([]*model.Action, 0)
|
||||||
if err := json.Unmarshal([]byte(s), &actions); err != nil {
|
if err := json.Unmarshal([]byte(s), &actions); err != nil {
|
||||||
return nil, fmt.Errorf("JSON error: %w", err)
|
return nil, fmt.Errorf("JSON error: %w", err)
|
||||||
}
|
}
|
||||||
@@ -107,7 +108,7 @@ func parseActionsFromJSON(s string) ([]*action, error) {
|
|||||||
// https://github.com/adampresley/sample-ini-parser/blob/master/services/lexer/lexer/Lexer.go
|
// https://github.com/adampresley/sample-ini-parser/blob/master/services/lexer/lexer/Lexer.go
|
||||||
// https://github.com/benbjohnson/sql-parser/blob/master/scanner.go
|
// https://github.com/benbjohnson/sql-parser/blob/master/scanner.go
|
||||||
// https://blog.gopheracademy.com/advent-2014/parsers-lexers/
|
// https://blog.gopheracademy.com/advent-2014/parsers-lexers/
|
||||||
func parseActionsFromSimple(s string) ([]*action, error) {
|
func parseActionsFromSimple(s string) ([]*model.Action, error) {
|
||||||
if !utf8.ValidString(s) {
|
if !utf8.ValidString(s) {
|
||||||
return nil, errors.New("invalid utf-8 string")
|
return nil, errors.New("invalid utf-8 string")
|
||||||
}
|
}
|
||||||
@@ -119,8 +120,8 @@ func parseActionsFromSimple(s string) ([]*action, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Parse loops trough parseAction() until the end of the string is reached
|
// Parse loops trough parseAction() until the end of the string is reached
|
||||||
func (p *actionParser) Parse() ([]*action, error) {
|
func (p *actionParser) Parse() ([]*model.Action, error) {
|
||||||
actions := make([]*action, 0)
|
actions := make([]*model.Action, 0)
|
||||||
for !p.eof() {
|
for !p.eof() {
|
||||||
a, err := p.parseAction()
|
a, err := p.parseAction()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -134,8 +135,8 @@ func (p *actionParser) Parse() ([]*action, error) {
|
|||||||
// parseAction parses the individual sections of an action using parseSection into key/value pairs,
|
// parseAction parses the individual sections of an action using parseSection into key/value pairs,
|
||||||
// and then uses populateAction to interpret the keys/values. The function terminates
|
// and then uses populateAction to interpret the keys/values. The function terminates
|
||||||
// when EOF or ";" is reached.
|
// when EOF or ";" is reached.
|
||||||
func (p *actionParser) parseAction() (*action, error) {
|
func (p *actionParser) parseAction() (*model.Action, error) {
|
||||||
a := newAction()
|
a := model.NewAction()
|
||||||
section := 0
|
section := 0
|
||||||
for {
|
for {
|
||||||
key, value, last, err := p.parseSection()
|
key, value, last, err := p.parseSection()
|
||||||
@@ -155,7 +156,7 @@ func (p *actionParser) parseAction() (*action, error) {
|
|||||||
|
|
||||||
// populateAction is the "business logic" of the parser. It applies the key/value
|
// populateAction is the "business logic" of the parser. It applies the key/value
|
||||||
// pair to the action instance.
|
// pair to the action instance.
|
||||||
func populateAction(newAction *action, section int, key, value string) error {
|
func populateAction(newAction *model.Action, section int, key, value string) error {
|
||||||
// Auto-expand keys based on their index
|
// Auto-expand keys based on their index
|
||||||
if key == "" && section == 0 {
|
if key == "" && section == 0 {
|
||||||
key = "action"
|
key = "action"
|
||||||
|
|||||||
@@ -95,6 +95,7 @@ type Config struct {
|
|||||||
ListenUnixMode fs.FileMode
|
ListenUnixMode fs.FileMode
|
||||||
KeyFile string
|
KeyFile string
|
||||||
CertFile string
|
CertFile string
|
||||||
|
DatabaseURL string // PostgreSQL connection string (e.g. "postgres://user:pass@host:5432/ntfy")
|
||||||
FirebaseKeyFile string
|
FirebaseKeyFile string
|
||||||
CacheFile string
|
CacheFile string
|
||||||
CacheDuration time.Duration
|
CacheDuration time.Duration
|
||||||
@@ -199,6 +200,7 @@ func NewConfig() *Config {
|
|||||||
ListenUnixMode: 0,
|
ListenUnixMode: 0,
|
||||||
KeyFile: "",
|
KeyFile: "",
|
||||||
CertFile: "",
|
CertFile: "",
|
||||||
|
DatabaseURL: "",
|
||||||
FirebaseKeyFile: "",
|
FirebaseKeyFile: "",
|
||||||
CacheFile: "",
|
CacheFile: "",
|
||||||
CacheDuration: DefaultCacheDuration,
|
CacheDuration: DefaultCacheDuration,
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"heckel.io/ntfy/v2/log"
|
"heckel.io/ntfy/v2/log"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
@@ -13,7 +14,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
fileIDRegex = regexp.MustCompile(fmt.Sprintf(`^[-_A-Za-z0-9]{%d}$`, messageIDLength))
|
fileIDRegex = regexp.MustCompile(fmt.Sprintf(`^[-_A-Za-z0-9]{%d}$`, model.MessageIDLength))
|
||||||
errInvalidFileID = errors.New("invalid file ID")
|
errInvalidFileID = errors.New("invalid file ID")
|
||||||
errFileExists = errors.New("file exists")
|
errFileExists = errors.New("file exists")
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
"github.com/emersion/go-smtp"
|
"github.com/emersion/go-smtp"
|
||||||
"github.com/gorilla/websocket"
|
"github.com/gorilla/websocket"
|
||||||
"heckel.io/ntfy/v2/log"
|
"heckel.io/ntfy/v2/log"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -55,12 +56,12 @@ func logvr(v *visitor, r *http.Request) *log.Event {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// logvrm creates a new log event with HTTP request, visitor fields and message fields
|
// logvrm creates a new log event with HTTP request, visitor fields and message fields
|
||||||
func logvrm(v *visitor, r *http.Request, m *message) *log.Event {
|
func logvrm(v *visitor, r *http.Request, m *model.Message) *log.Event {
|
||||||
return logvr(v, r).With(m)
|
return logvr(v, r).With(m)
|
||||||
}
|
}
|
||||||
|
|
||||||
// logvrm creates a new log event with visitor fields and message fields
|
// logvrm creates a new log event with visitor fields and message fields
|
||||||
func logvm(v *visitor, m *message) *log.Event {
|
func logvm(v *visitor, m *model.Message) *log.Event {
|
||||||
return logv(v).With(m)
|
return logv(v).With(m)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -1,825 +0,0 @@
|
|||||||
package server
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql"
|
|
||||||
"fmt"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"net/netip"
|
|
||||||
"path/filepath"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestSqliteCache_Messages runs the shared message tests against the SQLite-backed cache.
func TestSqliteCache_Messages(t *testing.T) {
	testCacheMessages(t, newSqliteTestCache(t))
}

// TestMemCache_Messages runs the shared message tests against the in-memory cache.
func TestMemCache_Messages(t *testing.T) {
	testCacheMessages(t, newMemTestCache(t))
}

// testCacheMessages exercises basic add/read behavior shared by all cache
// implementations: per-topic counts, the "since" selectors (all, none,
// by ID, by time, latest), and rejection of non-"message" events.
func testCacheMessages(t *testing.T, c *messageCache) {
	m1 := newDefaultMessage("mytopic", "my message")
	m1.Time = 1

	m2 := newDefaultMessage("mytopic", "my other message")
	m2.Time = 2

	require.Nil(t, c.AddMessage(m1))
	require.Nil(t, c.AddMessage(newDefaultMessage("example", "my example message")))
	require.Nil(t, c.AddMessage(m2))

	// Adding invalid: only "message" events may be cached
	require.Equal(t, errUnexpectedMessageType, c.AddMessage(newKeepaliveMessage("mytopic"))) // These should not be added!
	require.Equal(t, errUnexpectedMessageType, c.AddMessage(newOpenMessage("example")))      // These should not be added!

	// mytopic: count
	counts, err := c.MessageCounts()
	require.Nil(t, err)
	require.Equal(t, 2, counts["mytopic"])

	// mytopic: since all (messages come back oldest first)
	messages, _ := c.Messages("mytopic", sinceAllMessages, false)
	require.Equal(t, 2, len(messages))
	require.Equal(t, "my message", messages[0].Message)
	require.Equal(t, "mytopic", messages[0].Topic)
	require.Equal(t, messageEvent, messages[0].Event)
	require.Equal(t, "", messages[0].Title)
	require.Equal(t, 0, messages[0].Priority)
	require.Nil(t, messages[0].Tags)
	require.Equal(t, "my other message", messages[1].Message)

	// mytopic: since none
	messages, _ = c.Messages("mytopic", sinceNoMessages, false)
	require.Empty(t, messages)

	// mytopic: since m1 (by ID) — excludes m1 itself
	messages, _ = c.Messages("mytopic", newSinceID(m1.ID), false)
	require.Equal(t, 1, len(messages))
	require.Equal(t, m2.ID, messages[0].ID)
	require.Equal(t, "my other message", messages[0].Message)
	require.Equal(t, "mytopic", messages[0].Topic)

	// mytopic: since 2 (time-based)
	messages, _ = c.Messages("mytopic", newSinceTime(2), false)
	require.Equal(t, 1, len(messages))
	require.Equal(t, "my other message", messages[0].Message)

	// mytopic: latest returns only the newest message
	messages, _ = c.Messages("mytopic", sinceLatestMessage, false)
	require.Equal(t, 1, len(messages))
	require.Equal(t, "my other message", messages[0].Message)

	// example: count
	counts, err = c.MessageCounts()
	require.Nil(t, err)
	require.Equal(t, 1, counts["example"])

	// example: since all
	messages, _ = c.Messages("example", sinceAllMessages, false)
	require.Equal(t, "my example message", messages[0].Message)

	// non-existing topic: zero count, no messages
	counts, err = c.MessageCounts()
	require.Nil(t, err)
	require.Equal(t, 0, counts["doesnotexist"])

	// non-existing: since all
	messages, _ = c.Messages("doesnotexist", sinceAllMessages, false)
	require.Empty(t, messages)
}
|
|
||||||
|
|
||||||
func TestSqliteCache_MessagesLock(t *testing.T) {
|
|
||||||
testCacheMessagesLock(t, newSqliteTestCache(t))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemCache_MessagesLock(t *testing.T) {
|
|
||||||
testCacheMessagesLock(t, newMemTestCache(t))
|
|
||||||
}
|
|
||||||
|
|
||||||
func testCacheMessagesLock(t *testing.T, c *messageCache) {
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
for i := 0; i < 5000; i++ {
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
assert.Nil(t, c.AddMessage(newDefaultMessage("mytopic", "test message")))
|
|
||||||
wg.Done()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSqliteCache_MessagesScheduled(t *testing.T) {
|
|
||||||
testCacheMessagesScheduled(t, newSqliteTestCache(t))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemCache_MessagesScheduled(t *testing.T) {
|
|
||||||
testCacheMessagesScheduled(t, newMemTestCache(t))
|
|
||||||
}
|
|
||||||
|
|
||||||
func testCacheMessagesScheduled(t *testing.T, c *messageCache) {
|
|
||||||
m1 := newDefaultMessage("mytopic", "message 1")
|
|
||||||
m2 := newDefaultMessage("mytopic", "message 2")
|
|
||||||
m2.Time = time.Now().Add(time.Hour).Unix()
|
|
||||||
m3 := newDefaultMessage("mytopic", "message 3")
|
|
||||||
m3.Time = time.Now().Add(time.Minute).Unix() // earlier than m2!
|
|
||||||
m4 := newDefaultMessage("mytopic2", "message 4")
|
|
||||||
m4.Time = time.Now().Add(time.Minute).Unix()
|
|
||||||
require.Nil(t, c.AddMessage(m1))
|
|
||||||
require.Nil(t, c.AddMessage(m2))
|
|
||||||
require.Nil(t, c.AddMessage(m3))
|
|
||||||
|
|
||||||
messages, _ := c.Messages("mytopic", sinceAllMessages, false) // exclude scheduled
|
|
||||||
require.Equal(t, 1, len(messages))
|
|
||||||
require.Equal(t, "message 1", messages[0].Message)
|
|
||||||
|
|
||||||
messages, _ = c.Messages("mytopic", sinceAllMessages, true) // include scheduled
|
|
||||||
require.Equal(t, 3, len(messages))
|
|
||||||
require.Equal(t, "message 1", messages[0].Message)
|
|
||||||
require.Equal(t, "message 3", messages[1].Message) // Order!
|
|
||||||
require.Equal(t, "message 2", messages[2].Message)
|
|
||||||
|
|
||||||
messages, _ = c.MessagesDue()
|
|
||||||
require.Empty(t, messages)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSqliteCache_Topics(t *testing.T) {
|
|
||||||
testCacheTopics(t, newSqliteTestCache(t))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemCache_Topics(t *testing.T) {
|
|
||||||
testCacheTopics(t, newMemTestCache(t))
|
|
||||||
}
|
|
||||||
|
|
||||||
func testCacheTopics(t *testing.T, c *messageCache) {
|
|
||||||
require.Nil(t, c.AddMessage(newDefaultMessage("topic1", "my example message")))
|
|
||||||
require.Nil(t, c.AddMessage(newDefaultMessage("topic2", "message 1")))
|
|
||||||
require.Nil(t, c.AddMessage(newDefaultMessage("topic2", "message 2")))
|
|
||||||
require.Nil(t, c.AddMessage(newDefaultMessage("topic2", "message 3")))
|
|
||||||
|
|
||||||
topics, err := c.Topics()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
require.Equal(t, 2, len(topics))
|
|
||||||
require.Equal(t, "topic1", topics["topic1"].ID)
|
|
||||||
require.Equal(t, "topic2", topics["topic2"].ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSqliteCache_MessagesTagsPrioAndTitle(t *testing.T) {
|
|
||||||
testCacheMessagesTagsPrioAndTitle(t, newSqliteTestCache(t))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemCache_MessagesTagsPrioAndTitle(t *testing.T) {
|
|
||||||
testCacheMessagesTagsPrioAndTitle(t, newMemTestCache(t))
|
|
||||||
}
|
|
||||||
|
|
||||||
func testCacheMessagesTagsPrioAndTitle(t *testing.T, c *messageCache) {
|
|
||||||
m := newDefaultMessage("mytopic", "some message")
|
|
||||||
m.Tags = []string{"tag1", "tag2"}
|
|
||||||
m.Priority = 5
|
|
||||||
m.Title = "some title"
|
|
||||||
require.Nil(t, c.AddMessage(m))
|
|
||||||
|
|
||||||
messages, _ := c.Messages("mytopic", sinceAllMessages, false)
|
|
||||||
require.Equal(t, []string{"tag1", "tag2"}, messages[0].Tags)
|
|
||||||
require.Equal(t, 5, messages[0].Priority)
|
|
||||||
require.Equal(t, "some title", messages[0].Title)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestSqliteCache_MessagesSinceID runs the since-ID tests against the SQLite-backed cache.
func TestSqliteCache_MessagesSinceID(t *testing.T) {
	testCacheMessagesSinceID(t, newSqliteTestCache(t))
}

// TestMemCache_MessagesSinceID runs the since-ID tests against the in-memory cache.
func TestMemCache_MessagesSinceID(t *testing.T) {
	testCacheMessagesSinceID(t, newMemTestCache(t))
}

// testCacheMessagesSinceID verifies reading messages relative to a message ID:
// messages after the given ID are returned (the referenced message itself is
// excluded), scheduled (future-timed) messages are appended in time order only
// when requested, and an unknown ID falls back to returning the full history.
func testCacheMessagesSinceID(t *testing.T, c *messageCache) {
	m1 := newDefaultMessage("mytopic", "message 1")
	m1.Time = 100
	m2 := newDefaultMessage("mytopic", "message 2")
	m2.Time = 200
	m3 := newDefaultMessage("mytopic", "message 3")
	m3.Time = time.Now().Add(time.Hour).Unix() // Scheduled, in the future, later than m7 and m5
	m4 := newDefaultMessage("mytopic", "message 4")
	m4.Time = 400
	m5 := newDefaultMessage("mytopic", "message 5")
	m5.Time = time.Now().Add(time.Minute).Unix() // Scheduled, in the future, later than m7
	m6 := newDefaultMessage("mytopic", "message 6")
	m6.Time = 600
	m7 := newDefaultMessage("mytopic", "message 7")
	m7.Time = 700

	require.Nil(t, c.AddMessage(m1))
	require.Nil(t, c.AddMessage(m2))
	require.Nil(t, c.AddMessage(m3))
	require.Nil(t, c.AddMessage(m4))
	require.Nil(t, c.AddMessage(m5))
	require.Nil(t, c.AddMessage(m6))
	require.Nil(t, c.AddMessage(m7))

	// Case 1: Since ID exists, exclude scheduled
	messages, _ := c.Messages("mytopic", newSinceID(m2.ID), false)
	require.Equal(t, 3, len(messages))
	require.Equal(t, "message 4", messages[0].Message)
	require.Equal(t, "message 6", messages[1].Message) // Not scheduled m3/m5!
	require.Equal(t, "message 7", messages[2].Message)

	// Case 2: Since ID exists, include scheduled (scheduled sort by time, after the rest)
	messages, _ = c.Messages("mytopic", newSinceID(m2.ID), true)
	require.Equal(t, 5, len(messages))
	require.Equal(t, "message 4", messages[0].Message)
	require.Equal(t, "message 6", messages[1].Message)
	require.Equal(t, "message 7", messages[2].Message)
	require.Equal(t, "message 5", messages[3].Message) // Order!
	require.Equal(t, "message 3", messages[4].Message) // Order!

	// Case 3: Since ID does not exist (-> Return all messages), include scheduled
	messages, _ = c.Messages("mytopic", newSinceID("doesntexist"), true)
	require.Equal(t, 7, len(messages))
	require.Equal(t, "message 1", messages[0].Message)
	require.Equal(t, "message 2", messages[1].Message)
	require.Equal(t, "message 4", messages[2].Message)
	require.Equal(t, "message 6", messages[3].Message)
	require.Equal(t, "message 7", messages[4].Message)
	require.Equal(t, "message 5", messages[5].Message) // Order!
	require.Equal(t, "message 3", messages[6].Message) // Order!

	// Case 4: Since ID exists and is last message (-> Return no messages), exclude scheduled
	messages, _ = c.Messages("mytopic", newSinceID(m7.ID), false)
	require.Equal(t, 0, len(messages))

	// Case 5: Since ID exists and is last message (-> Return no messages), include scheduled
	messages, _ = c.Messages("mytopic", newSinceID(m7.ID), true)
	require.Equal(t, 2, len(messages))
	require.Equal(t, "message 5", messages[0].Message)
	require.Equal(t, "message 3", messages[1].Message)
}
|
|
||||||
|
|
||||||
// TestSqliteCache_Prune runs the expiry/pruning tests against the SQLite-backed cache.
func TestSqliteCache_Prune(t *testing.T) {
	testCachePrune(t, newSqliteTestCache(t))
}

// TestMemCache_Prune runs the expiry/pruning tests against the in-memory cache.
func TestMemCache_Prune(t *testing.T) {
	testCachePrune(t, newMemTestCache(t))
}

// testCachePrune verifies that expired messages are reported by
// MessagesExpired() and disappear from counts and reads once deleted,
// while unexpired messages survive.
func testCachePrune(t *testing.T, c *messageCache) {
	now := time.Now().Unix()

	m1 := newDefaultMessage("mytopic", "my message")
	m1.Time = now - 10
	m1.Expires = now - 5 // already expired

	m2 := newDefaultMessage("mytopic", "my other message")
	m2.Time = now - 5
	m2.Expires = now + 5 // In the future

	m3 := newDefaultMessage("another_topic", "and another one")
	m3.Time = now - 12
	m3.Expires = now - 2 // already expired

	require.Nil(t, c.AddMessage(m1))
	require.Nil(t, c.AddMessage(m2))
	require.Nil(t, c.AddMessage(m3))

	// Before pruning: all three messages are counted
	counts, err := c.MessageCounts()
	require.Nil(t, err)
	require.Equal(t, 2, counts["mytopic"])
	require.Equal(t, 1, counts["another_topic"])

	// Prune: delete everything MessagesExpired() reports (m1 and m3)
	expiredMessageIDs, err := c.MessagesExpired()
	require.Nil(t, err)
	require.Nil(t, c.DeleteMessages(expiredMessageIDs...))

	// After pruning: only the unexpired m2 remains
	counts, err = c.MessageCounts()
	require.Nil(t, err)
	require.Equal(t, 1, counts["mytopic"])
	require.Equal(t, 0, counts["another_topic"])

	messages, err := c.Messages("mytopic", sinceAllMessages, false)
	require.Nil(t, err)
	require.Equal(t, 1, len(messages))
	require.Equal(t, "my other message", messages[0].Message)
}
|
|
||||||
|
|
||||||
// TestSqliteCache_Attachments runs the attachment tests against the SQLite-backed cache.
func TestSqliteCache_Attachments(t *testing.T) {
	testCacheAttachments(t, newSqliteTestCache(t))
}

// TestMemCache_Attachments runs the attachment tests against the in-memory cache.
func TestMemCache_Attachments(t *testing.T) {
	testCacheAttachments(t, newMemTestCache(t))
}

// testCacheAttachments verifies that attachment metadata round-trips through
// the cache, and that attachment size accounting attributes usage to the
// user ID when one is set, falling back to the sender IP otherwise. Expired
// attachments are excluded from the size accounting.
func testCacheAttachments(t *testing.T, c *messageCache) {
	// m1: attachment already expired; should NOT count toward sender usage below
	expires1 := time.Now().Add(-4 * time.Hour).Unix() // Expired
	m := newDefaultMessage("mytopic", "flower for you")
	m.ID = "m1"
	m.SequenceID = "m1"
	m.Sender = netip.MustParseAddr("1.2.3.4")
	m.Attachment = &attachment{
		Name:    "flower.jpg",
		Type:    "image/jpeg",
		Size:    5000,
		Expires: expires1,
		URL:     "https://ntfy.sh/file/AbDeFgJhal.jpg",
	}
	require.Nil(t, c.AddMessage(m))

	// m2: unexpired attachment from the same sender IP, no user
	expires2 := time.Now().Add(2 * time.Hour).Unix() // Future
	m = newDefaultMessage("mytopic", "sending you a car")
	m.ID = "m2"
	m.SequenceID = "m2"
	m.Sender = netip.MustParseAddr("1.2.3.4")
	m.Attachment = &attachment{
		Name:    "car.jpg",
		Type:    "image/jpeg",
		Size:    10000,
		Expires: expires2,
		URL:     "https://ntfy.sh/file/aCaRURL.jpg",
	}
	require.Nil(t, c.AddMessage(m))

	// m3: unexpired attachment with a user ID set; usage should be charged
	// to the user, not the sender IP
	expires3 := time.Now().Add(1 * time.Hour).Unix() // Future
	m = newDefaultMessage("another-topic", "sending you another car")
	m.ID = "m3"
	m.SequenceID = "m3"
	m.User = "u_BAsbaAa"
	m.Sender = netip.MustParseAddr("5.6.7.8")
	m.Attachment = &attachment{
		Name:    "another-car.jpg",
		Type:    "image/jpeg",
		Size:    20000,
		Expires: expires3,
		URL:     "https://ntfy.sh/file/zakaDHFW.jpg",
	}
	require.Nil(t, c.AddMessage(m))

	// Attachment metadata must round-trip unchanged
	messages, err := c.Messages("mytopic", sinceAllMessages, false)
	require.Nil(t, err)
	require.Equal(t, 2, len(messages))

	require.Equal(t, "flower for you", messages[0].Message)
	require.Equal(t, "flower.jpg", messages[0].Attachment.Name)
	require.Equal(t, "image/jpeg", messages[0].Attachment.Type)
	require.Equal(t, int64(5000), messages[0].Attachment.Size)
	require.Equal(t, expires1, messages[0].Attachment.Expires)
	require.Equal(t, "https://ntfy.sh/file/AbDeFgJhal.jpg", messages[0].Attachment.URL)
	require.Equal(t, "1.2.3.4", messages[0].Sender.String())

	require.Equal(t, "sending you a car", messages[1].Message)
	require.Equal(t, "car.jpg", messages[1].Attachment.Name)
	require.Equal(t, "image/jpeg", messages[1].Attachment.Type)
	require.Equal(t, int64(10000), messages[1].Attachment.Size)
	require.Equal(t, expires2, messages[1].Attachment.Expires)
	require.Equal(t, "https://ntfy.sh/file/aCaRURL.jpg", messages[1].Attachment.URL)
	require.Equal(t, "1.2.3.4", messages[1].Sender.String())

	// Only m2 counts: m1's attachment is expired
	size, err := c.AttachmentBytesUsedBySender("1.2.3.4")
	require.Nil(t, err)
	require.Equal(t, int64(10000), size)

	size, err = c.AttachmentBytesUsedBySender("5.6.7.8")
	require.Nil(t, err)
	require.Equal(t, int64(0), size) // Accounted to the user, not the IP!

	size, err = c.AttachmentBytesUsedByUser("u_BAsbaAa")
	require.Nil(t, err)
	require.Equal(t, int64(20000), size)
}
|
|
||||||
|
|
||||||
// TestSqliteCache_Attachments_Expired runs the expired-attachment detection test
// against the SQLite-backed message cache.
func TestSqliteCache_Attachments_Expired(t *testing.T) {
	testCacheAttachmentsExpired(t, newSqliteTestCache(t))
}
|
|
||||||
|
|
||||||
// TestMemCache_Attachments_Expired runs the expired-attachment detection test
// against the in-memory message cache.
func TestMemCache_Attachments_Expired(t *testing.T) {
	testCacheAttachmentsExpired(t, newMemTestCache(t))
}
|
|
||||||
|
|
||||||
func testCacheAttachmentsExpired(t *testing.T, c *messageCache) {
|
|
||||||
m := newDefaultMessage("mytopic", "flower for you")
|
|
||||||
m.ID = "m1"
|
|
||||||
m.SequenceID = "m1"
|
|
||||||
m.Expires = time.Now().Add(time.Hour).Unix()
|
|
||||||
require.Nil(t, c.AddMessage(m))
|
|
||||||
|
|
||||||
m = newDefaultMessage("mytopic", "message with attachment")
|
|
||||||
m.ID = "m2"
|
|
||||||
m.SequenceID = "m2"
|
|
||||||
m.Expires = time.Now().Add(2 * time.Hour).Unix()
|
|
||||||
m.Attachment = &attachment{
|
|
||||||
Name: "car.jpg",
|
|
||||||
Type: "image/jpeg",
|
|
||||||
Size: 10000,
|
|
||||||
Expires: time.Now().Add(2 * time.Hour).Unix(),
|
|
||||||
URL: "https://ntfy.sh/file/aCaRURL.jpg",
|
|
||||||
}
|
|
||||||
require.Nil(t, c.AddMessage(m))
|
|
||||||
|
|
||||||
m = newDefaultMessage("mytopic", "message with external attachment")
|
|
||||||
m.ID = "m3"
|
|
||||||
m.SequenceID = "m3"
|
|
||||||
m.Expires = time.Now().Add(2 * time.Hour).Unix()
|
|
||||||
m.Attachment = &attachment{
|
|
||||||
Name: "car.jpg",
|
|
||||||
Type: "image/jpeg",
|
|
||||||
Expires: 0, // Unknown!
|
|
||||||
URL: "https://somedomain.com/car.jpg",
|
|
||||||
}
|
|
||||||
require.Nil(t, c.AddMessage(m))
|
|
||||||
|
|
||||||
m = newDefaultMessage("mytopic2", "message with expired attachment")
|
|
||||||
m.ID = "m4"
|
|
||||||
m.SequenceID = "m4"
|
|
||||||
m.Expires = time.Now().Add(2 * time.Hour).Unix()
|
|
||||||
m.Attachment = &attachment{
|
|
||||||
Name: "expired-car.jpg",
|
|
||||||
Type: "image/jpeg",
|
|
||||||
Size: 20000,
|
|
||||||
Expires: time.Now().Add(-1 * time.Hour).Unix(),
|
|
||||||
URL: "https://ntfy.sh/file/aCaRURL.jpg",
|
|
||||||
}
|
|
||||||
require.Nil(t, c.AddMessage(m))
|
|
||||||
|
|
||||||
ids, err := c.AttachmentsExpired()
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Equal(t, 1, len(ids))
|
|
||||||
require.Equal(t, "m4", ids[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestSqliteCache_Migration_From0 creates the original (pre-versioning) schema by
// hand, fills it with rows, and verifies that opening the cache migrates it to the
// current schema version without losing messages. Columns that did not exist in
// version 0 (title, tags, priority) must come back as zero values.
func TestSqliteCache_Migration_From0(t *testing.T) {
	filename := newSqliteTestCacheFile(t)
	db, err := sql.Open("sqlite3", filename)
	require.Nil(t, err)

	// Create "version 0" schema
	_, err = db.Exec(`
		BEGIN;
		CREATE TABLE IF NOT EXISTS messages (
			id VARCHAR(20) PRIMARY KEY,
			time INT NOT NULL,
			topic VARCHAR(64) NOT NULL,
			message VARCHAR(1024) NOT NULL
		);
		CREATE INDEX IF NOT EXISTS idx_topic ON messages (topic);
		COMMIT;
	`)
	require.Nil(t, err)

	// Insert a bunch of messages
	for i := 0; i < 10; i++ {
		_, err = db.Exec(`INSERT INTO messages (id, time, topic, message) VALUES (?, ?, ?, ?)`,
			fmt.Sprintf("abcd%d", i), time.Now().Unix(), "mytopic", fmt.Sprintf("some message %d", i))
		require.Nil(t, err)
	}
	require.Nil(t, db.Close())

	// Create cache to trigger migration
	c := newSqliteTestCacheFromFile(t, filename, "")
	checkSchemaVersion(t, c.db)

	// All ten rows survive the migration; new columns are zero-valued
	messages, err := c.Messages("mytopic", sinceAllMessages, false)
	require.Nil(t, err)
	require.Equal(t, 10, len(messages))
	require.Equal(t, "some message 5", messages[5].Message)
	require.Equal(t, "", messages[5].Title)
	require.Nil(t, messages[5].Tags)
	require.Equal(t, 0, messages[5].Priority)
}
|
|
||||||
|
|
||||||
func TestSqliteCache_Migration_From1(t *testing.T) {
|
|
||||||
filename := newSqliteTestCacheFile(t)
|
|
||||||
db, err := sql.Open("sqlite3", filename)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
// Create "version 1" schema
|
|
||||||
_, err = db.Exec(`
|
|
||||||
CREATE TABLE IF NOT EXISTS messages (
|
|
||||||
id VARCHAR(20) PRIMARY KEY,
|
|
||||||
time INT NOT NULL,
|
|
||||||
topic VARCHAR(64) NOT NULL,
|
|
||||||
message VARCHAR(512) NOT NULL,
|
|
||||||
title VARCHAR(256) NOT NULL,
|
|
||||||
priority INT NOT NULL,
|
|
||||||
tags VARCHAR(256) NOT NULL
|
|
||||||
);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_topic ON messages (topic);
|
|
||||||
CREATE TABLE IF NOT EXISTS schemaVersion (
|
|
||||||
id INT PRIMARY KEY,
|
|
||||||
version INT NOT NULL
|
|
||||||
);
|
|
||||||
INSERT INTO schemaVersion (id, version) VALUES (1, 1);
|
|
||||||
`)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
// Insert a bunch of messages
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
_, err = db.Exec(`INSERT INTO messages (id, time, topic, message, title, priority, tags) VALUES (?, ?, ?, ?, ?, ?, ?)`,
|
|
||||||
fmt.Sprintf("abcd%d", i), time.Now().Unix(), "mytopic", fmt.Sprintf("some message %d", i), "", 0, "")
|
|
||||||
require.Nil(t, err)
|
|
||||||
}
|
|
||||||
require.Nil(t, db.Close())
|
|
||||||
|
|
||||||
// Create cache to trigger migration
|
|
||||||
c := newSqliteTestCacheFromFile(t, filename, "")
|
|
||||||
checkSchemaVersion(t, c.db)
|
|
||||||
|
|
||||||
// Add delayed message
|
|
||||||
delayedMessage := newDefaultMessage("mytopic", "some delayed message")
|
|
||||||
delayedMessage.Time = time.Now().Add(time.Minute).Unix()
|
|
||||||
require.Nil(t, c.AddMessage(delayedMessage))
|
|
||||||
|
|
||||||
// 10, not 11!
|
|
||||||
messages, err := c.Messages("mytopic", sinceAllMessages, false)
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Equal(t, 10, len(messages))
|
|
||||||
|
|
||||||
// 11!
|
|
||||||
messages, err = c.Messages("mytopic", sinceAllMessages, true)
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Equal(t, 11, len(messages))
|
|
||||||
|
|
||||||
// Check that index "idx_topic" exists
|
|
||||||
rows, err := c.db.Query(`SELECT name FROM sqlite_master WHERE type='index' AND name='idx_topic'`)
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.True(t, rows.Next())
|
|
||||||
var indexName string
|
|
||||||
require.Nil(t, rows.Scan(&indexName))
|
|
||||||
require.Equal(t, "idx_topic", indexName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSqliteCache_Migration_From9(t *testing.T) {
|
|
||||||
// This primarily tests the awkward migration that introduces the "expires" column.
|
|
||||||
// The migration logic has to update the column, using the existing "cache-duration" value.
|
|
||||||
|
|
||||||
filename := newSqliteTestCacheFile(t)
|
|
||||||
db, err := sql.Open("sqlite3", filename)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
// Create "version 8" schema
|
|
||||||
_, err = db.Exec(`
|
|
||||||
BEGIN;
|
|
||||||
CREATE TABLE IF NOT EXISTS messages (
|
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
mid TEXT NOT NULL,
|
|
||||||
time INT NOT NULL,
|
|
||||||
topic TEXT NOT NULL,
|
|
||||||
message TEXT NOT NULL,
|
|
||||||
title TEXT NOT NULL,
|
|
||||||
priority INT NOT NULL,
|
|
||||||
tags TEXT NOT NULL,
|
|
||||||
click TEXT NOT NULL,
|
|
||||||
icon TEXT NOT NULL,
|
|
||||||
actions TEXT NOT NULL,
|
|
||||||
attachment_name TEXT NOT NULL,
|
|
||||||
attachment_type TEXT NOT NULL,
|
|
||||||
attachment_size INT NOT NULL,
|
|
||||||
attachment_expires INT NOT NULL,
|
|
||||||
attachment_url TEXT NOT NULL,
|
|
||||||
sender TEXT NOT NULL,
|
|
||||||
encoding TEXT NOT NULL,
|
|
||||||
published INT NOT NULL
|
|
||||||
);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_mid ON messages (mid);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_time ON messages (time);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_topic ON messages (topic);
|
|
||||||
CREATE TABLE IF NOT EXISTS schemaVersion (
|
|
||||||
id INT PRIMARY KEY,
|
|
||||||
version INT NOT NULL
|
|
||||||
);
|
|
||||||
INSERT INTO schemaVersion (id, version) VALUES (1, 9);
|
|
||||||
COMMIT;
|
|
||||||
`)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
// Insert a bunch of messages
|
|
||||||
insertQuery := `
|
|
||||||
INSERT INTO messages (mid, time, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, sender, encoding, published)
|
|
||||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
|
||||||
`
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
_, err = db.Exec(
|
|
||||||
insertQuery,
|
|
||||||
fmt.Sprintf("abcd%d", i),
|
|
||||||
time.Now().Unix(),
|
|
||||||
"mytopic",
|
|
||||||
fmt.Sprintf("some message %d", i),
|
|
||||||
"", // title
|
|
||||||
0, // priority
|
|
||||||
"", // tags
|
|
||||||
"", // click
|
|
||||||
"", // icon
|
|
||||||
"", // actions
|
|
||||||
"", // attachment_name
|
|
||||||
"", // attachment_type
|
|
||||||
0, // attachment_size
|
|
||||||
0, // attachment_type
|
|
||||||
"", // attachment_url
|
|
||||||
"9.9.9.9", // sender
|
|
||||||
"", // encoding
|
|
||||||
1, // published
|
|
||||||
)
|
|
||||||
require.Nil(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create cache to trigger migration
|
|
||||||
cacheDuration := 17 * time.Hour
|
|
||||||
c, err := newSqliteCache(filename, "", cacheDuration, 0, 0, false)
|
|
||||||
require.Nil(t, err)
|
|
||||||
checkSchemaVersion(t, c.db)
|
|
||||||
|
|
||||||
// Check version
|
|
||||||
rows, err := db.Query(`SELECT version FROM main.schemaVersion WHERE id = 1`)
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.True(t, rows.Next())
|
|
||||||
var version int
|
|
||||||
require.Nil(t, rows.Scan(&version))
|
|
||||||
require.Equal(t, currentSchemaVersion, version)
|
|
||||||
|
|
||||||
messages, err := c.Messages("mytopic", sinceAllMessages, false)
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Equal(t, 10, len(messages))
|
|
||||||
for _, m := range messages {
|
|
||||||
require.True(t, m.Expires > time.Now().Add(cacheDuration-5*time.Second).Unix())
|
|
||||||
require.True(t, m.Expires < time.Now().Add(cacheDuration+5*time.Second).Unix())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSqliteCache_StartupQueries_WAL(t *testing.T) {
|
|
||||||
filename := newSqliteTestCacheFile(t)
|
|
||||||
startupQueries := `pragma journal_mode = WAL;
|
|
||||||
pragma synchronous = normal;
|
|
||||||
pragma temp_store = memory;`
|
|
||||||
db, err := newSqliteCache(filename, startupQueries, time.Hour, 0, 0, false)
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Nil(t, db.AddMessage(newDefaultMessage("mytopic", "some message")))
|
|
||||||
require.FileExists(t, filename)
|
|
||||||
require.FileExists(t, filename+"-wal")
|
|
||||||
require.FileExists(t, filename+"-shm")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSqliteCache_StartupQueries_None(t *testing.T) {
|
|
||||||
filename := newSqliteTestCacheFile(t)
|
|
||||||
startupQueries := ""
|
|
||||||
db, err := newSqliteCache(filename, startupQueries, time.Hour, 0, 0, false)
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Nil(t, db.AddMessage(newDefaultMessage("mytopic", "some message")))
|
|
||||||
require.FileExists(t, filename)
|
|
||||||
require.NoFileExists(t, filename+"-wal")
|
|
||||||
require.NoFileExists(t, filename+"-shm")
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestSqliteCache_StartupQueries_Fail ensures that an invalid startup query
// surfaces as an error from newSqliteCache instead of being silently ignored.
func TestSqliteCache_StartupQueries_Fail(t *testing.T) {
	filename := newSqliteTestCacheFile(t)
	startupQueries := `xx error`
	_, err := newSqliteCache(filename, startupQueries, time.Hour, 0, 0, false)
	require.Error(t, err)
}
|
|
||||||
|
|
||||||
// TestSqliteCache_Sender runs the sender round-trip test against the
// SQLite-backed message cache.
func TestSqliteCache_Sender(t *testing.T) {
	testSender(t, newSqliteTestCache(t))
}
|
|
||||||
|
|
||||||
// TestMemCache_Sender runs the sender round-trip test against the in-memory
// message cache.
func TestMemCache_Sender(t *testing.T) {
	testSender(t, newMemTestCache(t))
}
|
|
||||||
|
|
||||||
func testSender(t *testing.T, c *messageCache) {
|
|
||||||
m1 := newDefaultMessage("mytopic", "mymessage")
|
|
||||||
m1.Sender = netip.MustParseAddr("1.2.3.4")
|
|
||||||
require.Nil(t, c.AddMessage(m1))
|
|
||||||
|
|
||||||
m2 := newDefaultMessage("mytopic", "mymessage without sender")
|
|
||||||
require.Nil(t, c.AddMessage(m2))
|
|
||||||
|
|
||||||
messages, err := c.Messages("mytopic", sinceAllMessages, false)
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Equal(t, 2, len(messages))
|
|
||||||
require.Equal(t, messages[0].Sender, netip.MustParseAddr("1.2.3.4"))
|
|
||||||
require.Equal(t, messages[1].Sender, netip.Addr{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestSqliteCache_DeleteScheduledBySequenceID runs the scheduled-message deletion
// test against the SQLite-backed message cache.
func TestSqliteCache_DeleteScheduledBySequenceID(t *testing.T) {
	testDeleteScheduledBySequenceID(t, newSqliteTestCache(t))
}
|
|
||||||
|
|
||||||
// TestMemCache_DeleteScheduledBySequenceID runs the scheduled-message deletion
// test against the in-memory message cache.
func TestMemCache_DeleteScheduledBySequenceID(t *testing.T) {
	testDeleteScheduledBySequenceID(t, newMemTestCache(t))
}
|
|
||||||
|
|
||||||
// testDeleteScheduledBySequenceID verifies DeleteScheduledBySequenceID: it must
// delete only scheduled (not-yet-published) messages, it is scoped to the given
// topic even when other topics share the same sequence ID, and it returns the
// message IDs it deleted (empty for no matches or already-published messages).
func testDeleteScheduledBySequenceID(t *testing.T, c *messageCache) {
	// Create a scheduled (unpublished) message
	scheduledMsg := newDefaultMessage("mytopic", "scheduled message")
	scheduledMsg.ID = "scheduled1"
	scheduledMsg.SequenceID = "seq123"
	scheduledMsg.Time = time.Now().Add(time.Hour).Unix() // Future time makes it scheduled
	require.Nil(t, c.AddMessage(scheduledMsg))

	// Create a published message with different sequence ID
	publishedMsg := newDefaultMessage("mytopic", "published message")
	publishedMsg.ID = "published1"
	publishedMsg.SequenceID = "seq456"
	publishedMsg.Time = time.Now().Add(-time.Hour).Unix() // Past time makes it published
	require.Nil(t, c.AddMessage(publishedMsg))

	// Create a scheduled message in a different topic
	otherTopicMsg := newDefaultMessage("othertopic", "other scheduled")
	otherTopicMsg.ID = "other1"
	otherTopicMsg.SequenceID = "seq123" // Same sequence ID as scheduledMsg
	otherTopicMsg.Time = time.Now().Add(time.Hour).Unix()
	require.Nil(t, c.AddMessage(otherTopicMsg))

	// Verify all messages exist (including scheduled)
	messages, err := c.Messages("mytopic", sinceAllMessages, true)
	require.Nil(t, err)
	require.Equal(t, 2, len(messages))

	messages, err = c.Messages("othertopic", sinceAllMessages, true)
	require.Nil(t, err)
	require.Equal(t, 1, len(messages))

	// Delete scheduled message by sequence ID and verify returned IDs
	deletedIDs, err := c.DeleteScheduledBySequenceID("mytopic", "seq123")
	require.Nil(t, err)
	require.Equal(t, 1, len(deletedIDs))
	require.Equal(t, "scheduled1", deletedIDs[0])

	// Verify scheduled message is deleted
	messages, err = c.Messages("mytopic", sinceAllMessages, true)
	require.Nil(t, err)
	require.Equal(t, 1, len(messages))
	require.Equal(t, "published message", messages[0].Message)

	// Verify other topic's message still exists (topic-scoped deletion)
	messages, err = c.Messages("othertopic", sinceAllMessages, true)
	require.Nil(t, err)
	require.Equal(t, 1, len(messages))
	require.Equal(t, "other scheduled", messages[0].Message)

	// Deleting non-existent sequence ID should return empty list
	deletedIDs, err = c.DeleteScheduledBySequenceID("mytopic", "nonexistent")
	require.Nil(t, err)
	require.Empty(t, deletedIDs)

	// Deleting published message should not affect it (only deletes unpublished)
	deletedIDs, err = c.DeleteScheduledBySequenceID("mytopic", "seq456")
	require.Nil(t, err)
	require.Empty(t, deletedIDs)

	messages, err = c.Messages("mytopic", sinceAllMessages, true)
	require.Nil(t, err)
	require.Equal(t, 1, len(messages))
	require.Equal(t, "published message", messages[0].Message)
}
|
|
||||||
|
|
||||||
func checkSchemaVersion(t *testing.T, db *sql.DB) {
|
|
||||||
rows, err := db.Query(`SELECT version FROM schemaVersion`)
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.True(t, rows.Next())
|
|
||||||
|
|
||||||
var schemaVersion int
|
|
||||||
require.Nil(t, rows.Scan(&schemaVersion))
|
|
||||||
require.Equal(t, currentSchemaVersion, schemaVersion)
|
|
||||||
require.Nil(t, rows.Close())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemCache_NopCache(t *testing.T) {
|
|
||||||
c, _ := newNopCache()
|
|
||||||
require.Nil(t, c.AddMessage(newDefaultMessage("mytopic", "my message")))
|
|
||||||
|
|
||||||
messages, err := c.Messages("mytopic", sinceAllMessages, false)
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Empty(t, messages)
|
|
||||||
|
|
||||||
topics, err := c.Topics()
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Empty(t, topics)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newSqliteTestCache(t *testing.T) *messageCache {
|
|
||||||
c, err := newSqliteCache(newSqliteTestCacheFile(t), "", time.Hour, 0, 0, false)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// newSqliteTestCacheFile returns a path for a fresh cache database inside a
// per-test temporary directory (removed automatically when the test ends).
func newSqliteTestCacheFile(t *testing.T) string {
	return filepath.Join(t.TempDir(), "cache.db")
}
|
|
||||||
|
|
||||||
// newSqliteTestCacheFromFile opens a SQLite-backed message cache at the given
// file with the given startup queries, failing the test on error. Opening an
// existing file triggers any pending schema migrations.
func newSqliteTestCacheFromFile(t *testing.T, filename, startupQueries string) *messageCache {
	c, err := newSqliteCache(filename, startupQueries, time.Hour, 0, 0, false)
	require.Nil(t, err)
	return c
}
|
|
||||||
|
|
||||||
// newMemTestCache creates an in-memory message cache, failing the test on error.
func newMemTestCache(t *testing.T) *messageCache {
	c, err := newMemCache()
	require.Nil(t, err)
	return c
}
|
|
||||||
168
server/server.go
168
server/server.go
@@ -4,6 +4,7 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
|
"database/sql"
|
||||||
"embed"
|
"embed"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
@@ -32,16 +33,21 @@ import (
|
|||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
|
"heckel.io/ntfy/v2/db"
|
||||||
"heckel.io/ntfy/v2/log"
|
"heckel.io/ntfy/v2/log"
|
||||||
|
"heckel.io/ntfy/v2/message"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/payments"
|
"heckel.io/ntfy/v2/payments"
|
||||||
"heckel.io/ntfy/v2/user"
|
"heckel.io/ntfy/v2/user"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
"heckel.io/ntfy/v2/util/sprig"
|
"heckel.io/ntfy/v2/util/sprig"
|
||||||
|
"heckel.io/ntfy/v2/webpush"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Server is the main server, providing the UI and API for ntfy
|
// Server is the main server, providing the UI and API for ntfy
|
||||||
type Server struct {
|
type Server struct {
|
||||||
config *Config
|
config *Config
|
||||||
|
db *sql.DB // Shared PostgreSQL connection pool, nil when using SQLite
|
||||||
httpServer *http.Server
|
httpServer *http.Server
|
||||||
httpsServer *http.Server
|
httpsServer *http.Server
|
||||||
httpMetricsServer *http.Server
|
httpMetricsServer *http.Server
|
||||||
@@ -56,8 +62,8 @@ type Server struct {
|
|||||||
messages int64 // Total number of messages (persisted if messageCache enabled)
|
messages int64 // Total number of messages (persisted if messageCache enabled)
|
||||||
messagesHistory []int64 // Last n values of the messages counter, used to determine rate
|
messagesHistory []int64 // Last n values of the messages counter, used to determine rate
|
||||||
userManager *user.Manager // Might be nil!
|
userManager *user.Manager // Might be nil!
|
||||||
messageCache *messageCache // Database that stores the messages
|
messageCache *message.Cache // Database that stores the messages
|
||||||
webPush *webPushStore // Database that stores web push subscriptions
|
webPush *webpush.Store // Database that stores web push subscriptions
|
||||||
fileCache *fileCache // File system based cache that stores attachments
|
fileCache *fileCache // File system based cache that stores attachments
|
||||||
stripe stripeAPI // Stripe API, can be replaced with a mock
|
stripe stripeAPI // Stripe API, can be replaced with a mock
|
||||||
priceCache *util.LookupCache[map[string]int64] // Stripe price ID -> price as cents (USD implied!)
|
priceCache *util.LookupCache[map[string]int64] // Stripe price ID -> price as cents (USD implied!)
|
||||||
@@ -172,21 +178,38 @@ func New(conf *Config) (*Server, error) {
|
|||||||
if payments.Available && conf.StripeSecretKey != "" {
|
if payments.Available && conf.StripeSecretKey != "" {
|
||||||
stripe = newStripeAPI()
|
stripe = newStripeAPI()
|
||||||
}
|
}
|
||||||
messageCache, err := createMessageCache(conf)
|
// OpenPostgres shared PostgreSQL connection pool if configured
|
||||||
|
var pool *sql.DB
|
||||||
|
if conf.DatabaseURL != "" {
|
||||||
|
var err error
|
||||||
|
pool, err = db.OpenPostgres(conf.DatabaseURL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var webPush *webPushStore
|
}
|
||||||
|
messageCache, err := createMessageCache(conf, pool)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var wp *webpush.Store
|
||||||
if conf.WebPushPublicKey != "" {
|
if conf.WebPushPublicKey != "" {
|
||||||
webPush, err = newWebPushStore(conf.WebPushFile, conf.WebPushStartupQueries)
|
if pool != nil {
|
||||||
|
wp, err = webpush.NewPostgresStore(pool)
|
||||||
|
} else {
|
||||||
|
wp, err = webpush.NewSQLiteStore(conf.WebPushFile, conf.WebPushStartupQueries)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
topics, err := messageCache.Topics()
|
topicIDs, err := messageCache.Topics()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
topics := make(map[string]*topic, len(topicIDs))
|
||||||
|
for _, id := range topicIDs {
|
||||||
|
topics[id] = newTopic(id)
|
||||||
|
}
|
||||||
messages, err := messageCache.Stats()
|
messages, err := messageCache.Stats()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -199,9 +222,10 @@ func New(conf *Config) (*Server, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
var userManager *user.Manager
|
var userManager *user.Manager
|
||||||
if conf.AuthFile != "" {
|
if conf.AuthFile != "" || pool != nil {
|
||||||
authConfig := &user.Config{
|
authConfig := &user.Config{
|
||||||
Filename: conf.AuthFile,
|
Filename: conf.AuthFile,
|
||||||
|
DatabaseURL: conf.DatabaseURL,
|
||||||
StartupQueries: conf.AuthStartupQueries,
|
StartupQueries: conf.AuthStartupQueries,
|
||||||
DefaultAccess: conf.AuthDefault,
|
DefaultAccess: conf.AuthDefault,
|
||||||
ProvisionEnabled: true, // Enable provisioning of users and access
|
ProvisionEnabled: true, // Enable provisioning of users and access
|
||||||
@@ -211,7 +235,11 @@ func New(conf *Config) (*Server, error) {
|
|||||||
BcryptCost: conf.AuthBcryptCost,
|
BcryptCost: conf.AuthBcryptCost,
|
||||||
QueueWriterInterval: conf.AuthStatsQueueWriterInterval,
|
QueueWriterInterval: conf.AuthStatsQueueWriterInterval,
|
||||||
}
|
}
|
||||||
userManager, err = user.NewManager(authConfig)
|
if pool != nil {
|
||||||
|
userManager, err = user.NewPostgresManager(pool, authConfig)
|
||||||
|
} else {
|
||||||
|
userManager, err = user.NewSQLiteManager(conf.AuthFile, conf.AuthStartupQueries, authConfig)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -232,8 +260,9 @@ func New(conf *Config) (*Server, error) {
|
|||||||
}
|
}
|
||||||
s := &Server{
|
s := &Server{
|
||||||
config: conf,
|
config: conf,
|
||||||
|
db: pool,
|
||||||
messageCache: messageCache,
|
messageCache: messageCache,
|
||||||
webPush: webPush,
|
webPush: wp,
|
||||||
fileCache: fileCache,
|
fileCache: fileCache,
|
||||||
firebaseClient: firebaseClient,
|
firebaseClient: firebaseClient,
|
||||||
smtpSender: mailer,
|
smtpSender: mailer,
|
||||||
@@ -248,13 +277,15 @@ func New(conf *Config) (*Server, error) {
|
|||||||
return s, nil
|
return s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func createMessageCache(conf *Config) (*messageCache, error) {
|
func createMessageCache(conf *Config, pool *sql.DB) (*message.Cache, error) {
|
||||||
if conf.CacheDuration == 0 {
|
if conf.CacheDuration == 0 {
|
||||||
return newNopCache()
|
return message.NewNopStore()
|
||||||
|
} else if pool != nil {
|
||||||
|
return message.NewPostgresStore(pool, conf.CacheBatchSize, conf.CacheBatchTimeout)
|
||||||
} else if conf.CacheFile != "" {
|
} else if conf.CacheFile != "" {
|
||||||
return newSqliteCache(conf.CacheFile, conf.CacheStartupQueries, conf.CacheDuration, conf.CacheBatchSize, conf.CacheBatchTimeout, false)
|
return message.NewSQLiteStore(conf.CacheFile, conf.CacheStartupQueries, conf.CacheDuration, conf.CacheBatchSize, conf.CacheBatchTimeout, false)
|
||||||
}
|
}
|
||||||
return newMemCache()
|
return message.NewMemStore()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run executes the main server. It listens on HTTP (+ HTTPS, if configured), and starts
|
// Run executes the main server. It listens on HTTP (+ HTTPS, if configured), and starts
|
||||||
@@ -389,6 +420,9 @@ func (s *Server) closeDatabases() {
|
|||||||
if s.webPush != nil {
|
if s.webPush != nil {
|
||||||
s.webPush.Close()
|
s.webPush.Close()
|
||||||
}
|
}
|
||||||
|
if s.db != nil {
|
||||||
|
s.db.Close()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// handle is the main entry point for all HTTP requests
|
// handle is the main entry point for all HTTP requests
|
||||||
@@ -731,11 +765,11 @@ func (s *Server) handleFile(w http.ResponseWriter, r *http.Request, v *visitor)
|
|||||||
// - avoid abuse (e.g. 1 uploader, 1k downloaders)
|
// - avoid abuse (e.g. 1 uploader, 1k downloaders)
|
||||||
// - and also uses the higher bandwidth limits of a paying user
|
// - and also uses the higher bandwidth limits of a paying user
|
||||||
m, err := s.messageCache.Message(messageID)
|
m, err := s.messageCache.Message(messageID)
|
||||||
if errors.Is(err, errMessageNotFound) {
|
if errors.Is(err, model.ErrMessageNotFound) {
|
||||||
if s.config.CacheBatchTimeout > 0 {
|
if s.config.CacheBatchTimeout > 0 {
|
||||||
// Strange edge case: If we immediately after upload request the file (the web app does this for images),
|
// Strange edge case: If we immediately after upload request the file (the web app does this for images),
|
||||||
// and messages are persisted asynchronously, retry fetching from the database
|
// and messages are persisted asynchronously, retry fetching from the database
|
||||||
m, err = util.Retry(func() (*message, error) {
|
m, err = util.Retry(func() (*model.Message, error) {
|
||||||
return s.messageCache.Message(messageID)
|
return s.messageCache.Message(messageID)
|
||||||
}, s.config.CacheBatchTimeout, 100*time.Millisecond, 300*time.Millisecond, 600*time.Millisecond)
|
}, s.config.CacheBatchTimeout, 100*time.Millisecond, 300*time.Millisecond, 600*time.Millisecond)
|
||||||
}
|
}
|
||||||
@@ -781,7 +815,7 @@ func (s *Server) handleMatrixDiscovery(w http.ResponseWriter) error {
|
|||||||
return writeMatrixDiscoveryResponse(w)
|
return writeMatrixDiscoveryResponse(w)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handlePublishInternal(r *http.Request, v *visitor) (*message, error) {
|
func (s *Server) handlePublishInternal(r *http.Request, v *visitor) (*model.Message, error) {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
t, err := fromContext[*topic](r, contextTopic)
|
t, err := fromContext[*topic](r, contextTopic)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -795,7 +829,7 @@ func (s *Server) handlePublishInternal(r *http.Request, v *visitor) (*message, e
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
m := newDefaultMessage(t.ID, "")
|
m := model.NewDefaultMessage(t.ID, "")
|
||||||
cache, firebase, email, call, template, unifiedpush, priorityStr, e := s.parsePublishParams(r, m)
|
cache, firebase, email, call, template, unifiedpush, priorityStr, e := s.parsePublishParams(r, m)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return nil, e.With(t)
|
return nil, e.With(t)
|
||||||
@@ -820,7 +854,7 @@ func (s *Server) handlePublishInternal(r *http.Request, v *visitor) (*message, e
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if m.PollID != "" {
|
if m.PollID != "" {
|
||||||
m = newPollRequestMessage(t.ID, m.PollID)
|
m = model.NewPollRequestMessage(t.ID, m.PollID)
|
||||||
}
|
}
|
||||||
m.Sender = v.IP()
|
m.Sender = v.IP()
|
||||||
m.User = v.MaybeUserID()
|
m.User = v.MaybeUserID()
|
||||||
@@ -909,7 +943,7 @@ func (s *Server) handlePublish(w http.ResponseWriter, r *http.Request, v *visito
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
minc(metricMessagesPublishedSuccess)
|
minc(metricMessagesPublishedSuccess)
|
||||||
return s.writeJSON(w, m.forJSON())
|
return s.writeJSON(w, m.ForJSON())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handlePublishMatrix(w http.ResponseWriter, r *http.Request, v *visitor) error {
|
func (s *Server) handlePublishMatrix(w http.ResponseWriter, r *http.Request, v *visitor) error {
|
||||||
@@ -938,11 +972,11 @@ func (s *Server) handlePublishMatrix(w http.ResponseWriter, r *http.Request, v *
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handleDelete(w http.ResponseWriter, r *http.Request, v *visitor) error {
|
func (s *Server) handleDelete(w http.ResponseWriter, r *http.Request, v *visitor) error {
|
||||||
return s.handleActionMessage(w, r, v, messageDeleteEvent)
|
return s.handleActionMessage(w, r, v, model.MessageDeleteEvent)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handleClear(w http.ResponseWriter, r *http.Request, v *visitor) error {
|
func (s *Server) handleClear(w http.ResponseWriter, r *http.Request, v *visitor) error {
|
||||||
return s.handleActionMessage(w, r, v, messageClearEvent)
|
return s.handleActionMessage(w, r, v, model.MessageClearEvent)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handleActionMessage(w http.ResponseWriter, r *http.Request, v *visitor, event string) error {
|
func (s *Server) handleActionMessage(w http.ResponseWriter, r *http.Request, v *visitor, event string) error {
|
||||||
@@ -962,7 +996,7 @@ func (s *Server) handleActionMessage(w http.ResponseWriter, r *http.Request, v *
|
|||||||
return e.With(t)
|
return e.With(t)
|
||||||
}
|
}
|
||||||
// Create an action message with the given event type
|
// Create an action message with the given event type
|
||||||
m := newActionMessage(event, t.ID, sequenceID)
|
m := model.NewActionMessage(event, t.ID, sequenceID)
|
||||||
m.Sender = v.IP()
|
m.Sender = v.IP()
|
||||||
m.User = v.MaybeUserID()
|
m.User = v.MaybeUserID()
|
||||||
m.Expires = time.Unix(m.Time, 0).Add(v.Limits().MessageExpiryDuration).Unix()
|
m.Expires = time.Unix(m.Time, 0).Add(v.Limits().MessageExpiryDuration).Unix()
|
||||||
@@ -978,7 +1012,7 @@ func (s *Server) handleActionMessage(w http.ResponseWriter, r *http.Request, v *
|
|||||||
if s.config.WebPushPublicKey != "" {
|
if s.config.WebPushPublicKey != "" {
|
||||||
go s.publishToWebPushEndpoints(v, m)
|
go s.publishToWebPushEndpoints(v, m)
|
||||||
}
|
}
|
||||||
if event == messageDeleteEvent {
|
if event == model.MessageDeleteEvent {
|
||||||
// Delete any existing scheduled message with the same sequence ID
|
// Delete any existing scheduled message with the same sequence ID
|
||||||
deletedIDs, err := s.messageCache.DeleteScheduledBySequenceID(t.ID, sequenceID)
|
deletedIDs, err := s.messageCache.DeleteScheduledBySequenceID(t.ID, sequenceID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -999,10 +1033,10 @@ func (s *Server) handleActionMessage(w http.ResponseWriter, r *http.Request, v *
|
|||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
s.messages++
|
s.messages++
|
||||||
s.mu.Unlock()
|
s.mu.Unlock()
|
||||||
return s.writeJSON(w, m.forJSON())
|
return s.writeJSON(w, m.ForJSON())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) sendToFirebase(v *visitor, m *message) {
|
func (s *Server) sendToFirebase(v *visitor, m *model.Message) {
|
||||||
logvm(v, m).Tag(tagFirebase).Debug("Publishing to Firebase")
|
logvm(v, m).Tag(tagFirebase).Debug("Publishing to Firebase")
|
||||||
if err := s.firebaseClient.Send(v, m); err != nil {
|
if err := s.firebaseClient.Send(v, m); err != nil {
|
||||||
minc(metricFirebasePublishedFailure)
|
minc(metricFirebasePublishedFailure)
|
||||||
@@ -1016,7 +1050,7 @@ func (s *Server) sendToFirebase(v *visitor, m *message) {
|
|||||||
minc(metricFirebasePublishedSuccess)
|
minc(metricFirebasePublishedSuccess)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) sendEmail(v *visitor, m *message, email string) {
|
func (s *Server) sendEmail(v *visitor, m *model.Message, email string) {
|
||||||
logvm(v, m).Tag(tagEmail).Field("email", email).Debug("Sending email to %s", email)
|
logvm(v, m).Tag(tagEmail).Field("email", email).Debug("Sending email to %s", email)
|
||||||
if err := s.smtpSender.Send(v, m, email); err != nil {
|
if err := s.smtpSender.Send(v, m, email); err != nil {
|
||||||
logvm(v, m).Tag(tagEmail).Field("email", email).Err(err).Warn("Unable to send email to %s: %v", email, err.Error())
|
logvm(v, m).Tag(tagEmail).Field("email", email).Err(err).Warn("Unable to send email to %s: %v", email, err.Error())
|
||||||
@@ -1026,7 +1060,7 @@ func (s *Server) sendEmail(v *visitor, m *message, email string) {
|
|||||||
minc(metricEmailsPublishedSuccess)
|
minc(metricEmailsPublishedSuccess)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) forwardPollRequest(v *visitor, m *message) {
|
func (s *Server) forwardPollRequest(v *visitor, m *model.Message) {
|
||||||
topicURL := fmt.Sprintf("%s/%s", s.config.BaseURL, m.Topic)
|
topicURL := fmt.Sprintf("%s/%s", s.config.BaseURL, m.Topic)
|
||||||
topicHash := fmt.Sprintf("%x", sha256.Sum256([]byte(topicURL)))
|
topicHash := fmt.Sprintf("%x", sha256.Sum256([]byte(topicURL)))
|
||||||
forwardURL := fmt.Sprintf("%s/%s", s.config.UpstreamBaseURL, topicHash)
|
forwardURL := fmt.Sprintf("%s/%s", s.config.UpstreamBaseURL, topicHash)
|
||||||
@@ -1058,7 +1092,7 @@ func (s *Server) forwardPollRequest(v *visitor, m *message) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) parsePublishParams(r *http.Request, m *message) (cache bool, firebase bool, email, call string, template templateMode, unifiedpush bool, priorityStr string, err *errHTTP) {
|
func (s *Server) parsePublishParams(r *http.Request, m *model.Message) (cache bool, firebase bool, email, call string, template templateMode, unifiedpush bool, priorityStr string, err *errHTTP) {
|
||||||
if r.Method != http.MethodGet && updatePathRegex.MatchString(r.URL.Path) {
|
if r.Method != http.MethodGet && updatePathRegex.MatchString(r.URL.Path) {
|
||||||
pathSequenceID, err := s.sequenceIDFromPath(r.URL.Path)
|
pathSequenceID, err := s.sequenceIDFromPath(r.URL.Path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1085,7 +1119,7 @@ func (s *Server) parsePublishParams(r *http.Request, m *message) (cache bool, fi
|
|||||||
filename := readParam(r, "x-filename", "filename", "file", "f")
|
filename := readParam(r, "x-filename", "filename", "file", "f")
|
||||||
attach := readParam(r, "x-attach", "attach", "a")
|
attach := readParam(r, "x-attach", "attach", "a")
|
||||||
if attach != "" || filename != "" {
|
if attach != "" || filename != "" {
|
||||||
m.Attachment = &attachment{}
|
m.Attachment = &model.Attachment{}
|
||||||
}
|
}
|
||||||
if filename != "" {
|
if filename != "" {
|
||||||
m.Attachment.Name = filename
|
m.Attachment.Name = filename
|
||||||
@@ -1206,8 +1240,8 @@ func (s *Server) parsePublishParams(r *http.Request, m *message) (cache bool, fi
|
|||||||
// If file.txt is <= 4096 (message limit) and valid UTF-8, treat it as a message
|
// If file.txt is <= 4096 (message limit) and valid UTF-8, treat it as a message
|
||||||
// 7. curl -T file.txt ntfy.sh/mytopic
|
// 7. curl -T file.txt ntfy.sh/mytopic
|
||||||
// In all other cases, mostly if file.txt is > message limit, treat it as an attachment
|
// In all other cases, mostly if file.txt is > message limit, treat it as an attachment
|
||||||
func (s *Server) handlePublishBody(r *http.Request, v *visitor, m *message, body *util.PeekedReadCloser, template templateMode, unifiedpush bool, priorityStr string) error {
|
func (s *Server) handlePublishBody(r *http.Request, v *visitor, m *model.Message, body *util.PeekedReadCloser, template templateMode, unifiedpush bool, priorityStr string) error {
|
||||||
if m.Event == pollRequestEvent { // Case 1
|
if m.Event == model.PollRequestEvent { // Case 1
|
||||||
return s.handleBodyDiscard(body)
|
return s.handleBodyDiscard(body)
|
||||||
} else if unifiedpush {
|
} else if unifiedpush {
|
||||||
return s.handleBodyAsMessageAutoDetect(m, body) // Case 2
|
return s.handleBodyAsMessageAutoDetect(m, body) // Case 2
|
||||||
@@ -1229,7 +1263,7 @@ func (s *Server) handleBodyDiscard(body *util.PeekedReadCloser) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handleBodyAsMessageAutoDetect(m *message, body *util.PeekedReadCloser) error {
|
func (s *Server) handleBodyAsMessageAutoDetect(m *model.Message, body *util.PeekedReadCloser) error {
|
||||||
if utf8.Valid(body.PeekedBytes) {
|
if utf8.Valid(body.PeekedBytes) {
|
||||||
m.Message = string(body.PeekedBytes) // Do not trim
|
m.Message = string(body.PeekedBytes) // Do not trim
|
||||||
} else {
|
} else {
|
||||||
@@ -1239,7 +1273,7 @@ func (s *Server) handleBodyAsMessageAutoDetect(m *message, body *util.PeekedRead
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handleBodyAsTextMessage(m *message, body *util.PeekedReadCloser) error {
|
func (s *Server) handleBodyAsTextMessage(m *model.Message, body *util.PeekedReadCloser) error {
|
||||||
if !utf8.Valid(body.PeekedBytes) {
|
if !utf8.Valid(body.PeekedBytes) {
|
||||||
return errHTTPBadRequestMessageNotUTF8.With(m)
|
return errHTTPBadRequestMessageNotUTF8.With(m)
|
||||||
}
|
}
|
||||||
@@ -1252,7 +1286,7 @@ func (s *Server) handleBodyAsTextMessage(m *message, body *util.PeekedReadCloser
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handleBodyAsTemplatedTextMessage(m *message, template templateMode, body *util.PeekedReadCloser, priorityStr string) error {
|
func (s *Server) handleBodyAsTemplatedTextMessage(m *model.Message, template templateMode, body *util.PeekedReadCloser, priorityStr string) error {
|
||||||
body, err := util.Peek(body, max(s.config.MessageSizeLimit, jsonBodyBytesLimit))
|
body, err := util.Peek(body, max(s.config.MessageSizeLimit, jsonBodyBytesLimit))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1277,7 +1311,7 @@ func (s *Server) handleBodyAsTemplatedTextMessage(m *message, template templateM
|
|||||||
|
|
||||||
// renderTemplateFromFile transforms the JSON message body according to a template from the filesystem.
|
// renderTemplateFromFile transforms the JSON message body according to a template from the filesystem.
|
||||||
// The template file must be in the templates directory, or in the configured template directory.
|
// The template file must be in the templates directory, or in the configured template directory.
|
||||||
func (s *Server) renderTemplateFromFile(m *message, templateName, peekedBody string) error {
|
func (s *Server) renderTemplateFromFile(m *model.Message, templateName, peekedBody string) error {
|
||||||
if !templateNameRegex.MatchString(templateName) {
|
if !templateNameRegex.MatchString(templateName) {
|
||||||
return errHTTPBadRequestTemplateFileNotFound
|
return errHTTPBadRequestTemplateFileNotFound
|
||||||
}
|
}
|
||||||
@@ -1319,7 +1353,7 @@ func (s *Server) renderTemplateFromFile(m *message, templateName, peekedBody str
|
|||||||
|
|
||||||
// renderTemplateFromParams transforms the JSON message body according to the inline template in the
|
// renderTemplateFromParams transforms the JSON message body according to the inline template in the
|
||||||
// message, title, and priority parameters.
|
// message, title, and priority parameters.
|
||||||
func (s *Server) renderTemplateFromParams(m *message, peekedBody string, priorityStr string) error {
|
func (s *Server) renderTemplateFromParams(m *model.Message, peekedBody string, priorityStr string) error {
|
||||||
var err error
|
var err error
|
||||||
if m.Message, err = s.renderTemplate("priority query parameter", m.Message, peekedBody); err != nil {
|
if m.Message, err = s.renderTemplate("priority query parameter", m.Message, peekedBody); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1360,7 +1394,7 @@ func (s *Server) renderTemplate(name, tpl, source string) (string, error) {
|
|||||||
return strings.TrimSpace(strings.ReplaceAll(buf.String(), "\\n", "\n")), nil // replace any remaining "\n" (those outside of template curly braces) with newlines
|
return strings.TrimSpace(strings.ReplaceAll(buf.String(), "\\n", "\n")), nil // replace any remaining "\n" (those outside of template curly braces) with newlines
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handleBodyAsAttachment(r *http.Request, v *visitor, m *message, body *util.PeekedReadCloser) error {
|
func (s *Server) handleBodyAsAttachment(r *http.Request, v *visitor, m *model.Message, body *util.PeekedReadCloser) error {
|
||||||
if s.fileCache == nil || s.config.BaseURL == "" || s.config.AttachmentCacheDir == "" {
|
if s.fileCache == nil || s.config.BaseURL == "" || s.config.AttachmentCacheDir == "" {
|
||||||
return errHTTPBadRequestAttachmentsDisallowed.With(m)
|
return errHTTPBadRequestAttachmentsDisallowed.With(m)
|
||||||
}
|
}
|
||||||
@@ -1384,7 +1418,7 @@ func (s *Server) handleBodyAsAttachment(r *http.Request, v *visitor, m *message,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if m.Attachment == nil {
|
if m.Attachment == nil {
|
||||||
m.Attachment = &attachment{}
|
m.Attachment = &model.Attachment{}
|
||||||
}
|
}
|
||||||
var ext string
|
var ext string
|
||||||
m.Attachment.Expires = attachmentExpiry
|
m.Attachment.Expires = attachmentExpiry
|
||||||
@@ -1411,9 +1445,9 @@ func (s *Server) handleBodyAsAttachment(r *http.Request, v *visitor, m *message,
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handleSubscribeJSON(w http.ResponseWriter, r *http.Request, v *visitor) error {
|
func (s *Server) handleSubscribeJSON(w http.ResponseWriter, r *http.Request, v *visitor) error {
|
||||||
encoder := func(msg *message) (string, error) {
|
encoder := func(msg *model.Message) (string, error) {
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
if err := json.NewEncoder(&buf).Encode(msg.forJSON()); err != nil {
|
if err := json.NewEncoder(&buf).Encode(msg.ForJSON()); err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
return buf.String(), nil
|
return buf.String(), nil
|
||||||
@@ -1422,12 +1456,12 @@ func (s *Server) handleSubscribeJSON(w http.ResponseWriter, r *http.Request, v *
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handleSubscribeSSE(w http.ResponseWriter, r *http.Request, v *visitor) error {
|
func (s *Server) handleSubscribeSSE(w http.ResponseWriter, r *http.Request, v *visitor) error {
|
||||||
encoder := func(msg *message) (string, error) {
|
encoder := func(msg *model.Message) (string, error) {
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
if err := json.NewEncoder(&buf).Encode(msg.forJSON()); err != nil {
|
if err := json.NewEncoder(&buf).Encode(msg.ForJSON()); err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
if msg.Event != messageEvent && msg.Event != messageDeleteEvent && msg.Event != messageClearEvent {
|
if msg.Event != model.MessageEvent && msg.Event != model.MessageDeleteEvent && msg.Event != model.MessageClearEvent {
|
||||||
return fmt.Sprintf("event: %s\ndata: %s\n", msg.Event, buf.String()), nil // Browser's .onmessage() does not fire on this!
|
return fmt.Sprintf("event: %s\ndata: %s\n", msg.Event, buf.String()), nil // Browser's .onmessage() does not fire on this!
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("data: %s\n", buf.String()), nil
|
return fmt.Sprintf("data: %s\n", buf.String()), nil
|
||||||
@@ -1436,8 +1470,8 @@ func (s *Server) handleSubscribeSSE(w http.ResponseWriter, r *http.Request, v *v
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handleSubscribeRaw(w http.ResponseWriter, r *http.Request, v *visitor) error {
|
func (s *Server) handleSubscribeRaw(w http.ResponseWriter, r *http.Request, v *visitor) error {
|
||||||
encoder := func(msg *message) (string, error) {
|
encoder := func(msg *model.Message) (string, error) {
|
||||||
if msg.Event == messageEvent { // only handle default events
|
if msg.Event == model.MessageEvent { // only handle default events
|
||||||
return strings.ReplaceAll(msg.Message, "\n", " ") + "\n", nil
|
return strings.ReplaceAll(msg.Message, "\n", " ") + "\n", nil
|
||||||
}
|
}
|
||||||
return "\n", nil // "keepalive" and "open" events just send an empty line
|
return "\n", nil // "keepalive" and "open" events just send an empty line
|
||||||
@@ -1472,7 +1506,7 @@ func (s *Server) handleSubscribeHTTP(w http.ResponseWriter, r *http.Request, v *
|
|||||||
closed = true
|
closed = true
|
||||||
wlock.Unlock()
|
wlock.Unlock()
|
||||||
}()
|
}()
|
||||||
sub := func(v *visitor, msg *message) error {
|
sub := func(v *visitor, msg *model.Message) error {
|
||||||
if !filters.Pass(msg) {
|
if !filters.Pass(msg) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -1515,7 +1549,7 @@ func (s *Server) handleSubscribeHTTP(w http.ResponseWriter, r *http.Request, v *
|
|||||||
topics[i].Unsubscribe(subscriberID) // Order!
|
topics[i].Unsubscribe(subscriberID) // Order!
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
if err := sub(v, newOpenMessage(topicsStr)); err != nil { // Send out open message
|
if err := sub(v, model.NewOpenMessage(topicsStr)); err != nil { // Send out open message
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := s.sendOldMessages(topics, since, scheduled, v, sub); err != nil {
|
if err := s.sendOldMessages(topics, since, scheduled, v, sub); err != nil {
|
||||||
@@ -1538,7 +1572,7 @@ func (s *Server) handleSubscribeHTTP(w http.ResponseWriter, r *http.Request, v *
|
|||||||
for _, t := range topics {
|
for _, t := range topics {
|
||||||
t.Keepalive()
|
t.Keepalive()
|
||||||
}
|
}
|
||||||
if err := sub(v, newKeepaliveMessage(topicsStr)); err != nil { // Send keepalive message
|
if err := sub(v, model.NewKeepaliveMessage(topicsStr)); err != nil { // Send keepalive message
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1634,7 +1668,7 @@ func (s *Server) handleSubscribeWS(w http.ResponseWriter, r *http.Request, v *vi
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
sub := func(v *visitor, msg *message) error {
|
sub := func(v *visitor, msg *model.Message) error {
|
||||||
if !filters.Pass(msg) {
|
if !filters.Pass(msg) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -1664,7 +1698,7 @@ func (s *Server) handleSubscribeWS(w http.ResponseWriter, r *http.Request, v *vi
|
|||||||
topics[i].Unsubscribe(subscriberID) // Order!
|
topics[i].Unsubscribe(subscriberID) // Order!
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
if err := sub(v, newOpenMessage(topicsStr)); err != nil { // Send out open message
|
if err := sub(v, model.NewOpenMessage(topicsStr)); err != nil { // Send out open message
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := s.sendOldMessages(topics, since, scheduled, v, sub); err != nil {
|
if err := s.sendOldMessages(topics, since, scheduled, v, sub); err != nil {
|
||||||
@@ -1681,7 +1715,7 @@ func (s *Server) handleSubscribeWS(w http.ResponseWriter, r *http.Request, v *vi
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseSubscribeParams(r *http.Request) (poll bool, since sinceMarker, scheduled bool, filters *queryFilter, err error) {
|
func parseSubscribeParams(r *http.Request) (poll bool, since model.SinceMarker, scheduled bool, filters *queryFilter, err error) {
|
||||||
poll = readBoolParam(r, false, "x-poll", "poll", "po")
|
poll = readBoolParam(r, false, "x-poll", "poll", "po")
|
||||||
scheduled = readBoolParam(r, false, "x-scheduled", "scheduled", "sched")
|
scheduled = readBoolParam(r, false, "x-scheduled", "scheduled", "sched")
|
||||||
since, err = parseSince(r, poll)
|
since, err = parseSince(r, poll)
|
||||||
@@ -1762,11 +1796,11 @@ func (s *Server) setRateVisitors(r *http.Request, v *visitor, rateTopics []*topi
|
|||||||
|
|
||||||
// sendOldMessages selects old messages from the messageCache and calls sub for each of them. It uses since as the
|
// sendOldMessages selects old messages from the messageCache and calls sub for each of them. It uses since as the
|
||||||
// marker, returning only messages that are newer than the marker.
|
// marker, returning only messages that are newer than the marker.
|
||||||
func (s *Server) sendOldMessages(topics []*topic, since sinceMarker, scheduled bool, v *visitor, sub subscriber) error {
|
func (s *Server) sendOldMessages(topics []*topic, since model.SinceMarker, scheduled bool, v *visitor, sub subscriber) error {
|
||||||
if since.IsNone() {
|
if since.IsNone() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
messages := make([]*message, 0)
|
messages := make([]*model.Message, 0)
|
||||||
for _, t := range topics {
|
for _, t := range topics {
|
||||||
topicMessages, err := s.messageCache.Messages(t.ID, since, scheduled)
|
topicMessages, err := s.messageCache.Messages(t.ID, since, scheduled)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1789,32 +1823,32 @@ func (s *Server) sendOldMessages(topics []*topic, since sinceMarker, scheduled b
|
|||||||
//
|
//
|
||||||
// Values in the "since=..." parameter can be either a unix timestamp or a duration (e.g. 12h),
|
// Values in the "since=..." parameter can be either a unix timestamp or a duration (e.g. 12h),
|
||||||
// "all" for all messages, or "latest" for the most recent message for a topic
|
// "all" for all messages, or "latest" for the most recent message for a topic
|
||||||
func parseSince(r *http.Request, poll bool) (sinceMarker, error) {
|
func parseSince(r *http.Request, poll bool) (model.SinceMarker, error) {
|
||||||
since := readParam(r, "x-since", "since", "si")
|
since := readParam(r, "x-since", "since", "si")
|
||||||
|
|
||||||
// Easy cases (empty, all, none)
|
// Easy cases (empty, all, none)
|
||||||
if since == "" {
|
if since == "" {
|
||||||
if poll {
|
if poll {
|
||||||
return sinceAllMessages, nil
|
return model.SinceAllMessages, nil
|
||||||
}
|
}
|
||||||
return sinceNoMessages, nil
|
return model.SinceNoMessages, nil
|
||||||
} else if since == "all" {
|
} else if since == "all" {
|
||||||
return sinceAllMessages, nil
|
return model.SinceAllMessages, nil
|
||||||
} else if since == "latest" {
|
} else if since == "latest" {
|
||||||
return sinceLatestMessage, nil
|
return model.SinceLatestMessage, nil
|
||||||
} else if since == "none" {
|
} else if since == "none" {
|
||||||
return sinceNoMessages, nil
|
return model.SinceNoMessages, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ID, timestamp, duration
|
// ID, timestamp, duration
|
||||||
if validMessageID(since) {
|
if model.ValidMessageID(since) {
|
||||||
return newSinceID(since), nil
|
return model.NewSinceID(since), nil
|
||||||
} else if s, err := strconv.ParseInt(since, 10, 64); err == nil {
|
} else if s, err := strconv.ParseInt(since, 10, 64); err == nil {
|
||||||
return newSinceTime(s), nil
|
return model.NewSinceTime(s), nil
|
||||||
} else if d, err := time.ParseDuration(since); err == nil {
|
} else if d, err := time.ParseDuration(since); err == nil {
|
||||||
return newSinceTime(time.Now().Add(-1 * d).Unix()), nil
|
return model.NewSinceTime(time.Now().Add(-1 * d).Unix()), nil
|
||||||
}
|
}
|
||||||
return sinceNoMessages, errHTTPBadRequestSinceInvalid
|
return model.SinceNoMessages, errHTTPBadRequestSinceInvalid
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handleOptions(w http.ResponseWriter, _ *http.Request, _ *visitor) error {
|
func (s *Server) handleOptions(w http.ResponseWriter, _ *http.Request, _ *visitor) error {
|
||||||
@@ -1970,14 +2004,14 @@ func (s *Server) runFirebaseKeepaliver() {
|
|||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-time.After(s.config.FirebaseKeepaliveInterval):
|
case <-time.After(s.config.FirebaseKeepaliveInterval):
|
||||||
s.sendToFirebase(v, newKeepaliveMessage(firebaseControlTopic))
|
s.sendToFirebase(v, model.NewKeepaliveMessage(firebaseControlTopic))
|
||||||
/*
|
/*
|
||||||
FIXME: Disable iOS polling entirely for now due to thundering herd problem (see #677)
|
FIXME: Disable iOS polling entirely for now due to thundering herd problem (see #677)
|
||||||
To solve this, we'd have to shard the iOS poll topics to spread out the polling evenly.
|
To solve this, we'd have to shard the iOS poll topics to spread out the polling evenly.
|
||||||
Given that it's not really necessary to poll, turning it off for now should not have any impact.
|
Given that it's not really necessary to poll, turning it off for now should not have any impact.
|
||||||
|
|
||||||
case <-time.After(s.config.FirebasePollInterval):
|
case <-time.After(s.config.FirebasePollInterval):
|
||||||
s.sendToFirebase(v, newKeepaliveMessage(firebasePollTopic))
|
s.sendToFirebase(v, model.NewKeepaliveMessage(firebasePollTopic))
|
||||||
*/
|
*/
|
||||||
case <-s.closeChan:
|
case <-s.closeChan:
|
||||||
return
|
return
|
||||||
@@ -2020,7 +2054,7 @@ func (s *Server) sendDelayedMessages() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) sendDelayedMessage(v *visitor, m *message) error {
|
func (s *Server) sendDelayedMessage(v *visitor, m *model.Message) error {
|
||||||
logvm(v, m).Debug("Sending delayed message")
|
logvm(v, m).Debug("Sending delayed message")
|
||||||
s.mu.RLock()
|
s.mu.RLock()
|
||||||
t, ok := s.topics[m.Topic] // If no subscribers, just mark message as published
|
t, ok := s.topics[m.Topic] // If no subscribers, just mark message as published
|
||||||
|
|||||||
@@ -38,8 +38,32 @@
|
|||||||
#
|
#
|
||||||
# firebase-key-file: <filename>
|
# firebase-key-file: <filename>
|
||||||
|
|
||||||
|
# If "database-url" is set, ntfy will use PostgreSQL for all database-backed stores (message cache,
|
||||||
|
# user manager, and web push subscriptions) instead of SQLite. When set, the "cache-file",
|
||||||
|
# "auth-file", and "web-push-file" options must not be set.
|
||||||
|
#
|
||||||
|
# Note: Setting "database-url" implicitly enables authentication and access control.
|
||||||
|
# The default access is "read-write" (see "auth-default-access").
|
||||||
|
#
|
||||||
|
# The URL supports standard PostgreSQL parameters (sslmode, connect_timeout, sslcert, etc.),
|
||||||
|
# as well as ntfy-specific connection pool parameters:
|
||||||
|
# pool_max_conns=10 - Maximum number of open connections (default: 10)
|
||||||
|
# pool_max_idle_conns=N - Maximum number of idle connections
|
||||||
|
# pool_conn_max_lifetime=5m - Maximum lifetime of a connection (Go duration)
|
||||||
|
# pool_conn_max_idle_time=1m - Maximum idle time of a connection (Go duration)
|
||||||
|
#
|
||||||
|
# See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS
|
||||||
|
# for the full list of supported PostgreSQL connection parameters.
|
||||||
|
#
|
||||||
|
# Examples:
|
||||||
|
# database-url: "postgres://user:pass@host:5432/ntfy"
|
||||||
|
# database-url: "postgres://user:pass@host:5432/ntfy?sslmode=require&pool_max_conns=50"
|
||||||
|
#
|
||||||
|
# database-url: <connection-string>
|
||||||
|
|
||||||
# If "cache-file" is set, messages are cached in a local SQLite database instead of only in-memory.
|
# If "cache-file" is set, messages are cached in a local SQLite database instead of only in-memory.
|
||||||
# This allows for service restarts without losing messages in support of the since= parameter.
|
# This allows for service restarts without losing messages in support of the since= parameter.
|
||||||
|
# Not required if "database-url" is set (messages are stored in PostgreSQL instead).
|
||||||
#
|
#
|
||||||
# The "cache-duration" parameter defines the duration for which messages will be buffered
|
# The "cache-duration" parameter defines the duration for which messages will be buffered
|
||||||
# before they are deleted. This is required to support the "since=..." and "poll=1" parameter.
|
# before they are deleted. This is required to support the "since=..." and "poll=1" parameter.
|
||||||
@@ -77,6 +101,8 @@
|
|||||||
# If set, access to the ntfy server and API can be controlled on a granular level using
|
# If set, access to the ntfy server and API can be controlled on a granular level using
|
||||||
# the 'ntfy user' and 'ntfy access' commands. See the --help pages for details, or check the docs.
|
# the 'ntfy user' and 'ntfy access' commands. See the --help pages for details, or check the docs.
|
||||||
#
|
#
|
||||||
|
# Note: If "database-url" is set, auth is implicitly enabled and "auth-file" must not be set.
|
||||||
|
#
|
||||||
# - auth-file is the SQLite user/access database; it is created automatically if it doesn't already exist
|
# - auth-file is the SQLite user/access database; it is created automatically if it doesn't already exist
|
||||||
# - auth-default-access defines the default/fallback access if no access control entry is found; it can be
|
# - auth-default-access defines the default/fallback access if no access control entry is found; it can be
|
||||||
# set to "read-write" (default), "read-only", "write-only" or "deny-all".
|
# set to "read-write" (default), "read-only", "write-only" or "deny-all".
|
||||||
@@ -197,6 +223,7 @@
|
|||||||
# - web-push-public-key is the generated VAPID public key, e.g. AA1234BBCCddvveekaabcdfqwertyuiopasdfghjklzxcvbnm1234567890
|
# - web-push-public-key is the generated VAPID public key, e.g. AA1234BBCCddvveekaabcdfqwertyuiopasdfghjklzxcvbnm1234567890
|
||||||
# - web-push-private-key is the generated VAPID private key, e.g. AA2BB1234567890abcdefzxcvbnm1234567890
|
# - web-push-private-key is the generated VAPID private key, e.g. AA2BB1234567890abcdefzxcvbnm1234567890
|
||||||
# - web-push-file is a database file to keep track of browser subscription endpoints, e.g. /var/cache/ntfy/webpush.db
|
# - web-push-file is a database file to keep track of browser subscription endpoints, e.g. /var/cache/ntfy/webpush.db
|
||||||
|
# Not required if "database-url" is set (subscriptions are stored in PostgreSQL instead).
|
||||||
# - web-push-email-address is the admin email address send to the push provider, e.g. sysadmin@example.com
|
# - web-push-email-address is the admin email address send to the push provider, e.g. sysadmin@example.com
|
||||||
# - web-push-startup-queries is an optional list of queries to run on startup
|
# - web-push-startup-queries is an optional list of queries to run on startup
|
||||||
# - web-push-expiry-warning-duration defines the duration after which unused subscriptions are sent a warning (default is 55d)
|
# - web-push-expiry-warning-duration defines the duration after which unused subscriptions are sent a warning (default is 55d)
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"heckel.io/ntfy/v2/log"
|
"heckel.io/ntfy/v2/log"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/user"
|
"heckel.io/ntfy/v2/user"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
"net/http"
|
"net/http"
|
||||||
@@ -641,7 +642,7 @@ func (s *Server) publishSyncEvent(v *visitor) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
m := newDefaultMessage(syncTopic.ID, string(messageBytes))
|
m := model.NewDefaultMessage(syncTopic.ID, string(messageBytes))
|
||||||
if err := syncTopic.Publish(v, m); err != nil {
|
if err := syncTopic.Publish(v, m); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"heckel.io/ntfy/v2/log"
|
"heckel.io/ntfy/v2/log"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/user"
|
"heckel.io/ntfy/v2/user"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
"io"
|
"io"
|
||||||
@@ -15,7 +16,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestAccount_Signup_Success(t *testing.T) {
|
func TestAccount_Signup_Success(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.EnableSignup = true
|
conf.EnableSignup = true
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
@@ -49,10 +51,12 @@ func TestAccount_Signup_Success(t *testing.T) {
|
|||||||
require.Equal(t, 200, rr.Code)
|
require.Equal(t, 200, rr.Code)
|
||||||
account, _ = util.UnmarshalJSON[apiAccountResponse](io.NopCloser(rr.Body))
|
account, _ = util.UnmarshalJSON[apiAccountResponse](io.NopCloser(rr.Body))
|
||||||
require.Equal(t, "phil", account.Username)
|
require.Equal(t, "phil", account.Username)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Signup_UserExists(t *testing.T) {
|
func TestAccount_Signup_UserExists(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.EnableSignup = true
|
conf.EnableSignup = true
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
@@ -63,10 +67,12 @@ func TestAccount_Signup_UserExists(t *testing.T) {
|
|||||||
rr = request(t, s, "POST", "/v1/account", `{"username":"phil", "password":"mypass"}`, nil)
|
rr = request(t, s, "POST", "/v1/account", `{"username":"phil", "password":"mypass"}`, nil)
|
||||||
require.Equal(t, 409, rr.Code)
|
require.Equal(t, 409, rr.Code)
|
||||||
require.Equal(t, 40901, toHTTPError(t, rr.Body.String()).Code)
|
require.Equal(t, 40901, toHTTPError(t, rr.Body.String()).Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Signup_LimitReached(t *testing.T) {
|
func TestAccount_Signup_LimitReached(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.EnableSignup = true
|
conf.EnableSignup = true
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
@@ -78,10 +84,12 @@ func TestAccount_Signup_LimitReached(t *testing.T) {
|
|||||||
rr := request(t, s, "POST", "/v1/account", `{"username":"thiswontwork", "password":"mypass"}`, nil)
|
rr := request(t, s, "POST", "/v1/account", `{"username":"thiswontwork", "password":"mypass"}`, nil)
|
||||||
require.Equal(t, 429, rr.Code)
|
require.Equal(t, 429, rr.Code)
|
||||||
require.Equal(t, 42906, toHTTPError(t, rr.Body.String()).Code)
|
require.Equal(t, 42906, toHTTPError(t, rr.Body.String()).Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Signup_AsUser(t *testing.T) {
|
func TestAccount_Signup_AsUser(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.EnableSignup = true
|
conf.EnableSignup = true
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
@@ -100,10 +108,12 @@ func TestAccount_Signup_AsUser(t *testing.T) {
|
|||||||
"Authorization": util.BasicAuth("ben", "ben"),
|
"Authorization": util.BasicAuth("ben", "ben"),
|
||||||
})
|
})
|
||||||
require.Equal(t, 401, rr.Code)
|
require.Equal(t, 401, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Signup_Disabled(t *testing.T) {
|
func TestAccount_Signup_Disabled(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.EnableSignup = false
|
conf.EnableSignup = false
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
@@ -111,10 +121,12 @@ func TestAccount_Signup_Disabled(t *testing.T) {
|
|||||||
rr := request(t, s, "POST", "/v1/account", `{"username":"phil", "password":"mypass"}`, nil)
|
rr := request(t, s, "POST", "/v1/account", `{"username":"phil", "password":"mypass"}`, nil)
|
||||||
require.Equal(t, 400, rr.Code)
|
require.Equal(t, 400, rr.Code)
|
||||||
require.Equal(t, 40022, toHTTPError(t, rr.Body.String()).Code)
|
require.Equal(t, 40022, toHTTPError(t, rr.Body.String()).Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Signup_Rate_Limit(t *testing.T) {
|
func TestAccount_Signup_Rate_Limit(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.EnableSignup = true
|
conf.EnableSignup = true
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
|
|
||||||
@@ -125,10 +137,12 @@ func TestAccount_Signup_Rate_Limit(t *testing.T) {
|
|||||||
rr := request(t, s, "POST", "/v1/account", `{"username":"notallowed", "password":"mypass"}`, nil)
|
rr := request(t, s, "POST", "/v1/account", `{"username":"notallowed", "password":"mypass"}`, nil)
|
||||||
require.Equal(t, 429, rr.Code)
|
require.Equal(t, 429, rr.Code)
|
||||||
require.Equal(t, 42906, toHTTPError(t, rr.Body.String()).Code)
|
require.Equal(t, 42906, toHTTPError(t, rr.Body.String()).Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Get_Anonymous(t *testing.T) {
|
func TestAccount_Get_Anonymous(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.VisitorRequestLimitReplenish = 86 * time.Second
|
conf.VisitorRequestLimitReplenish = 86 * time.Second
|
||||||
conf.VisitorEmailLimitReplenish = time.Hour
|
conf.VisitorEmailLimitReplenish = time.Hour
|
||||||
conf.VisitorAttachmentTotalSizeLimit = 5123
|
conf.VisitorAttachmentTotalSizeLimit = 5123
|
||||||
@@ -168,10 +182,12 @@ func TestAccount_Get_Anonymous(t *testing.T) {
|
|||||||
require.Equal(t, int64(1002), account.Stats.MessagesRemaining)
|
require.Equal(t, int64(1002), account.Stats.MessagesRemaining)
|
||||||
require.Equal(t, int64(1), account.Stats.Emails)
|
require.Equal(t, int64(1), account.Stats.Emails)
|
||||||
require.Equal(t, int64(23), account.Stats.EmailsRemaining)
|
require.Equal(t, int64(23), account.Stats.EmailsRemaining)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_ChangeSettings(t *testing.T) {
|
func TestAccount_ChangeSettings(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
require.Nil(t, s.userManager.AddUser("phil", "phil", user.RoleUser, false))
|
require.Nil(t, s.userManager.AddUser("phil", "phil", user.RoleUser, false))
|
||||||
@@ -197,10 +213,12 @@ func TestAccount_ChangeSettings(t *testing.T) {
|
|||||||
require.Equal(t, util.Int(86400), account.Notification.DeleteAfter)
|
require.Equal(t, util.Int(86400), account.Notification.DeleteAfter)
|
||||||
require.Equal(t, util.String("juntos"), account.Notification.Sound)
|
require.Equal(t, util.String("juntos"), account.Notification.Sound)
|
||||||
require.Nil(t, account.Notification.MinPriority) // Not set
|
require.Nil(t, account.Notification.MinPriority) // Not set
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Subscription_AddUpdateDelete(t *testing.T) {
|
func TestAccount_Subscription_AddUpdateDelete(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
require.Nil(t, s.userManager.AddUser("phil", "phil", user.RoleUser, false))
|
require.Nil(t, s.userManager.AddUser("phil", "phil", user.RoleUser, false))
|
||||||
@@ -248,10 +266,12 @@ func TestAccount_Subscription_AddUpdateDelete(t *testing.T) {
|
|||||||
require.Equal(t, 200, rr.Code)
|
require.Equal(t, 200, rr.Code)
|
||||||
account, _ = util.UnmarshalJSON[apiAccountResponse](io.NopCloser(rr.Body))
|
account, _ = util.UnmarshalJSON[apiAccountResponse](io.NopCloser(rr.Body))
|
||||||
require.Equal(t, 0, len(account.Subscriptions))
|
require.Equal(t, 0, len(account.Subscriptions))
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_ChangePassword(t *testing.T) {
|
func TestAccount_ChangePassword(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.AuthUsers = []*user.User{
|
conf.AuthUsers = []*user.User{
|
||||||
{Name: "philuser", Hash: "$2a$10$U4WSIYY6evyGmZaraavM2e2JeVG6EMGUKN1uUwufUeeRd4Jpg6cGC", Role: user.RoleUser}, // philuser:philpass
|
{Name: "philuser", Hash: "$2a$10$U4WSIYY6evyGmZaraavM2e2JeVG6EMGUKN1uUwufUeeRd4Jpg6cGC", Role: user.RoleUser}, // philuser:philpass
|
||||||
}
|
}
|
||||||
@@ -291,19 +311,23 @@ func TestAccount_ChangePassword(t *testing.T) {
|
|||||||
"Authorization": util.BasicAuth("philuser", "philpass"),
|
"Authorization": util.BasicAuth("philuser", "philpass"),
|
||||||
})
|
})
|
||||||
require.Equal(t, 409, rr.Code)
|
require.Equal(t, 409, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_ChangePassword_NoAccount(t *testing.T) {
|
func TestAccount_ChangePassword_NoAccount(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
rr := request(t, s, "POST", "/v1/account/password", `{"password": "new password"}`, nil)
|
rr := request(t, s, "POST", "/v1/account/password", `{"password": "new password"}`, nil)
|
||||||
require.Equal(t, 401, rr.Code)
|
require.Equal(t, 401, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_ExtendToken(t *testing.T) {
|
func TestAccount_ExtendToken(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
require.Nil(t, s.userManager.AddUser("phil", "phil", user.RoleUser, false))
|
require.Nil(t, s.userManager.AddUser("phil", "phil", user.RoleUser, false))
|
||||||
@@ -336,10 +360,12 @@ func TestAccount_ExtendToken(t *testing.T) {
|
|||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, "some label", token.Label)
|
require.Equal(t, "some label", token.Label)
|
||||||
require.Equal(t, expires.Unix(), token.Expires)
|
require.Equal(t, expires.Unix(), token.Expires)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_ExtendToken_NoTokenProvided(t *testing.T) {
|
func TestAccount_ExtendToken_NoTokenProvided(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
require.Nil(t, s.userManager.AddUser("phil", "phil", user.RoleUser, false))
|
require.Nil(t, s.userManager.AddUser("phil", "phil", user.RoleUser, false))
|
||||||
@@ -349,10 +375,12 @@ func TestAccount_ExtendToken_NoTokenProvided(t *testing.T) {
|
|||||||
})
|
})
|
||||||
require.Equal(t, 400, rr.Code)
|
require.Equal(t, 400, rr.Code)
|
||||||
require.Equal(t, 40023, toHTTPError(t, rr.Body.String()).Code)
|
require.Equal(t, 40023, toHTTPError(t, rr.Body.String()).Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_DeleteToken(t *testing.T) {
|
func TestAccount_DeleteToken(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
require.Nil(t, s.userManager.AddUser("phil", "phil", user.RoleUser, false))
|
require.Nil(t, s.userManager.AddUser("phil", "phil", user.RoleUser, false))
|
||||||
@@ -389,10 +417,12 @@ func TestAccount_DeleteToken(t *testing.T) {
|
|||||||
"Authorization": util.BearerAuth(token.Token),
|
"Authorization": util.BearerAuth(token.Token),
|
||||||
})
|
})
|
||||||
require.Equal(t, 401, rr.Code)
|
require.Equal(t, 401, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Delete_Success(t *testing.T) {
|
func TestAccount_Delete_Success(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.EnableSignup = true
|
conf.EnableSignup = true
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
|
|
||||||
@@ -418,10 +448,12 @@ func TestAccount_Delete_Success(t *testing.T) {
|
|||||||
// Cannot re-create account, since still exists
|
// Cannot re-create account, since still exists
|
||||||
rr = request(t, s, "POST", "/v1/account", `{"username":"phil", "password":"mypass"}`, nil)
|
rr = request(t, s, "POST", "/v1/account", `{"username":"phil", "password":"mypass"}`, nil)
|
||||||
require.Equal(t, 409, rr.Code)
|
require.Equal(t, 409, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Delete_Not_Allowed(t *testing.T) {
|
func TestAccount_Delete_Not_Allowed(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.EnableSignup = true
|
conf.EnableSignup = true
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
|
|
||||||
@@ -439,10 +471,12 @@ func TestAccount_Delete_Not_Allowed(t *testing.T) {
|
|||||||
})
|
})
|
||||||
require.Equal(t, 400, rr.Code)
|
require.Equal(t, 400, rr.Code)
|
||||||
require.Equal(t, 40026, toHTTPError(t, rr.Body.String()).Code)
|
require.Equal(t, 40026, toHTTPError(t, rr.Body.String()).Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Reservation_AddWithoutTierFails(t *testing.T) {
|
func TestAccount_Reservation_AddWithoutTierFails(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.EnableSignup = true
|
conf.EnableSignup = true
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
|
|
||||||
@@ -453,10 +487,12 @@ func TestAccount_Reservation_AddWithoutTierFails(t *testing.T) {
|
|||||||
"Authorization": util.BasicAuth("phil", "mypass"),
|
"Authorization": util.BasicAuth("phil", "mypass"),
|
||||||
})
|
})
|
||||||
require.Equal(t, 401, rr.Code)
|
require.Equal(t, 401, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Reservation_AddAdminSuccess(t *testing.T) {
|
func TestAccount_Reservation_AddAdminSuccess(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.EnableSignup = true
|
conf.EnableSignup = true
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
|
|
||||||
@@ -505,10 +541,12 @@ func TestAccount_Reservation_AddAdminSuccess(t *testing.T) {
|
|||||||
reservations, err = s.userManager.Reservations("noadmin2")
|
reservations, err = s.userManager.Reservations("noadmin2")
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, 0, len(reservations))
|
require.Equal(t, 0, len(reservations))
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Reservation_AddRemoveUserWithTierSuccess(t *testing.T) {
|
func TestAccount_Reservation_AddRemoveUserWithTierSuccess(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.EnableSignup = true
|
conf.EnableSignup = true
|
||||||
conf.EnableReservations = true
|
conf.EnableReservations = true
|
||||||
conf.TwilioAccount = "dummy"
|
conf.TwilioAccount = "dummy"
|
||||||
@@ -591,10 +629,12 @@ func TestAccount_Reservation_AddRemoveUserWithTierSuccess(t *testing.T) {
|
|||||||
account, _ = util.UnmarshalJSON[apiAccountResponse](io.NopCloser(rr.Body))
|
account, _ = util.UnmarshalJSON[apiAccountResponse](io.NopCloser(rr.Body))
|
||||||
require.Equal(t, 1, len(account.Reservations))
|
require.Equal(t, 1, len(account.Reservations))
|
||||||
require.Equal(t, "mytopic", account.Reservations[0].Topic)
|
require.Equal(t, "mytopic", account.Reservations[0].Topic)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Reservation_PublishByAnonymousFails(t *testing.T) {
|
func TestAccount_Reservation_PublishByAnonymousFails(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.AuthDefault = user.PermissionReadWrite
|
conf.AuthDefault = user.PermissionReadWrite
|
||||||
conf.EnableSignup = true
|
conf.EnableSignup = true
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
@@ -625,11 +665,13 @@ func TestAccount_Reservation_PublishByAnonymousFails(t *testing.T) {
|
|||||||
// Publish a message (as anonymous)
|
// Publish a message (as anonymous)
|
||||||
rr = request(t, s, "POST", "/mytopic", `Howdy`, nil)
|
rr = request(t, s, "POST", "/mytopic", `Howdy`, nil)
|
||||||
require.Equal(t, 403, rr.Code)
|
require.Equal(t, 403, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccount_Reservation_Delete_Messages_And_Attachments(t *testing.T) {
|
func TestAccount_Reservation_Delete_Messages_And_Attachments(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
conf := newTestConfigWithAuthFile(t)
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.AuthDefault = user.PermissionReadWrite
|
conf.AuthDefault = user.PermissionReadWrite
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
|
|
||||||
@@ -674,12 +716,12 @@ func TestAccount_Reservation_Delete_Messages_And_Attachments(t *testing.T) {
|
|||||||
require.FileExists(t, filepath.Join(s.config.AttachmentCacheDir, m2.ID))
|
require.FileExists(t, filepath.Join(s.config.AttachmentCacheDir, m2.ID))
|
||||||
|
|
||||||
// Pre-verify message count and file
|
// Pre-verify message count and file
|
||||||
ms, err := s.messageCache.Messages("mytopic1", sinceAllMessages, false)
|
ms, err := s.messageCache.Messages("mytopic1", model.SinceAllMessages, false)
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, 1, len(ms))
|
require.Equal(t, 1, len(ms))
|
||||||
require.FileExists(t, filepath.Join(s.config.AttachmentCacheDir, m1.ID))
|
require.FileExists(t, filepath.Join(s.config.AttachmentCacheDir, m1.ID))
|
||||||
|
|
||||||
ms, err = s.messageCache.Messages("mytopic2", sinceAllMessages, false)
|
ms, err = s.messageCache.Messages("mytopic2", model.SinceAllMessages, false)
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, 1, len(ms))
|
require.Equal(t, 1, len(ms))
|
||||||
require.FileExists(t, filepath.Join(s.config.AttachmentCacheDir, m2.ID))
|
require.FileExists(t, filepath.Join(s.config.AttachmentCacheDir, m2.ID))
|
||||||
@@ -700,25 +742,26 @@ func TestAccount_Reservation_Delete_Messages_And_Attachments(t *testing.T) {
|
|||||||
// Verify that messages and attachments were deleted
|
// Verify that messages and attachments were deleted
|
||||||
// This does not explicitly call the manager!
|
// This does not explicitly call the manager!
|
||||||
waitFor(t, func() bool {
|
waitFor(t, func() bool {
|
||||||
ms, err := s.messageCache.Messages("mytopic1", sinceAllMessages, false)
|
ms, err := s.messageCache.Messages("mytopic1", model.SinceAllMessages, false)
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
return len(ms) == 0 && !util.FileExists(filepath.Join(s.config.AttachmentCacheDir, m1.ID))
|
return len(ms) == 0 && !util.FileExists(filepath.Join(s.config.AttachmentCacheDir, m1.ID))
|
||||||
})
|
})
|
||||||
|
|
||||||
ms, err = s.messageCache.Messages("mytopic1", sinceAllMessages, false)
|
ms, err = s.messageCache.Messages("mytopic1", model.SinceAllMessages, false)
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, 0, len(ms))
|
require.Equal(t, 0, len(ms))
|
||||||
require.NoFileExists(t, filepath.Join(s.config.AttachmentCacheDir, m1.ID))
|
require.NoFileExists(t, filepath.Join(s.config.AttachmentCacheDir, m1.ID))
|
||||||
|
|
||||||
ms, err = s.messageCache.Messages("mytopic2", sinceAllMessages, false)
|
ms, err = s.messageCache.Messages("mytopic2", model.SinceAllMessages, false)
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, 1, len(ms))
|
require.Equal(t, 1, len(ms))
|
||||||
require.Equal(t, m2.ID, ms[0].ID)
|
require.Equal(t, m2.ID, ms[0].ID)
|
||||||
require.FileExists(t, filepath.Join(s.config.AttachmentCacheDir, m2.ID))
|
require.FileExists(t, filepath.Join(s.config.AttachmentCacheDir, m2.ID))
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/*func TestAccount_Persist_UserStats_After_Tier_Change(t *testing.T) {
|
/*func TestAccount_Persist_UserStats_After_Tier_Change(t *testing.T) {
|
||||||
conf := newTestConfigWithAuthFile(t)
|
conf := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
conf.AuthDefault = user.PermissionReadWrite
|
conf.AuthDefault = user.PermissionReadWrite
|
||||||
conf.AuthStatsQueueWriterInterval = 300 * time.Millisecond
|
conf.AuthStatsQueueWriterInterval = 300 * time.Millisecond
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
|
|||||||
@@ -11,7 +11,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestVersion_Admin(t *testing.T) {
|
func TestVersion_Admin(t *testing.T) {
|
||||||
c := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.BuildVersion = "1.2.3"
|
c.BuildVersion = "1.2.3"
|
||||||
c.BuildCommit = "abcdef0"
|
c.BuildCommit = "abcdef0"
|
||||||
c.BuildDate = "2026-02-08T00:00:00Z"
|
c.BuildDate = "2026-02-08T00:00:00Z"
|
||||||
@@ -43,10 +44,12 @@ func TestVersion_Admin(t *testing.T) {
|
|||||||
// Unauthenticated user cannot access /v1/version
|
// Unauthenticated user cannot access /v1/version
|
||||||
rr = request(t, s, "GET", "/v1/version", "", nil)
|
rr = request(t, s, "GET", "/v1/version", "", nil)
|
||||||
require.Equal(t, 401, rr.Code)
|
require.Equal(t, 401, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUser_AddRemove(t *testing.T) {
|
func TestUser_AddRemove(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
// Create admin, tier
|
// Create admin, tier
|
||||||
@@ -99,10 +102,12 @@ func TestUser_AddRemove(t *testing.T) {
|
|||||||
"Authorization": util.BasicAuth("phil", "phil"),
|
"Authorization": util.BasicAuth("phil", "phil"),
|
||||||
})
|
})
|
||||||
require.Equal(t, 400, rr.Code)
|
require.Equal(t, 400, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUser_AddWithPasswordHash(t *testing.T) {
|
func TestUser_AddWithPasswordHash(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
// Create admin
|
// Create admin
|
||||||
@@ -128,10 +133,12 @@ func TestUser_AddWithPasswordHash(t *testing.T) {
|
|||||||
require.Equal(t, user.RoleAdmin, users[0].Role)
|
require.Equal(t, user.RoleAdmin, users[0].Role)
|
||||||
require.Equal(t, "ben", users[1].Name)
|
require.Equal(t, "ben", users[1].Name)
|
||||||
require.Equal(t, user.RoleUser, users[1].Role)
|
require.Equal(t, user.RoleUser, users[1].Role)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUser_ChangeUserPassword(t *testing.T) {
|
func TestUser_ChangeUserPassword(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
// Create admin
|
// Create admin
|
||||||
@@ -166,10 +173,12 @@ func TestUser_ChangeUserPassword(t *testing.T) {
|
|||||||
"Authorization": util.BasicAuth("ben", "ben-two"),
|
"Authorization": util.BasicAuth("ben", "ben-two"),
|
||||||
})
|
})
|
||||||
require.Equal(t, 200, rr.Code)
|
require.Equal(t, 200, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUser_ChangeUserTier(t *testing.T) {
|
func TestUser_ChangeUserTier(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
// Create admin, tier
|
// Create admin, tier
|
||||||
@@ -206,10 +215,12 @@ func TestUser_ChangeUserTier(t *testing.T) {
|
|||||||
users, err = s.userManager.Users()
|
users, err = s.userManager.Users()
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, "tier2", users[1].Tier.Code)
|
require.Equal(t, "tier2", users[1].Tier.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUser_ChangeUserPasswordAndTier(t *testing.T) {
|
func TestUser_ChangeUserPasswordAndTier(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
// Create admin, tier
|
// Create admin, tier
|
||||||
@@ -258,10 +269,12 @@ func TestUser_ChangeUserPasswordAndTier(t *testing.T) {
|
|||||||
users, err = s.userManager.Users()
|
users, err = s.userManager.Users()
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, "tier2", users[1].Tier.Code)
|
require.Equal(t, "tier2", users[1].Tier.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUser_ChangeUserPasswordWithHash(t *testing.T) {
|
func TestUser_ChangeUserPasswordWithHash(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
// Create admin
|
// Create admin
|
||||||
@@ -290,10 +303,12 @@ func TestUser_ChangeUserPasswordWithHash(t *testing.T) {
|
|||||||
"Authorization": util.BasicAuth("ben", "ben"),
|
"Authorization": util.BasicAuth("ben", "ben"),
|
||||||
})
|
})
|
||||||
require.Equal(t, 200, rr.Code)
|
require.Equal(t, 200, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUser_DontChangeAdminPassword(t *testing.T) {
|
func TestUser_DontChangeAdminPassword(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
// Create admin
|
// Create admin
|
||||||
@@ -305,10 +320,12 @@ func TestUser_DontChangeAdminPassword(t *testing.T) {
|
|||||||
"Authorization": util.BasicAuth("phil", "phil"),
|
"Authorization": util.BasicAuth("phil", "phil"),
|
||||||
})
|
})
|
||||||
require.Equal(t, 403, rr.Code)
|
require.Equal(t, 403, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUser_AddRemove_Failures(t *testing.T) {
|
func TestUser_AddRemove_Failures(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithAuthFile(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithAuthFile(t, databaseURL))
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
|
|
||||||
// Create admin
|
// Create admin
|
||||||
@@ -344,10 +361,12 @@ func TestUser_AddRemove_Failures(t *testing.T) {
|
|||||||
"Authorization": util.BasicAuth("phil", "phil"),
|
"Authorization": util.BasicAuth("phil", "phil"),
|
||||||
})
|
})
|
||||||
require.Equal(t, 200, rr.Code)
|
require.Equal(t, 200, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccess_AllowReset(t *testing.T) {
|
func TestAccess_AllowReset(t *testing.T) {
|
||||||
c := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.AuthDefault = user.PermissionDenyAll
|
c.AuthDefault = user.PermissionDenyAll
|
||||||
s := newTestServer(t, c)
|
s := newTestServer(t, c)
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
@@ -385,10 +404,12 @@ func TestAccess_AllowReset(t *testing.T) {
|
|||||||
"Authorization": util.BasicAuth("ben", "ben"),
|
"Authorization": util.BasicAuth("ben", "ben"),
|
||||||
})
|
})
|
||||||
require.Equal(t, 403, rr.Code)
|
require.Equal(t, 403, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccess_AllowReset_NonAdminAttempt(t *testing.T) {
|
func TestAccess_AllowReset_NonAdminAttempt(t *testing.T) {
|
||||||
c := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.AuthDefault = user.PermissionDenyAll
|
c.AuthDefault = user.PermissionDenyAll
|
||||||
s := newTestServer(t, c)
|
s := newTestServer(t, c)
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
@@ -401,10 +422,12 @@ func TestAccess_AllowReset_NonAdminAttempt(t *testing.T) {
|
|||||||
"Authorization": util.BasicAuth("ben", "ben"),
|
"Authorization": util.BasicAuth("ben", "ben"),
|
||||||
})
|
})
|
||||||
require.Equal(t, 401, rr.Code)
|
require.Equal(t, 401, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccess_AllowReset_KillConnection(t *testing.T) {
|
func TestAccess_AllowReset_KillConnection(t *testing.T) {
|
||||||
c := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.AuthDefault = user.PermissionDenyAll
|
c.AuthDefault = user.PermissionDenyAll
|
||||||
s := newTestServer(t, c)
|
s := newTestServer(t, c)
|
||||||
defer s.closeDatabases()
|
defer s.closeDatabases()
|
||||||
@@ -434,4 +457,5 @@ func TestAccess_AllowReset_KillConnection(t *testing.T) {
|
|||||||
waitFor(t, func() bool {
|
waitFor(t, func() bool {
|
||||||
return timeTaken.Load() >= 500
|
return timeTaken.Load() >= 500
|
||||||
})
|
})
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
"firebase.google.com/go/v4/messaging"
|
"firebase.google.com/go/v4/messaging"
|
||||||
"fmt"
|
"fmt"
|
||||||
"google.golang.org/api/option"
|
"google.golang.org/api/option"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/user"
|
"heckel.io/ntfy/v2/user"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -43,7 +44,7 @@ func newFirebaseClient(sender firebaseSender, auther user.Auther) *firebaseClien
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *firebaseClient) Send(v *visitor, m *message) error {
|
func (c *firebaseClient) Send(v *visitor, m *model.Message) error {
|
||||||
if !v.FirebaseAllowed() {
|
if !v.FirebaseAllowed() {
|
||||||
return errFirebaseTemporarilyBanned
|
return errFirebaseTemporarilyBanned
|
||||||
}
|
}
|
||||||
@@ -121,11 +122,11 @@ func (c *firebaseSenderImpl) Send(m *messaging.Message) error {
|
|||||||
// On Android, this will trigger the app to poll the topic and thereby displaying new messages.
|
// On Android, this will trigger the app to poll the topic and thereby displaying new messages.
|
||||||
// - If UpstreamBaseURL is set, messages are forwarded as poll requests to an upstream server and then forwarded
|
// - If UpstreamBaseURL is set, messages are forwarded as poll requests to an upstream server and then forwarded
|
||||||
// to Firebase here. This is mainly for iOS to support self-hosted servers.
|
// to Firebase here. This is mainly for iOS to support self-hosted servers.
|
||||||
func toFirebaseMessage(m *message, auther user.Auther) (*messaging.Message, error) {
|
func toFirebaseMessage(m *model.Message, auther user.Auther) (*messaging.Message, error) {
|
||||||
var data map[string]string // Mostly matches https://ntfy.sh/docs/subscribe/api/#json-message-format
|
var data map[string]string // Mostly matches https://ntfy.sh/docs/subscribe/api/#json-message-format
|
||||||
var apnsConfig *messaging.APNSConfig
|
var apnsConfig *messaging.APNSConfig
|
||||||
switch m.Event {
|
switch m.Event {
|
||||||
case keepaliveEvent, openEvent:
|
case model.KeepaliveEvent, model.OpenEvent:
|
||||||
data = map[string]string{
|
data = map[string]string{
|
||||||
"id": m.ID,
|
"id": m.ID,
|
||||||
"time": fmt.Sprintf("%d", m.Time),
|
"time": fmt.Sprintf("%d", m.Time),
|
||||||
@@ -133,7 +134,7 @@ func toFirebaseMessage(m *message, auther user.Auther) (*messaging.Message, erro
|
|||||||
"topic": m.Topic,
|
"topic": m.Topic,
|
||||||
}
|
}
|
||||||
apnsConfig = createAPNSBackgroundConfig(data)
|
apnsConfig = createAPNSBackgroundConfig(data)
|
||||||
case pollRequestEvent:
|
case model.PollRequestEvent:
|
||||||
data = map[string]string{
|
data = map[string]string{
|
||||||
"id": m.ID,
|
"id": m.ID,
|
||||||
"time": fmt.Sprintf("%d", m.Time),
|
"time": fmt.Sprintf("%d", m.Time),
|
||||||
@@ -143,7 +144,7 @@ func toFirebaseMessage(m *message, auther user.Auther) (*messaging.Message, erro
|
|||||||
"poll_id": m.PollID,
|
"poll_id": m.PollID,
|
||||||
}
|
}
|
||||||
apnsConfig = createAPNSAlertConfig(m, data)
|
apnsConfig = createAPNSAlertConfig(m, data)
|
||||||
case messageDeleteEvent, messageClearEvent:
|
case model.MessageDeleteEvent, model.MessageClearEvent:
|
||||||
data = map[string]string{
|
data = map[string]string{
|
||||||
"id": m.ID,
|
"id": m.ID,
|
||||||
"time": fmt.Sprintf("%d", m.Time),
|
"time": fmt.Sprintf("%d", m.Time),
|
||||||
@@ -152,7 +153,7 @@ func toFirebaseMessage(m *message, auther user.Auther) (*messaging.Message, erro
|
|||||||
"sequence_id": m.SequenceID,
|
"sequence_id": m.SequenceID,
|
||||||
}
|
}
|
||||||
apnsConfig = createAPNSBackgroundConfig(data)
|
apnsConfig = createAPNSBackgroundConfig(data)
|
||||||
case messageEvent:
|
case model.MessageEvent:
|
||||||
if auther != nil {
|
if auther != nil {
|
||||||
// If "anonymous read" for a topic is not allowed, we cannot send the message along
|
// If "anonymous read" for a topic is not allowed, we cannot send the message along
|
||||||
// via Firebase. Instead, we send a "poll_request" message, asking the client to poll.
|
// via Firebase. Instead, we send a "poll_request" message, asking the client to poll.
|
||||||
@@ -235,7 +236,7 @@ func maybeTruncateFCMMessage(m *messaging.Message) *messaging.Message {
|
|||||||
// createAPNSAlertConfig creates an APNS config for iOS notifications that show up as an alert (only relevant for iOS).
|
// createAPNSAlertConfig creates an APNS config for iOS notifications that show up as an alert (only relevant for iOS).
|
||||||
// We must set the Alert struct ("alert"), and we need to set MutableContent ("mutable-content"), so the Notification Service
|
// We must set the Alert struct ("alert"), and we need to set MutableContent ("mutable-content"), so the Notification Service
|
||||||
// Extension in iOS can modify the message.
|
// Extension in iOS can modify the message.
|
||||||
func createAPNSAlertConfig(m *message, data map[string]string) *messaging.APNSConfig {
|
func createAPNSAlertConfig(m *model.Message, data map[string]string) *messaging.APNSConfig {
|
||||||
apnsData := make(map[string]any)
|
apnsData := make(map[string]any)
|
||||||
for k, v := range data {
|
for k, v := range data {
|
||||||
apnsData[k] = v
|
apnsData[k] = v
|
||||||
@@ -296,8 +297,8 @@ func maybeTruncateAPNSBodyMessage(s string) string {
|
|||||||
//
|
//
|
||||||
// This empties all the fields that are not needed for a poll request and just sets the required fields,
|
// This empties all the fields that are not needed for a poll request and just sets the required fields,
|
||||||
// most importantly, the PollID.
|
// most importantly, the PollID.
|
||||||
func toPollRequest(m *message) *message {
|
func toPollRequest(m *model.Message) *model.Message {
|
||||||
pr := newPollRequestMessage(m.Topic, m.ID)
|
pr := model.NewPollRequestMessage(m.Topic, m.ID)
|
||||||
pr.ID = m.ID
|
pr.ID = m.ID
|
||||||
pr.Time = m.Time
|
pr.Time = m.Time
|
||||||
pr.Priority = m.Priority // Keep priority
|
pr.Priority = m.Priority // Keep priority
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ package server
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/user"
|
"heckel.io/ntfy/v2/user"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -21,7 +22,7 @@ var (
|
|||||||
type firebaseClient struct {
|
type firebaseClient struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *firebaseClient) Send(v *visitor, m *message) error {
|
func (c *firebaseClient) Send(v *visitor, m *model.Message) error {
|
||||||
return errFirebaseNotAvailable
|
return errFirebaseNotAvailable
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/user"
|
"heckel.io/ntfy/v2/user"
|
||||||
"net/netip"
|
"net/netip"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -63,7 +64,7 @@ func (s *testFirebaseSender) Messages() []*messaging.Message {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestToFirebaseMessage_Keepalive(t *testing.T) {
|
func TestToFirebaseMessage_Keepalive(t *testing.T) {
|
||||||
m := newKeepaliveMessage("mytopic")
|
m := model.NewKeepaliveMessage("mytopic")
|
||||||
fbm, err := toFirebaseMessage(m, nil)
|
fbm, err := toFirebaseMessage(m, nil)
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, "mytopic", fbm.Topic)
|
require.Equal(t, "mytopic", fbm.Topic)
|
||||||
@@ -94,7 +95,7 @@ func TestToFirebaseMessage_Keepalive(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestToFirebaseMessage_Open(t *testing.T) {
|
func TestToFirebaseMessage_Open(t *testing.T) {
|
||||||
m := newOpenMessage("mytopic")
|
m := model.NewOpenMessage("mytopic")
|
||||||
fbm, err := toFirebaseMessage(m, nil)
|
fbm, err := toFirebaseMessage(m, nil)
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, "mytopic", fbm.Topic)
|
require.Equal(t, "mytopic", fbm.Topic)
|
||||||
@@ -125,13 +126,13 @@ func TestToFirebaseMessage_Open(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestToFirebaseMessage_Message_Normal_Allowed(t *testing.T) {
|
func TestToFirebaseMessage_Message_Normal_Allowed(t *testing.T) {
|
||||||
m := newDefaultMessage("mytopic", "this is a message")
|
m := model.NewDefaultMessage("mytopic", "this is a message")
|
||||||
m.Priority = 4
|
m.Priority = 4
|
||||||
m.Tags = []string{"tag 1", "tag2"}
|
m.Tags = []string{"tag 1", "tag2"}
|
||||||
m.Click = "https://google.com"
|
m.Click = "https://google.com"
|
||||||
m.Icon = "https://ntfy.sh/static/img/ntfy.png"
|
m.Icon = "https://ntfy.sh/static/img/ntfy.png"
|
||||||
m.Title = "some title"
|
m.Title = "some title"
|
||||||
m.Actions = []*action{
|
m.Actions = []*model.Action{
|
||||||
{
|
{
|
||||||
ID: "123",
|
ID: "123",
|
||||||
Action: "view",
|
Action: "view",
|
||||||
@@ -150,7 +151,7 @@ func TestToFirebaseMessage_Message_Normal_Allowed(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
m.Attachment = &attachment{
|
m.Attachment = &model.Attachment{
|
||||||
Name: "some file.jpg",
|
Name: "some file.jpg",
|
||||||
Type: "image/jpeg",
|
Type: "image/jpeg",
|
||||||
Size: 12345,
|
Size: 12345,
|
||||||
@@ -219,7 +220,7 @@ func TestToFirebaseMessage_Message_Normal_Allowed(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestToFirebaseMessage_Message_Normal_Not_Allowed(t *testing.T) {
|
func TestToFirebaseMessage_Message_Normal_Not_Allowed(t *testing.T) {
|
||||||
m := newDefaultMessage("mytopic", "this is a message")
|
m := model.NewDefaultMessage("mytopic", "this is a message")
|
||||||
m.Priority = 5
|
m.Priority = 5
|
||||||
fbm, err := toFirebaseMessage(m, &testAuther{Allow: false}) // Not allowed!
|
fbm, err := toFirebaseMessage(m, &testAuther{Allow: false}) // Not allowed!
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
@@ -250,7 +251,7 @@ func TestToFirebaseMessage_Message_Normal_Not_Allowed(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestToFirebaseMessage_PollRequest(t *testing.T) {
|
func TestToFirebaseMessage_PollRequest(t *testing.T) {
|
||||||
m := newPollRequestMessage("mytopic", "fOv6k1QbCzo6")
|
m := model.NewPollRequestMessage("mytopic", "fOv6k1QbCzo6")
|
||||||
fbm, err := toFirebaseMessage(m, nil)
|
fbm, err := toFirebaseMessage(m, nil)
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, "mytopic", fbm.Topic)
|
require.Equal(t, "mytopic", fbm.Topic)
|
||||||
@@ -344,18 +345,18 @@ func TestMaybeTruncateFCMMessage_NotTooLong(t *testing.T) {
|
|||||||
func TestToFirebaseSender_Abuse(t *testing.T) {
|
func TestToFirebaseSender_Abuse(t *testing.T) {
|
||||||
sender := &testFirebaseSender{allowed: 2}
|
sender := &testFirebaseSender{allowed: 2}
|
||||||
client := newFirebaseClient(sender, &testAuther{})
|
client := newFirebaseClient(sender, &testAuther{})
|
||||||
visitor := newVisitor(newTestConfig(t), newMemTestCache(t), nil, netip.MustParseAddr("1.2.3.4"), nil)
|
visitor := newVisitor(newTestConfig(t, ""), newMemTestCache(t), nil, netip.MustParseAddr("1.2.3.4"), nil)
|
||||||
|
|
||||||
require.Nil(t, client.Send(visitor, &message{Topic: "mytopic"}))
|
require.Nil(t, client.Send(visitor, &model.Message{Topic: "mytopic"}))
|
||||||
require.Equal(t, 1, len(sender.Messages()))
|
require.Equal(t, 1, len(sender.Messages()))
|
||||||
|
|
||||||
require.Nil(t, client.Send(visitor, &message{Topic: "mytopic"}))
|
require.Nil(t, client.Send(visitor, &model.Message{Topic: "mytopic"}))
|
||||||
require.Equal(t, 2, len(sender.Messages()))
|
require.Equal(t, 2, len(sender.Messages()))
|
||||||
|
|
||||||
require.Equal(t, errFirebaseQuotaExceeded, client.Send(visitor, &message{Topic: "mytopic"}))
|
require.Equal(t, errFirebaseQuotaExceeded, client.Send(visitor, &model.Message{Topic: "mytopic"}))
|
||||||
require.Equal(t, 2, len(sender.Messages()))
|
require.Equal(t, 2, len(sender.Messages()))
|
||||||
|
|
||||||
sender.messages = make([]*messaging.Message, 0) // Reset to test that time limit is working
|
sender.messages = make([]*messaging.Message, 0) // Reset to test that time limit is working
|
||||||
require.Equal(t, errFirebaseTemporarilyBanned, client.Send(visitor, &message{Topic: "mytopic"}))
|
require.Equal(t, errFirebaseTemporarilyBanned, client.Send(visitor, &model.Message{Topic: "mytopic"}))
|
||||||
require.Equal(t, 0, len(sender.Messages()))
|
require.Equal(t, 0, len(sender.Messages()))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,15 +17,10 @@ func (s *Server) execManager() {
|
|||||||
s.pruneMessages()
|
s.pruneMessages()
|
||||||
s.pruneAndNotifyWebPushSubscriptions()
|
s.pruneAndNotifyWebPushSubscriptions()
|
||||||
|
|
||||||
// Message count per topic
|
// Message count
|
||||||
var messagesCached int
|
messagesCached, err := s.messageCache.MessagesCount()
|
||||||
messageCounts, err := s.messageCache.MessageCounts()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Tag(tagManager).Err(err).Warn("Cannot get message counts")
|
log.Tag(tagManager).Err(err).Warn("Cannot get messages count")
|
||||||
messageCounts = make(map[string]int) // Empty, so we can continue
|
|
||||||
}
|
|
||||||
for _, count := range messageCounts {
|
|
||||||
messagesCached += count
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove subscriptions without subscribers
|
// Remove subscriptions without subscribers
|
||||||
|
|||||||
@@ -2,12 +2,14 @@ package server
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestServer_Manager_Prune_Messages_Without_Attachments_DoesNotPanic(t *testing.T) {
|
func TestServer_Manager_Prune_Messages_Without_Attachments_DoesNotPanic(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
// Tests that the manager runs without attachment-cache-dir set, see #617
|
// Tests that the manager runs without attachment-cache-dir set, see #617
|
||||||
c := newTestConfig(t)
|
c := newTestConfig(t, databaseURL)
|
||||||
c.AttachmentCacheDir = ""
|
c.AttachmentCacheDir = ""
|
||||||
s := newTestServer(t, c)
|
s := newTestServer(t, c)
|
||||||
|
|
||||||
@@ -24,5 +26,6 @@ func TestServer_Manager_Prune_Messages_Without_Attachments_DoesNotPanic(t *testi
|
|||||||
|
|
||||||
// Actually deleted
|
// Actually deleted
|
||||||
_, err := s.messageCache.Message(m.ID)
|
_, err := s.messageCache.Message(m.ID)
|
||||||
require.Equal(t, errMessageNotFound, err)
|
require.Equal(t, model.ErrMessageNotFound, err)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"github.com/stripe/stripe-go/v74"
|
"github.com/stripe/stripe-go/v74"
|
||||||
"golang.org/x/time/rate"
|
"golang.org/x/time/rate"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/payments"
|
"heckel.io/ntfy/v2/payments"
|
||||||
"heckel.io/ntfy/v2/user"
|
"heckel.io/ntfy/v2/user"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
@@ -21,10 +22,11 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestPayments_Tiers(t *testing.T) {
|
func TestPayments_Tiers(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
stripeMock := &testStripeAPI{}
|
stripeMock := &testStripeAPI{}
|
||||||
defer stripeMock.AssertExpectations(t)
|
defer stripeMock.AssertExpectations(t)
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.StripeSecretKey = "secret key"
|
c.StripeSecretKey = "secret key"
|
||||||
c.StripeWebhookKey = "webhook key"
|
c.StripeWebhookKey = "webhook key"
|
||||||
c.VisitorRequestLimitReplenish = 12 * time.Hour
|
c.VisitorRequestLimitReplenish = 12 * time.Hour
|
||||||
@@ -128,13 +130,15 @@ func TestPayments_Tiers(t *testing.T) {
|
|||||||
require.Equal(t, int64(999111), tier.Limits.AttachmentFileSize)
|
require.Equal(t, int64(999111), tier.Limits.AttachmentFileSize)
|
||||||
require.Equal(t, int64(888111), tier.Limits.AttachmentTotalSize)
|
require.Equal(t, int64(888111), tier.Limits.AttachmentTotalSize)
|
||||||
require.Equal(t, int64(3600), tier.Limits.AttachmentExpiryDuration)
|
require.Equal(t, int64(3600), tier.Limits.AttachmentExpiryDuration)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPayments_SubscriptionCreate_NotAStripeCustomer_Success(t *testing.T) {
|
func TestPayments_SubscriptionCreate_NotAStripeCustomer_Success(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
stripeMock := &testStripeAPI{}
|
stripeMock := &testStripeAPI{}
|
||||||
defer stripeMock.AssertExpectations(t)
|
defer stripeMock.AssertExpectations(t)
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.StripeSecretKey = "secret key"
|
c.StripeSecretKey = "secret key"
|
||||||
c.StripeWebhookKey = "webhook key"
|
c.StripeWebhookKey = "webhook key"
|
||||||
s := newTestServer(t, c)
|
s := newTestServer(t, c)
|
||||||
@@ -161,13 +165,15 @@ func TestPayments_SubscriptionCreate_NotAStripeCustomer_Success(t *testing.T) {
|
|||||||
redirectResponse, err := util.UnmarshalJSON[apiAccountBillingSubscriptionCreateResponse](io.NopCloser(response.Body))
|
redirectResponse, err := util.UnmarshalJSON[apiAccountBillingSubscriptionCreateResponse](io.NopCloser(response.Body))
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, "https://billing.stripe.com/abc/def", redirectResponse.RedirectURL)
|
require.Equal(t, "https://billing.stripe.com/abc/def", redirectResponse.RedirectURL)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPayments_SubscriptionCreate_StripeCustomer_Success(t *testing.T) {
|
func TestPayments_SubscriptionCreate_StripeCustomer_Success(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
stripeMock := &testStripeAPI{}
|
stripeMock := &testStripeAPI{}
|
||||||
defer stripeMock.AssertExpectations(t)
|
defer stripeMock.AssertExpectations(t)
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.StripeSecretKey = "secret key"
|
c.StripeSecretKey = "secret key"
|
||||||
c.StripeWebhookKey = "webhook key"
|
c.StripeWebhookKey = "webhook key"
|
||||||
s := newTestServer(t, c)
|
s := newTestServer(t, c)
|
||||||
@@ -205,13 +211,15 @@ func TestPayments_SubscriptionCreate_StripeCustomer_Success(t *testing.T) {
|
|||||||
redirectResponse, err := util.UnmarshalJSON[apiAccountBillingSubscriptionCreateResponse](io.NopCloser(response.Body))
|
redirectResponse, err := util.UnmarshalJSON[apiAccountBillingSubscriptionCreateResponse](io.NopCloser(response.Body))
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, "https://billing.stripe.com/abc/def", redirectResponse.RedirectURL)
|
require.Equal(t, "https://billing.stripe.com/abc/def", redirectResponse.RedirectURL)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPayments_AccountDelete_Cancels_Subscription(t *testing.T) {
|
func TestPayments_AccountDelete_Cancels_Subscription(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
stripeMock := &testStripeAPI{}
|
stripeMock := &testStripeAPI{}
|
||||||
defer stripeMock.AssertExpectations(t)
|
defer stripeMock.AssertExpectations(t)
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.EnableSignup = true
|
c.EnableSignup = true
|
||||||
c.StripeSecretKey = "secret key"
|
c.StripeSecretKey = "secret key"
|
||||||
c.StripeWebhookKey = "webhook key"
|
c.StripeWebhookKey = "webhook key"
|
||||||
@@ -250,9 +258,11 @@ func TestPayments_AccountDelete_Cancels_Subscription(t *testing.T) {
|
|||||||
"Authorization": util.BasicAuth("phil", "mypass"),
|
"Authorization": util.BasicAuth("phil", "mypass"),
|
||||||
})
|
})
|
||||||
require.Equal(t, 401, rr.Code)
|
require.Equal(t, 401, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPayments_Checkout_Success_And_Increase_Rate_Limits_Reset_Visitor(t *testing.T) {
|
func TestPayments_Checkout_Success_And_Increase_Rate_Limits_Reset_Visitor(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
// This test is too overloaded, but it's also a great end-to-end a test.
|
// This test is too overloaded, but it's also a great end-to-end a test.
|
||||||
//
|
//
|
||||||
// It tests:
|
// It tests:
|
||||||
@@ -264,7 +274,7 @@ func TestPayments_Checkout_Success_And_Increase_Rate_Limits_Reset_Visitor(t *tes
|
|||||||
stripeMock := &testStripeAPI{}
|
stripeMock := &testStripeAPI{}
|
||||||
defer stripeMock.AssertExpectations(t)
|
defer stripeMock.AssertExpectations(t)
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.StripeSecretKey = "secret key"
|
c.StripeSecretKey = "secret key"
|
||||||
c.StripeWebhookKey = "webhook key"
|
c.StripeWebhookKey = "webhook key"
|
||||||
c.VisitorRequestLimitBurst = 5
|
c.VisitorRequestLimitBurst = 5
|
||||||
@@ -415,9 +425,11 @@ func TestPayments_Checkout_Success_And_Increase_Rate_Limits_Reset_Visitor(t *tes
|
|||||||
require.Equal(t, int64(220), account.Limits.Messages)
|
require.Equal(t, int64(220), account.Limits.Messages)
|
||||||
require.Equal(t, int64(220), account.Stats.Messages)
|
require.Equal(t, int64(220), account.Stats.Messages)
|
||||||
require.Equal(t, int64(0), account.Stats.MessagesRemaining)
|
require.Equal(t, int64(0), account.Stats.MessagesRemaining)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPayments_Webhook_Subscription_Updated_Downgrade_From_PastDue_To_Active(t *testing.T) {
|
func TestPayments_Webhook_Subscription_Updated_Downgrade_From_PastDue_To_Active(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
// This tests incoming webhooks from Stripe to update a subscription:
|
// This tests incoming webhooks from Stripe to update a subscription:
|
||||||
@@ -428,7 +440,7 @@ func TestPayments_Webhook_Subscription_Updated_Downgrade_From_PastDue_To_Active(
|
|||||||
stripeMock := &testStripeAPI{}
|
stripeMock := &testStripeAPI{}
|
||||||
defer stripeMock.AssertExpectations(t)
|
defer stripeMock.AssertExpectations(t)
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.StripeSecretKey = "secret key"
|
c.StripeSecretKey = "secret key"
|
||||||
c.StripeWebhookKey = "webhook key"
|
c.StripeWebhookKey = "webhook key"
|
||||||
s := newTestServer(t, c)
|
s := newTestServer(t, c)
|
||||||
@@ -535,18 +547,20 @@ func TestPayments_Webhook_Subscription_Updated_Downgrade_From_PastDue_To_Active(
|
|||||||
time.Sleep(time.Second)
|
time.Sleep(time.Second)
|
||||||
s.execManager()
|
s.execManager()
|
||||||
|
|
||||||
ms, err := s.messageCache.Messages("atopic", sinceAllMessages, false)
|
ms, err := s.messageCache.Messages("atopic", model.SinceAllMessages, false)
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, 2, len(ms))
|
require.Equal(t, 2, len(ms))
|
||||||
require.FileExists(t, filepath.Join(s.config.AttachmentCacheDir, a2.ID))
|
require.FileExists(t, filepath.Join(s.config.AttachmentCacheDir, a2.ID))
|
||||||
|
|
||||||
ms, err = s.messageCache.Messages("ztopic", sinceAllMessages, false)
|
ms, err = s.messageCache.Messages("ztopic", model.SinceAllMessages, false)
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, 0, len(ms))
|
require.Equal(t, 0, len(ms))
|
||||||
require.NoFileExists(t, filepath.Join(s.config.AttachmentCacheDir, z2.ID))
|
require.NoFileExists(t, filepath.Join(s.config.AttachmentCacheDir, z2.ID))
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPayments_Webhook_Subscription_Deleted(t *testing.T) {
|
func TestPayments_Webhook_Subscription_Deleted(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
// This tests incoming webhooks from Stripe to delete a subscription. It verifies that the database is
|
// This tests incoming webhooks from Stripe to delete a subscription. It verifies that the database is
|
||||||
// updated (all Stripe fields are deleted, and the tier is removed).
|
// updated (all Stripe fields are deleted, and the tier is removed).
|
||||||
//
|
//
|
||||||
@@ -555,7 +569,7 @@ func TestPayments_Webhook_Subscription_Deleted(t *testing.T) {
|
|||||||
stripeMock := &testStripeAPI{}
|
stripeMock := &testStripeAPI{}
|
||||||
defer stripeMock.AssertExpectations(t)
|
defer stripeMock.AssertExpectations(t)
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.StripeSecretKey = "secret key"
|
c.StripeSecretKey = "secret key"
|
||||||
c.StripeWebhookKey = "webhook key"
|
c.StripeWebhookKey = "webhook key"
|
||||||
s := newTestServer(t, c)
|
s := newTestServer(t, c)
|
||||||
@@ -609,13 +623,15 @@ func TestPayments_Webhook_Subscription_Deleted(t *testing.T) {
|
|||||||
r, err := s.userManager.Reservations("phil")
|
r, err := s.userManager.Reservations("phil")
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, 0, len(r))
|
require.Equal(t, 0, len(r))
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPayments_Subscription_Update_Different_Tier(t *testing.T) {
|
func TestPayments_Subscription_Update_Different_Tier(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
stripeMock := &testStripeAPI{}
|
stripeMock := &testStripeAPI{}
|
||||||
defer stripeMock.AssertExpectations(t)
|
defer stripeMock.AssertExpectations(t)
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.StripeSecretKey = "secret key"
|
c.StripeSecretKey = "secret key"
|
||||||
c.StripeWebhookKey = "webhook key"
|
c.StripeWebhookKey = "webhook key"
|
||||||
s := newTestServer(t, c)
|
s := newTestServer(t, c)
|
||||||
@@ -673,13 +689,15 @@ func TestPayments_Subscription_Update_Different_Tier(t *testing.T) {
|
|||||||
"Authorization": util.BasicAuth("phil", "phil"),
|
"Authorization": util.BasicAuth("phil", "phil"),
|
||||||
})
|
})
|
||||||
require.Equal(t, 200, rr.Code)
|
require.Equal(t, 200, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPayments_Subscription_Delete_At_Period_End(t *testing.T) {
|
func TestPayments_Subscription_Delete_At_Period_End(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
stripeMock := &testStripeAPI{}
|
stripeMock := &testStripeAPI{}
|
||||||
defer stripeMock.AssertExpectations(t)
|
defer stripeMock.AssertExpectations(t)
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.StripeSecretKey = "secret key"
|
c.StripeSecretKey = "secret key"
|
||||||
c.StripeWebhookKey = "webhook key"
|
c.StripeWebhookKey = "webhook key"
|
||||||
s := newTestServer(t, c)
|
s := newTestServer(t, c)
|
||||||
@@ -704,13 +722,15 @@ func TestPayments_Subscription_Delete_At_Period_End(t *testing.T) {
|
|||||||
"Authorization": util.BasicAuth("phil", "phil"),
|
"Authorization": util.BasicAuth("phil", "phil"),
|
||||||
})
|
})
|
||||||
require.Equal(t, 200, rr.Code)
|
require.Equal(t, 200, rr.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPayments_CreatePortalSession(t *testing.T) {
|
func TestPayments_CreatePortalSession(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
stripeMock := &testStripeAPI{}
|
stripeMock := &testStripeAPI{}
|
||||||
defer stripeMock.AssertExpectations(t)
|
defer stripeMock.AssertExpectations(t)
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.StripeSecretKey = "secret key"
|
c.StripeSecretKey = "secret key"
|
||||||
c.StripeWebhookKey = "webhook key"
|
c.StripeWebhookKey = "webhook key"
|
||||||
s := newTestServer(t, c)
|
s := newTestServer(t, c)
|
||||||
@@ -740,6 +760,7 @@ func TestPayments_CreatePortalSession(t *testing.T) {
|
|||||||
require.Equal(t, 200, rr.Code)
|
require.Equal(t, 200, rr.Code)
|
||||||
ps, _ := util.UnmarshalJSON[apiAccountBillingPortalRedirectResponse](io.NopCloser(rr.Body))
|
ps, _ := util.UnmarshalJSON[apiAccountBillingPortalRedirectResponse](io.NopCloser(rr.Body))
|
||||||
require.Equal(t, "https://billing.stripe.com/blablabla", ps.RedirectURL)
|
require.Equal(t, "https://billing.stripe.com/blablabla", ps.RedirectURL)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
type testStripeAPI struct {
|
type testStripeAPI struct {
|
||||||
|
|||||||
5
server/server_race_off_test.go
Normal file
5
server/server_race_off_test.go
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
//go:build !race
|
||||||
|
|
||||||
|
package server
|
||||||
|
|
||||||
|
const raceEnabled = false
|
||||||
5
server/server_race_on_test.go
Normal file
5
server/server_race_on_test.go
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
//go:build race
|
||||||
|
|
||||||
|
package server
|
||||||
|
|
||||||
|
const raceEnabled = true
|
||||||
File diff suppressed because one or more lines are too long
@@ -11,6 +11,7 @@ import (
|
|||||||
"text/template"
|
"text/template"
|
||||||
|
|
||||||
"heckel.io/ntfy/v2/log"
|
"heckel.io/ntfy/v2/log"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/user"
|
"heckel.io/ntfy/v2/user"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
)
|
)
|
||||||
@@ -76,7 +77,7 @@ func (s *Server) convertPhoneNumber(u *user.User, phoneNumber string) (string, *
|
|||||||
|
|
||||||
// callPhone calls the Twilio API to make a phone call to the given phone number, using the given message.
|
// callPhone calls the Twilio API to make a phone call to the given phone number, using the given message.
|
||||||
// Failures will be logged, but not returned to the caller.
|
// Failures will be logged, but not returned to the caller.
|
||||||
func (s *Server) callPhone(v *visitor, r *http.Request, m *message, to string) {
|
func (s *Server) callPhone(v *visitor, r *http.Request, m *model.Message, to string) {
|
||||||
u, sender := v.User(), m.Sender.String()
|
u, sender := v.User(), m.Sender.String()
|
||||||
if u != nil {
|
if u != nil {
|
||||||
sender = u.Name
|
sender = u.Name
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestServer_Twilio_Call_Add_Verify_Call_Delete_Success(t *testing.T) {
|
func TestServer_Twilio_Call_Add_Verify_Call_Delete_Success(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
var called, verified atomic.Bool
|
var called, verified atomic.Bool
|
||||||
var code atomic.Pointer[string]
|
var code atomic.Pointer[string]
|
||||||
twilioVerifyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
twilioVerifyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
@@ -50,7 +51,7 @@ func TestServer_Twilio_Call_Add_Verify_Call_Delete_Success(t *testing.T) {
|
|||||||
}))
|
}))
|
||||||
defer twilioCallsServer.Close()
|
defer twilioCallsServer.Close()
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.TwilioVerifyBaseURL = twilioVerifyServer.URL
|
c.TwilioVerifyBaseURL = twilioVerifyServer.URL
|
||||||
c.TwilioCallsBaseURL = twilioCallsServer.URL
|
c.TwilioCallsBaseURL = twilioCallsServer.URL
|
||||||
c.TwilioAccount = "AC1234567890"
|
c.TwilioAccount = "AC1234567890"
|
||||||
@@ -112,9 +113,11 @@ func TestServer_Twilio_Call_Add_Verify_Call_Delete_Success(t *testing.T) {
|
|||||||
phoneNumbers, err = s.userManager.PhoneNumbers(u.ID)
|
phoneNumbers, err = s.userManager.PhoneNumbers(u.ID)
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Equal(t, 0, len(phoneNumbers))
|
require.Equal(t, 0, len(phoneNumbers))
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_Twilio_Call_Success(t *testing.T) {
|
func TestServer_Twilio_Call_Success(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
var called atomic.Bool
|
var called atomic.Bool
|
||||||
twilioServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
twilioServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
if called.Load() {
|
if called.Load() {
|
||||||
@@ -129,7 +132,7 @@ func TestServer_Twilio_Call_Success(t *testing.T) {
|
|||||||
}))
|
}))
|
||||||
defer twilioServer.Close()
|
defer twilioServer.Close()
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.TwilioCallsBaseURL = twilioServer.URL
|
c.TwilioCallsBaseURL = twilioServer.URL
|
||||||
c.TwilioAccount = "AC1234567890"
|
c.TwilioAccount = "AC1234567890"
|
||||||
c.TwilioAuthToken = "AAEAA1234567890"
|
c.TwilioAuthToken = "AAEAA1234567890"
|
||||||
@@ -157,9 +160,11 @@ func TestServer_Twilio_Call_Success(t *testing.T) {
|
|||||||
waitFor(t, func() bool {
|
waitFor(t, func() bool {
|
||||||
return called.Load()
|
return called.Load()
|
||||||
})
|
})
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_Twilio_Call_Success_With_Yes(t *testing.T) {
|
func TestServer_Twilio_Call_Success_With_Yes(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
var called atomic.Bool
|
var called atomic.Bool
|
||||||
twilioServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
twilioServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
if called.Load() {
|
if called.Load() {
|
||||||
@@ -174,7 +179,7 @@ func TestServer_Twilio_Call_Success_With_Yes(t *testing.T) {
|
|||||||
}))
|
}))
|
||||||
defer twilioServer.Close()
|
defer twilioServer.Close()
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.TwilioCallsBaseURL = twilioServer.URL
|
c.TwilioCallsBaseURL = twilioServer.URL
|
||||||
c.TwilioAccount = "AC1234567890"
|
c.TwilioAccount = "AC1234567890"
|
||||||
c.TwilioAuthToken = "AAEAA1234567890"
|
c.TwilioAuthToken = "AAEAA1234567890"
|
||||||
@@ -202,9 +207,11 @@ func TestServer_Twilio_Call_Success_With_Yes(t *testing.T) {
|
|||||||
waitFor(t, func() bool {
|
waitFor(t, func() bool {
|
||||||
return called.Load()
|
return called.Load()
|
||||||
})
|
})
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_Twilio_Call_Success_with_custom_twiml(t *testing.T) {
|
func TestServer_Twilio_Call_Success_with_custom_twiml(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
var called atomic.Bool
|
var called atomic.Bool
|
||||||
twilioServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
twilioServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
if called.Load() {
|
if called.Load() {
|
||||||
@@ -219,7 +226,7 @@ func TestServer_Twilio_Call_Success_with_custom_twiml(t *testing.T) {
|
|||||||
}))
|
}))
|
||||||
defer twilioServer.Close()
|
defer twilioServer.Close()
|
||||||
|
|
||||||
c := newTestConfigWithAuthFile(t)
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.TwilioCallsBaseURL = twilioServer.URL
|
c.TwilioCallsBaseURL = twilioServer.URL
|
||||||
c.TwilioAccount = "AC1234567890"
|
c.TwilioAccount = "AC1234567890"
|
||||||
c.TwilioAuthToken = "AAEAA1234567890"
|
c.TwilioAuthToken = "AAEAA1234567890"
|
||||||
@@ -263,10 +270,12 @@ func TestServer_Twilio_Call_Success_with_custom_twiml(t *testing.T) {
|
|||||||
waitFor(t, func() bool {
|
waitFor(t, func() bool {
|
||||||
return called.Load()
|
return called.Load()
|
||||||
})
|
})
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_Twilio_Call_UnverifiedNumber(t *testing.T) {
|
func TestServer_Twilio_Call_UnverifiedNumber(t *testing.T) {
|
||||||
c := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.TwilioCallsBaseURL = "http://dummy.invalid"
|
c.TwilioCallsBaseURL = "http://dummy.invalid"
|
||||||
c.TwilioAccount = "AC1234567890"
|
c.TwilioAccount = "AC1234567890"
|
||||||
c.TwilioAuthToken = "AAEAA1234567890"
|
c.TwilioAuthToken = "AAEAA1234567890"
|
||||||
@@ -288,10 +297,12 @@ func TestServer_Twilio_Call_UnverifiedNumber(t *testing.T) {
|
|||||||
"x-call": "+11122233344",
|
"x-call": "+11122233344",
|
||||||
})
|
})
|
||||||
require.Equal(t, 40034, toHTTPError(t, response.Body.String()).Code)
|
require.Equal(t, 40034, toHTTPError(t, response.Body.String()).Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_Twilio_Call_InvalidNumber(t *testing.T) {
|
func TestServer_Twilio_Call_InvalidNumber(t *testing.T) {
|
||||||
c := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.TwilioCallsBaseURL = "https://127.0.0.1"
|
c.TwilioCallsBaseURL = "https://127.0.0.1"
|
||||||
c.TwilioAccount = "AC1234567890"
|
c.TwilioAccount = "AC1234567890"
|
||||||
c.TwilioAuthToken = "AAEAA1234567890"
|
c.TwilioAuthToken = "AAEAA1234567890"
|
||||||
@@ -302,10 +313,12 @@ func TestServer_Twilio_Call_InvalidNumber(t *testing.T) {
|
|||||||
"x-call": "+invalid",
|
"x-call": "+invalid",
|
||||||
})
|
})
|
||||||
require.Equal(t, 40033, toHTTPError(t, response.Body.String()).Code)
|
require.Equal(t, 40033, toHTTPError(t, response.Body.String()).Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_Twilio_Call_Anonymous(t *testing.T) {
|
func TestServer_Twilio_Call_Anonymous(t *testing.T) {
|
||||||
c := newTestConfigWithAuthFile(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
c := newTestConfigWithAuthFile(t, databaseURL)
|
||||||
c.TwilioCallsBaseURL = "https://127.0.0.1"
|
c.TwilioCallsBaseURL = "https://127.0.0.1"
|
||||||
c.TwilioAccount = "AC1234567890"
|
c.TwilioAccount = "AC1234567890"
|
||||||
c.TwilioAuthToken = "AAEAA1234567890"
|
c.TwilioAuthToken = "AAEAA1234567890"
|
||||||
@@ -316,12 +329,15 @@ func TestServer_Twilio_Call_Anonymous(t *testing.T) {
|
|||||||
"x-call": "+123123",
|
"x-call": "+123123",
|
||||||
})
|
})
|
||||||
require.Equal(t, 40035, toHTTPError(t, response.Body.String()).Code)
|
require.Equal(t, 40035, toHTTPError(t, response.Body.String()).Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_Twilio_Call_Unconfigured(t *testing.T) {
|
func TestServer_Twilio_Call_Unconfigured(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfig(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfig(t, databaseURL))
|
||||||
response := request(t, s, "POST", "/mytopic", "test", map[string]string{
|
response := request(t, s, "POST", "/mytopic", "test", map[string]string{
|
||||||
"x-call": "+1234",
|
"x-call": "+1234",
|
||||||
})
|
})
|
||||||
require.Equal(t, 40032, toHTTPError(t, response.Body.String()).Code)
|
require.Equal(t, 40032, toHTTPError(t, response.Body.String()).Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,7 +11,9 @@ import (
|
|||||||
|
|
||||||
"github.com/SherClockHolmes/webpush-go"
|
"github.com/SherClockHolmes/webpush-go"
|
||||||
"heckel.io/ntfy/v2/log"
|
"heckel.io/ntfy/v2/log"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/user"
|
"heckel.io/ntfy/v2/user"
|
||||||
|
wpush "heckel.io/ntfy/v2/webpush"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -82,14 +84,14 @@ func (s *Server) handleWebPushDelete(w http.ResponseWriter, r *http.Request, _ *
|
|||||||
return s.writeJSON(w, newSuccessResponse())
|
return s.writeJSON(w, newSuccessResponse())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) publishToWebPushEndpoints(v *visitor, m *message) {
|
func (s *Server) publishToWebPushEndpoints(v *visitor, m *model.Message) {
|
||||||
subscriptions, err := s.webPush.SubscriptionsForTopic(m.Topic)
|
subscriptions, err := s.webPush.SubscriptionsForTopic(m.Topic)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logvm(v, m).Err(err).With(v, m).Warn("Unable to publish web push messages")
|
logvm(v, m).Err(err).With(v, m).Warn("Unable to publish web push messages")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
log.Tag(tagWebPush).With(v, m).Debug("Publishing web push message to %d subscribers", len(subscriptions))
|
log.Tag(tagWebPush).With(v, m).Debug("Publishing web push message to %d subscribers", len(subscriptions))
|
||||||
payload, err := json.Marshal(newWebPushPayload(fmt.Sprintf("%s/%s", s.config.BaseURL, m.Topic), m.forJSON()))
|
payload, err := json.Marshal(newWebPushPayload(fmt.Sprintf("%s/%s", s.config.BaseURL, m.Topic), m.ForJSON()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Tag(tagWebPush).Err(err).With(v, m).Warn("Unable to marshal expiring payload")
|
log.Tag(tagWebPush).Err(err).With(v, m).Warn("Unable to marshal expiring payload")
|
||||||
return
|
return
|
||||||
@@ -128,7 +130,7 @@ func (s *Server) pruneAndNotifyWebPushSubscriptionsInternal() error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
warningSent := make([]*webPushSubscription, 0)
|
warningSent := make([]*wpush.Subscription, 0)
|
||||||
for _, subscription := range subscriptions {
|
for _, subscription := range subscriptions {
|
||||||
if err := s.sendWebPushNotification(subscription, payload); err != nil {
|
if err := s.sendWebPushNotification(subscription, payload); err != nil {
|
||||||
log.Tag(tagWebPush).Err(err).With(subscription).Warn("Unable to publish expiry imminent warning")
|
log.Tag(tagWebPush).Err(err).With(subscription).Warn("Unable to publish expiry imminent warning")
|
||||||
@@ -143,7 +145,7 @@ func (s *Server) pruneAndNotifyWebPushSubscriptionsInternal() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) sendWebPushNotification(sub *webPushSubscription, message []byte, contexters ...log.Contexter) error {
|
func (s *Server) sendWebPushNotification(sub *wpush.Subscription, message []byte, contexters ...log.Contexter) error {
|
||||||
log.Tag(tagWebPush).With(sub).With(contexters...).Debug("Sending web push message")
|
log.Tag(tagWebPush).With(sub).With(contexters...).Debug("Sending web push message")
|
||||||
payload := &webpush.Subscription{
|
payload := &webpush.Subscription{
|
||||||
Endpoint: sub.Endpoint,
|
Endpoint: sub.Endpoint,
|
||||||
|
|||||||
@@ -4,6 +4,8 @@ package server
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -20,7 +22,7 @@ func (s *Server) handleWebPushDelete(w http.ResponseWriter, r *http.Request, _ *
|
|||||||
return errHTTPNotFound
|
return errHTTPNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) publishToWebPushEndpoints(v *visitor, m *message) {
|
func (s *Server) publishToWebPushEndpoints(v *visitor, m *model.Message) {
|
||||||
// Nothing to see here
|
// Nothing to see here
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -5,10 +5,6 @@ package server
|
|||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/SherClockHolmes/webpush-go"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"heckel.io/ntfy/v2/user"
|
|
||||||
"heckel.io/ntfy/v2/util"
|
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
@@ -18,6 +14,11 @@ import (
|
|||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/SherClockHolmes/webpush-go"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"heckel.io/ntfy/v2/user"
|
||||||
|
"heckel.io/ntfy/v2/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -25,36 +26,41 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestServer_WebPush_Enabled(t *testing.T) {
|
func TestServer_WebPush_Enabled(t *testing.T) {
|
||||||
conf := newTestConfig(t)
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
conf := newTestConfig(t, databaseURL)
|
||||||
conf.WebRoot = "" // Disable web app
|
conf.WebRoot = "" // Disable web app
|
||||||
s := newTestServer(t, conf)
|
s := newTestServer(t, conf)
|
||||||
|
|
||||||
rr := request(t, s, "GET", "/manifest.webmanifest", "", nil)
|
rr := request(t, s, "GET", "/manifest.webmanifest", "", nil)
|
||||||
require.Equal(t, 404, rr.Code)
|
require.Equal(t, 404, rr.Code)
|
||||||
|
|
||||||
conf2 := newTestConfig(t)
|
conf2 := newTestConfig(t, databaseURL)
|
||||||
s2 := newTestServer(t, conf2)
|
s2 := newTestServer(t, conf2)
|
||||||
|
|
||||||
rr = request(t, s2, "GET", "/manifest.webmanifest", "", nil)
|
rr = request(t, s2, "GET", "/manifest.webmanifest", "", nil)
|
||||||
require.Equal(t, 404, rr.Code)
|
require.Equal(t, 404, rr.Code)
|
||||||
|
|
||||||
conf3 := newTestConfigWithWebPush(t)
|
conf3 := newTestConfigWithWebPush(t, databaseURL)
|
||||||
s3 := newTestServer(t, conf3)
|
s3 := newTestServer(t, conf3)
|
||||||
|
|
||||||
rr = request(t, s3, "GET", "/manifest.webmanifest", "", nil)
|
rr = request(t, s3, "GET", "/manifest.webmanifest", "", nil)
|
||||||
require.Equal(t, 200, rr.Code)
|
require.Equal(t, 200, rr.Code)
|
||||||
require.Equal(t, "application/manifest+json", rr.Header().Get("Content-Type"))
|
require.Equal(t, "application/manifest+json", rr.Header().Get("Content-Type"))
|
||||||
|
|
||||||
|
})
|
||||||
}
|
}
|
||||||
func TestServer_WebPush_Disabled(t *testing.T) {
|
func TestServer_WebPush_Disabled(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfig(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfig(t, databaseURL))
|
||||||
|
|
||||||
response := request(t, s, "POST", "/v1/webpush", payloadForTopics(t, []string{"test-topic"}, testWebPushEndpoint), nil)
|
response := request(t, s, "POST", "/v1/webpush", payloadForTopics(t, []string{"test-topic"}, testWebPushEndpoint), nil)
|
||||||
require.Equal(t, 404, response.Code)
|
require.Equal(t, 404, response.Code)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_WebPush_TopicAdd(t *testing.T) {
|
func TestServer_WebPush_TopicAdd(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithWebPush(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithWebPush(t, databaseURL))
|
||||||
|
|
||||||
response := request(t, s, "POST", "/v1/webpush", payloadForTopics(t, []string{"test-topic"}, testWebPushEndpoint), nil)
|
response := request(t, s, "POST", "/v1/webpush", payloadForTopics(t, []string{"test-topic"}, testWebPushEndpoint), nil)
|
||||||
require.Equal(t, 200, response.Code)
|
require.Equal(t, 200, response.Code)
|
||||||
@@ -68,18 +74,22 @@ func TestServer_WebPush_TopicAdd(t *testing.T) {
|
|||||||
require.Equal(t, subs[0].P256dh, "p256dh-key")
|
require.Equal(t, subs[0].P256dh, "p256dh-key")
|
||||||
require.Equal(t, subs[0].Auth, "auth-key")
|
require.Equal(t, subs[0].Auth, "auth-key")
|
||||||
require.Equal(t, subs[0].UserID, "")
|
require.Equal(t, subs[0].UserID, "")
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_WebPush_TopicAdd_InvalidEndpoint(t *testing.T) {
|
func TestServer_WebPush_TopicAdd_InvalidEndpoint(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithWebPush(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithWebPush(t, databaseURL))
|
||||||
|
|
||||||
response := request(t, s, "POST", "/v1/webpush", payloadForTopics(t, []string{"test-topic"}, "https://ddos-target.example.com/webpush"), nil)
|
response := request(t, s, "POST", "/v1/webpush", payloadForTopics(t, []string{"test-topic"}, "https://ddos-target.example.com/webpush"), nil)
|
||||||
require.Equal(t, 400, response.Code)
|
require.Equal(t, 400, response.Code)
|
||||||
require.Equal(t, `{"code":40039,"http":400,"error":"invalid request: web push endpoint unknown"}`+"\n", response.Body.String())
|
require.Equal(t, `{"code":40039,"http":400,"error":"invalid request: web push endpoint unknown"}`+"\n", response.Body.String())
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_WebPush_TopicAdd_TooManyTopics(t *testing.T) {
|
func TestServer_WebPush_TopicAdd_TooManyTopics(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithWebPush(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithWebPush(t, databaseURL))
|
||||||
|
|
||||||
topicList := make([]string, 51)
|
topicList := make([]string, 51)
|
||||||
for i := range topicList {
|
for i := range topicList {
|
||||||
@@ -89,10 +99,12 @@ func TestServer_WebPush_TopicAdd_TooManyTopics(t *testing.T) {
|
|||||||
response := request(t, s, "POST", "/v1/webpush", payloadForTopics(t, topicList, testWebPushEndpoint), nil)
|
response := request(t, s, "POST", "/v1/webpush", payloadForTopics(t, topicList, testWebPushEndpoint), nil)
|
||||||
require.Equal(t, 400, response.Code)
|
require.Equal(t, 400, response.Code)
|
||||||
require.Equal(t, `{"code":40040,"http":400,"error":"invalid request: too many web push topic subscriptions"}`+"\n", response.Body.String())
|
require.Equal(t, `{"code":40040,"http":400,"error":"invalid request: too many web push topic subscriptions"}`+"\n", response.Body.String())
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_WebPush_TopicUnsubscribe(t *testing.T) {
|
func TestServer_WebPush_TopicUnsubscribe(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithWebPush(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithWebPush(t, databaseURL))
|
||||||
|
|
||||||
addSubscription(t, s, testWebPushEndpoint, "test-topic")
|
addSubscription(t, s, testWebPushEndpoint, "test-topic")
|
||||||
requireSubscriptionCount(t, s, "test-topic", 1)
|
requireSubscriptionCount(t, s, "test-topic", 1)
|
||||||
@@ -102,10 +114,12 @@ func TestServer_WebPush_TopicUnsubscribe(t *testing.T) {
|
|||||||
require.Equal(t, `{"success":true}`+"\n", response.Body.String())
|
require.Equal(t, `{"success":true}`+"\n", response.Body.String())
|
||||||
|
|
||||||
requireSubscriptionCount(t, s, "test-topic", 0)
|
requireSubscriptionCount(t, s, "test-topic", 0)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_WebPush_Delete(t *testing.T) {
|
func TestServer_WebPush_Delete(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithWebPush(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithWebPush(t, databaseURL))
|
||||||
|
|
||||||
addSubscription(t, s, testWebPushEndpoint, "test-topic")
|
addSubscription(t, s, testWebPushEndpoint, "test-topic")
|
||||||
requireSubscriptionCount(t, s, "test-topic", 1)
|
requireSubscriptionCount(t, s, "test-topic", 1)
|
||||||
@@ -115,10 +129,12 @@ func TestServer_WebPush_Delete(t *testing.T) {
|
|||||||
require.Equal(t, `{"success":true}`+"\n", response.Body.String())
|
require.Equal(t, `{"success":true}`+"\n", response.Body.String())
|
||||||
|
|
||||||
requireSubscriptionCount(t, s, "test-topic", 0)
|
requireSubscriptionCount(t, s, "test-topic", 0)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_WebPush_TopicSubscribeProtected_Allowed(t *testing.T) {
|
func TestServer_WebPush_TopicSubscribeProtected_Allowed(t *testing.T) {
|
||||||
config := configureAuth(t, newTestConfigWithWebPush(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
config := configureAuth(t, newTestConfigWithWebPush(t, databaseURL))
|
||||||
config.AuthDefault = user.PermissionDenyAll
|
config.AuthDefault = user.PermissionDenyAll
|
||||||
s := newTestServer(t, config)
|
s := newTestServer(t, config)
|
||||||
|
|
||||||
@@ -135,10 +151,12 @@ func TestServer_WebPush_TopicSubscribeProtected_Allowed(t *testing.T) {
|
|||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
require.Len(t, subs, 1)
|
require.Len(t, subs, 1)
|
||||||
require.True(t, strings.HasPrefix(subs[0].UserID, "u_"))
|
require.True(t, strings.HasPrefix(subs[0].UserID, "u_"))
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_WebPush_TopicSubscribeProtected_Denied(t *testing.T) {
|
func TestServer_WebPush_TopicSubscribeProtected_Denied(t *testing.T) {
|
||||||
config := configureAuth(t, newTestConfigWithWebPush(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
config := configureAuth(t, newTestConfigWithWebPush(t, databaseURL))
|
||||||
config.AuthDefault = user.PermissionDenyAll
|
config.AuthDefault = user.PermissionDenyAll
|
||||||
s := newTestServer(t, config)
|
s := newTestServer(t, config)
|
||||||
|
|
||||||
@@ -146,10 +164,12 @@ func TestServer_WebPush_TopicSubscribeProtected_Denied(t *testing.T) {
|
|||||||
require.Equal(t, 403, response.Code)
|
require.Equal(t, 403, response.Code)
|
||||||
|
|
||||||
requireSubscriptionCount(t, s, "test-topic", 0)
|
requireSubscriptionCount(t, s, "test-topic", 0)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_WebPush_DeleteAccountUnsubscribe(t *testing.T) {
|
func TestServer_WebPush_DeleteAccountUnsubscribe(t *testing.T) {
|
||||||
config := configureAuth(t, newTestConfigWithWebPush(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
config := configureAuth(t, newTestConfigWithWebPush(t, databaseURL))
|
||||||
s := newTestServer(t, config)
|
s := newTestServer(t, config)
|
||||||
|
|
||||||
require.Nil(t, s.userManager.AddUser("ben", "ben", user.RoleUser, false))
|
require.Nil(t, s.userManager.AddUser("ben", "ben", user.RoleUser, false))
|
||||||
@@ -169,10 +189,12 @@ func TestServer_WebPush_DeleteAccountUnsubscribe(t *testing.T) {
|
|||||||
})
|
})
|
||||||
// should've been deleted with the account
|
// should've been deleted with the account
|
||||||
requireSubscriptionCount(t, s, "test-topic", 0)
|
requireSubscriptionCount(t, s, "test-topic", 0)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_WebPush_Publish(t *testing.T) {
|
func TestServer_WebPush_Publish(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithWebPush(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithWebPush(t, databaseURL))
|
||||||
|
|
||||||
var received atomic.Bool
|
var received atomic.Bool
|
||||||
pushService := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
pushService := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
@@ -191,10 +213,12 @@ func TestServer_WebPush_Publish(t *testing.T) {
|
|||||||
waitFor(t, func() bool {
|
waitFor(t, func() bool {
|
||||||
return received.Load()
|
return received.Load()
|
||||||
})
|
})
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_WebPush_Publish_RemoveOnError(t *testing.T) {
|
func TestServer_WebPush_Publish_RemoveOnError(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithWebPush(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithWebPush(t, databaseURL))
|
||||||
|
|
||||||
var received atomic.Bool
|
var received atomic.Bool
|
||||||
pushService := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
pushService := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
@@ -219,10 +243,12 @@ func TestServer_WebPush_Publish_RemoveOnError(t *testing.T) {
|
|||||||
|
|
||||||
requireSubscriptionCount(t, s, "test-topic", 0)
|
requireSubscriptionCount(t, s, "test-topic", 0)
|
||||||
requireSubscriptionCount(t, s, "test-topic-abc", 0)
|
requireSubscriptionCount(t, s, "test-topic-abc", 0)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServer_WebPush_Expiry(t *testing.T) {
|
func TestServer_WebPush_Expiry(t *testing.T) {
|
||||||
s := newTestServer(t, newTestConfigWithWebPush(t))
|
forEachBackend(t, func(t *testing.T, databaseURL string) {
|
||||||
|
s := newTestServer(t, newTestConfigWithWebPush(t, databaseURL))
|
||||||
|
|
||||||
var received atomic.Bool
|
var received atomic.Bool
|
||||||
|
|
||||||
@@ -235,11 +261,11 @@ func TestServer_WebPush_Expiry(t *testing.T) {
|
|||||||
}))
|
}))
|
||||||
defer pushService.Close()
|
defer pushService.Close()
|
||||||
|
|
||||||
addSubscription(t, s, pushService.URL+"/push-receive", "test-topic")
|
endpoint := pushService.URL + "/push-receive"
|
||||||
|
addSubscription(t, s, endpoint, "test-topic")
|
||||||
requireSubscriptionCount(t, s, "test-topic", 1)
|
requireSubscriptionCount(t, s, "test-topic", 1)
|
||||||
|
|
||||||
_, err := s.webPush.db.Exec("UPDATE subscription SET updated_at = ?", time.Now().Add(-55*24*time.Hour).Unix())
|
require.Nil(t, s.webPush.SetSubscriptionUpdatedAt(endpoint, time.Now().Add(-55*24*time.Hour).Unix()))
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
s.pruneAndNotifyWebPushSubscriptions()
|
s.pruneAndNotifyWebPushSubscriptions()
|
||||||
requireSubscriptionCount(t, s, "test-topic", 1)
|
requireSubscriptionCount(t, s, "test-topic", 1)
|
||||||
@@ -248,8 +274,7 @@ func TestServer_WebPush_Expiry(t *testing.T) {
|
|||||||
return received.Load()
|
return received.Load()
|
||||||
})
|
})
|
||||||
|
|
||||||
_, err = s.webPush.db.Exec("UPDATE subscription SET updated_at = ?", time.Now().Add(-60*24*time.Hour).Unix())
|
require.Nil(t, s.webPush.SetSubscriptionUpdatedAt(endpoint, time.Now().Add(-60*24*time.Hour).Unix()))
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
s.pruneAndNotifyWebPushSubscriptions()
|
s.pruneAndNotifyWebPushSubscriptions()
|
||||||
waitFor(t, func() bool {
|
waitFor(t, func() bool {
|
||||||
@@ -257,6 +282,7 @@ func TestServer_WebPush_Expiry(t *testing.T) {
|
|||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
return len(subs) == 0
|
return len(subs) == 0
|
||||||
})
|
})
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func payloadForTopics(t *testing.T, topics []string, endpoint string) string {
|
func payloadForTopics(t *testing.T, topics []string, endpoint string) string {
|
||||||
@@ -281,11 +307,13 @@ func requireSubscriptionCount(t *testing.T, s *Server, topic string, expectedLen
|
|||||||
require.Len(t, subs, expectedLength)
|
require.Len(t, subs, expectedLength)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTestConfigWithWebPush(t *testing.T) *Config {
|
func newTestConfigWithWebPush(t *testing.T, databaseURL string) *Config {
|
||||||
conf := newTestConfig(t)
|
conf := newTestConfig(t, databaseURL)
|
||||||
privateKey, publicKey, err := webpush.GenerateVAPIDKeys()
|
privateKey, publicKey, err := webpush.GenerateVAPIDKeys()
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
|
if conf.DatabaseURL == "" {
|
||||||
conf.WebPushFile = filepath.Join(t.TempDir(), "webpush.db")
|
conf.WebPushFile = filepath.Join(t.TempDir(), "webpush.db")
|
||||||
|
}
|
||||||
conf.WebPushEmailAddress = "testing@example.com"
|
conf.WebPushEmailAddress = "testing@example.com"
|
||||||
conf.WebPushPrivateKey = privateKey
|
conf.WebPushPrivateKey = privateKey
|
||||||
conf.WebPushPublicKey = publicKey
|
conf.WebPushPublicKey = publicKey
|
||||||
|
|||||||
@@ -12,11 +12,12 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"heckel.io/ntfy/v2/log"
|
"heckel.io/ntfy/v2/log"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
type mailer interface {
|
type mailer interface {
|
||||||
Send(v *visitor, m *message, to string) error
|
Send(v *visitor, m *model.Message, to string) error
|
||||||
Counts() (total int64, success int64, failure int64)
|
Counts() (total int64, success int64, failure int64)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -27,7 +28,7 @@ type smtpSender struct {
|
|||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *smtpSender) Send(v *visitor, m *message, to string) error {
|
func (s *smtpSender) Send(v *visitor, m *model.Message, to string) error {
|
||||||
return s.withCount(v, m, func() error {
|
return s.withCount(v, m, func() error {
|
||||||
host, _, err := net.SplitHostPort(s.config.SMTPSenderAddr)
|
host, _, err := net.SplitHostPort(s.config.SMTPSenderAddr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -63,7 +64,7 @@ func (s *smtpSender) Counts() (total int64, success int64, failure int64) {
|
|||||||
return s.success + s.failure, s.success, s.failure
|
return s.success + s.failure, s.success, s.failure
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *smtpSender) withCount(v *visitor, m *message, fn func() error) error {
|
func (s *smtpSender) withCount(v *visitor, m *model.Message, fn func() error) error {
|
||||||
err := fn()
|
err := fn()
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
@@ -76,7 +77,7 @@ func (s *smtpSender) withCount(v *visitor, m *message, fn func() error) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func formatMail(baseURL, senderIP, from, to string, m *message) (string, error) {
|
func formatMail(baseURL, senderIP, from, to string, m *model.Message) (string, error) {
|
||||||
topicURL := baseURL + "/" + m.Topic
|
topicURL := baseURL + "/" + m.Topic
|
||||||
subject := m.Title
|
subject := m.Title
|
||||||
if subject == "" {
|
if subject == "" {
|
||||||
|
|||||||
@@ -1,12 +1,14 @@
|
|||||||
package server
|
package server
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestFormatMail_Basic(t *testing.T) {
|
func TestFormatMail_Basic(t *testing.T) {
|
||||||
actual, _ := formatMail("https://ntfy.sh", "1.2.3.4", "ntfy@ntfy.sh", "phil@example.com", &message{
|
actual, _ := formatMail("https://ntfy.sh", "1.2.3.4", "ntfy@ntfy.sh", "phil@example.com", &model.Message{
|
||||||
ID: "abc",
|
ID: "abc",
|
||||||
Time: 1640382204,
|
Time: 1640382204,
|
||||||
Event: "message",
|
Event: "message",
|
||||||
@@ -27,7 +29,7 @@ This message was sent by 1.2.3.4 at Fri, 24 Dec 2021 21:43:24 UTC via https://nt
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestFormatMail_JustEmojis(t *testing.T) {
|
func TestFormatMail_JustEmojis(t *testing.T) {
|
||||||
actual, _ := formatMail("https://ntfy.sh", "1.2.3.4", "ntfy@ntfy.sh", "phil@example.com", &message{
|
actual, _ := formatMail("https://ntfy.sh", "1.2.3.4", "ntfy@ntfy.sh", "phil@example.com", &model.Message{
|
||||||
ID: "abc",
|
ID: "abc",
|
||||||
Time: 1640382204,
|
Time: 1640382204,
|
||||||
Event: "message",
|
Event: "message",
|
||||||
@@ -49,7 +51,7 @@ This message was sent by 1.2.3.4 at Fri, 24 Dec 2021 21:43:24 UTC via https://nt
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestFormatMail_JustOtherTags(t *testing.T) {
|
func TestFormatMail_JustOtherTags(t *testing.T) {
|
||||||
actual, _ := formatMail("https://ntfy.sh", "1.2.3.4", "ntfy@ntfy.sh", "phil@example.com", &message{
|
actual, _ := formatMail("https://ntfy.sh", "1.2.3.4", "ntfy@ntfy.sh", "phil@example.com", &model.Message{
|
||||||
ID: "abc",
|
ID: "abc",
|
||||||
Time: 1640382204,
|
Time: 1640382204,
|
||||||
Event: "message",
|
Event: "message",
|
||||||
@@ -73,7 +75,7 @@ This message was sent by 1.2.3.4 at Fri, 24 Dec 2021 21:43:24 UTC via https://nt
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestFormatMail_JustPriority(t *testing.T) {
|
func TestFormatMail_JustPriority(t *testing.T) {
|
||||||
actual, _ := formatMail("https://ntfy.sh", "1.2.3.4", "ntfy@ntfy.sh", "phil@example.com", &message{
|
actual, _ := formatMail("https://ntfy.sh", "1.2.3.4", "ntfy@ntfy.sh", "phil@example.com", &model.Message{
|
||||||
ID: "abc",
|
ID: "abc",
|
||||||
Time: 1640382204,
|
Time: 1640382204,
|
||||||
Event: "message",
|
Event: "message",
|
||||||
@@ -97,7 +99,7 @@ This message was sent by 1.2.3.4 at Fri, 24 Dec 2021 21:43:24 UTC via https://nt
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestFormatMail_UTF8Subject(t *testing.T) {
|
func TestFormatMail_UTF8Subject(t *testing.T) {
|
||||||
actual, _ := formatMail("https://ntfy.sh", "1.2.3.4", "ntfy@ntfy.sh", "phil@example.com", &message{
|
actual, _ := formatMail("https://ntfy.sh", "1.2.3.4", "ntfy@ntfy.sh", "phil@example.com", &model.Message{
|
||||||
ID: "abc",
|
ID: "abc",
|
||||||
Time: 1640382204,
|
Time: 1640382204,
|
||||||
Event: "message",
|
Event: "message",
|
||||||
@@ -119,7 +121,7 @@ This message was sent by 1.2.3.4 at Fri, 24 Dec 2021 21:43:24 UTC via https://nt
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestFormatMail_WithAllTheThings(t *testing.T) {
|
func TestFormatMail_WithAllTheThings(t *testing.T) {
|
||||||
actual, _ := formatMail("https://ntfy.sh", "1.2.3.4", "ntfy@ntfy.sh", "phil@example.com", &message{
|
actual, _ := formatMail("https://ntfy.sh", "1.2.3.4", "ntfy@ntfy.sh", "phil@example.com", &model.Message{
|
||||||
ID: "abc",
|
ID: "abc",
|
||||||
Time: 1640382204,
|
Time: 1640382204,
|
||||||
Event: "message",
|
Event: "message",
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ import (
|
|||||||
|
|
||||||
"github.com/emersion/go-smtp"
|
"github.com/emersion/go-smtp"
|
||||||
"github.com/microcosm-cc/bluemonday"
|
"github.com/microcosm-cc/bluemonday"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -159,7 +160,7 @@ func (s *smtpSession) Data(r io.Reader) error {
|
|||||||
if len(body) > conf.MessageSizeLimit {
|
if len(body) > conf.MessageSizeLimit {
|
||||||
body = body[:conf.MessageSizeLimit]
|
body = body[:conf.MessageSizeLimit]
|
||||||
}
|
}
|
||||||
m := newDefaultMessage(s.topic, body)
|
m := model.NewDefaultMessage(s.topic, body)
|
||||||
subject := strings.TrimSpace(msg.Header.Get("Subject"))
|
subject := strings.TrimSpace(msg.Header.Get("Subject"))
|
||||||
if subject != "" {
|
if subject != "" {
|
||||||
dec := mime.WordDecoder{}
|
dec := mime.WordDecoder{}
|
||||||
@@ -184,7 +185,7 @@ func (s *smtpSession) Data(r io.Reader) error {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *smtpSession) publishMessage(m *message) error {
|
func (s *smtpSession) publishMessage(m *model.Message) error {
|
||||||
// Extract remote address (for rate limiting)
|
// Extract remote address (for rate limiting)
|
||||||
remoteAddr, _, err := net.SplitHostPort(s.conn.Conn().RemoteAddr().String())
|
remoteAddr, _, err := net.SplitHostPort(s.conn.Conn().RemoteAddr().String())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -1444,7 +1444,7 @@ what's up
|
|||||||
type smtpHandlerFunc func(http.ResponseWriter, *http.Request)
|
type smtpHandlerFunc func(http.ResponseWriter, *http.Request)
|
||||||
|
|
||||||
func newTestSMTPServer(t *testing.T, handler smtpHandlerFunc) (s *smtp.Server, c net.Conn, conf *Config, scanner *bufio.Scanner) {
|
func newTestSMTPServer(t *testing.T, handler smtpHandlerFunc) (s *smtp.Server, c net.Conn, conf *Config, scanner *bufio.Scanner) {
|
||||||
conf = newTestConfig(t)
|
conf = newTestConfig(t, "")
|
||||||
conf.SMTPServerListen = ":25"
|
conf.SMTPServerListen = ":25"
|
||||||
conf.SMTPServerDomain = "ntfy.sh"
|
conf.SMTPServerDomain = "ntfy.sh"
|
||||||
conf.SMTPServerAddrPrefix = "ntfy-"
|
conf.SMTPServerAddrPrefix = "ntfy-"
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"heckel.io/ntfy/v2/log"
|
"heckel.io/ntfy/v2/log"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -33,7 +34,7 @@ type topicSubscriber struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// subscriber is a function that is called for every new message on a topic
|
// subscriber is a function that is called for every new message on a topic
|
||||||
type subscriber func(v *visitor, msg *message) error
|
type subscriber func(v *visitor, msg *model.Message) error
|
||||||
|
|
||||||
// newTopic creates a new topic
|
// newTopic creates a new topic
|
||||||
func newTopic(id string) *topic {
|
func newTopic(id string) *topic {
|
||||||
@@ -103,7 +104,7 @@ func (t *topic) Unsubscribe(id int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Publish asynchronously publishes to all subscribers
|
// Publish asynchronously publishes to all subscribers
|
||||||
func (t *topic) Publish(v *visitor, m *message) error {
|
func (t *topic) Publish(v *visitor, m *model.Message) error {
|
||||||
go func() {
|
go func() {
|
||||||
// We want to lock the topic as short as possible, so we make a shallow copy of the
|
// We want to lock the topic as short as possible, so we make a shallow copy of the
|
||||||
// subscribers map here. Actually sending out the messages then doesn't have to lock.
|
// subscribers map here. Actually sending out the messages then doesn't have to lock.
|
||||||
|
|||||||
@@ -7,10 +7,11 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
"heckel.io/ntfy/v2/model"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestTopic_CancelSubscribersExceptUser(t *testing.T) {
|
func TestTopic_CancelSubscribersExceptUser(t *testing.T) {
|
||||||
subFn := func(v *visitor, msg *message) error {
|
subFn := func(v *visitor, msg *model.Message) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
canceled1 := atomic.Bool{}
|
canceled1 := atomic.Bool{}
|
||||||
@@ -33,7 +34,7 @@ func TestTopic_CancelSubscribersExceptUser(t *testing.T) {
|
|||||||
func TestTopic_CancelSubscribersUser(t *testing.T) {
|
func TestTopic_CancelSubscribersUser(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
subFn := func(v *visitor, msg *message) error {
|
subFn := func(v *visitor, msg *model.Message) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
canceled1 := atomic.Bool{}
|
canceled1 := atomic.Bool{}
|
||||||
@@ -76,7 +77,7 @@ func TestTopic_Subscribe_DuplicateID(t *testing.T) {
|
|||||||
cancel: func() {},
|
cancel: func() {},
|
||||||
}
|
}
|
||||||
|
|
||||||
subFn := func(v *visitor, msg *message) error {
|
subFn := func(v *visitor, msg *model.Message) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
214
server/types.go
214
server/types.go
@@ -2,109 +2,12 @@ package server
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/netip"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"heckel.io/ntfy/v2/log"
|
"heckel.io/ntfy/v2/model"
|
||||||
"heckel.io/ntfy/v2/user"
|
"heckel.io/ntfy/v2/user"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
// List of possible events
|
|
||||||
const (
|
|
||||||
openEvent = "open"
|
|
||||||
keepaliveEvent = "keepalive"
|
|
||||||
messageEvent = "message"
|
|
||||||
messageDeleteEvent = "message_delete"
|
|
||||||
messageClearEvent = "message_clear"
|
|
||||||
pollRequestEvent = "poll_request"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
messageIDLength = 12
|
|
||||||
)
|
|
||||||
|
|
||||||
// message represents a message published to a topic
|
|
||||||
type message struct {
|
|
||||||
ID string `json:"id"` // Random message ID
|
|
||||||
SequenceID string `json:"sequence_id,omitempty"` // Message sequence ID for updating message contents (omitted if same as ID)
|
|
||||||
Time int64 `json:"time"` // Unix time in seconds
|
|
||||||
Expires int64 `json:"expires,omitempty"` // Unix time in seconds (not required for open/keepalive)
|
|
||||||
Event string `json:"event"` // One of the above
|
|
||||||
Topic string `json:"topic"`
|
|
||||||
Title string `json:"title,omitempty"`
|
|
||||||
Message string `json:"message,omitempty"`
|
|
||||||
Priority int `json:"priority,omitempty"`
|
|
||||||
Tags []string `json:"tags,omitempty"`
|
|
||||||
Click string `json:"click,omitempty"`
|
|
||||||
Icon string `json:"icon,omitempty"`
|
|
||||||
Actions []*action `json:"actions,omitempty"`
|
|
||||||
Attachment *attachment `json:"attachment,omitempty"`
|
|
||||||
PollID string `json:"poll_id,omitempty"`
|
|
||||||
ContentType string `json:"content_type,omitempty"` // text/plain by default (if empty), or text/markdown
|
|
||||||
Encoding string `json:"encoding,omitempty"` // Empty for raw UTF-8, or "base64" for encoded bytes
|
|
||||||
Sender netip.Addr `json:"-"` // IP address of uploader, used for rate limiting
|
|
||||||
User string `json:"-"` // UserID of the uploader, used to associated attachments
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *message) Context() log.Context {
|
|
||||||
fields := map[string]any{
|
|
||||||
"topic": m.Topic,
|
|
||||||
"message_id": m.ID,
|
|
||||||
"message_sequence_id": m.SequenceID,
|
|
||||||
"message_time": m.Time,
|
|
||||||
"message_event": m.Event,
|
|
||||||
"message_body_size": len(m.Message),
|
|
||||||
}
|
|
||||||
if m.Sender.IsValid() {
|
|
||||||
fields["message_sender"] = m.Sender.String()
|
|
||||||
}
|
|
||||||
if m.User != "" {
|
|
||||||
fields["message_user"] = m.User
|
|
||||||
}
|
|
||||||
return fields
|
|
||||||
}
|
|
||||||
|
|
||||||
// forJSON returns a copy of the message suitable for JSON output.
|
|
||||||
// It clears the SequenceID if it equals the ID to reduce redundancy.
|
|
||||||
func (m *message) forJSON() *message {
|
|
||||||
if m.SequenceID == m.ID {
|
|
||||||
clone := *m
|
|
||||||
clone.SequenceID = ""
|
|
||||||
return &clone
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
type attachment struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Type string `json:"type,omitempty"`
|
|
||||||
Size int64 `json:"size,omitempty"`
|
|
||||||
Expires int64 `json:"expires,omitempty"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type action struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
Action string `json:"action"` // "view", "broadcast", "http", or "copy"
|
|
||||||
Label string `json:"label"` // action button label
|
|
||||||
Clear bool `json:"clear"` // clear notification after successful execution
|
|
||||||
URL string `json:"url,omitempty"` // used in "view" and "http" actions
|
|
||||||
Method string `json:"method,omitempty"` // used in "http" action, default is POST (!)
|
|
||||||
Headers map[string]string `json:"headers,omitempty"` // used in "http" action
|
|
||||||
Body string `json:"body,omitempty"` // used in "http" action
|
|
||||||
Intent string `json:"intent,omitempty"` // used in "broadcast" action
|
|
||||||
Extras map[string]string `json:"extras,omitempty"` // used in "broadcast" action
|
|
||||||
Value string `json:"value,omitempty"` // used in "copy" action
|
|
||||||
}
|
|
||||||
|
|
||||||
func newAction() *action {
|
|
||||||
return &action{
|
|
||||||
Headers: make(map[string]string),
|
|
||||||
Extras: make(map[string]string),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// publishMessage is used as input when publishing as JSON
|
// publishMessage is used as input when publishing as JSON
|
||||||
type publishMessage struct {
|
type publishMessage struct {
|
||||||
Topic string `json:"topic"`
|
Topic string `json:"topic"`
|
||||||
@@ -115,7 +18,7 @@ type publishMessage struct {
|
|||||||
Tags []string `json:"tags"`
|
Tags []string `json:"tags"`
|
||||||
Click string `json:"click"`
|
Click string `json:"click"`
|
||||||
Icon string `json:"icon"`
|
Icon string `json:"icon"`
|
||||||
Actions []action `json:"actions"`
|
Actions []model.Action `json:"actions"`
|
||||||
Attach string `json:"attach"`
|
Attach string `json:"attach"`
|
||||||
Markdown bool `json:"markdown"`
|
Markdown bool `json:"markdown"`
|
||||||
Filename string `json:"filename"`
|
Filename string `json:"filename"`
|
||||||
@@ -127,94 +30,7 @@ type publishMessage struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// messageEncoder is a function that knows how to encode a message
|
// messageEncoder is a function that knows how to encode a message
|
||||||
type messageEncoder func(msg *message) (string, error)
|
type messageEncoder func(msg *model.Message) (string, error)
|
||||||
|
|
||||||
// newMessage creates a new message with the current timestamp
|
|
||||||
func newMessage(event, topic, msg string) *message {
|
|
||||||
return &message{
|
|
||||||
ID: util.RandomString(messageIDLength),
|
|
||||||
Time: time.Now().Unix(),
|
|
||||||
Event: event,
|
|
||||||
Topic: topic,
|
|
||||||
Message: msg,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// newOpenMessage is a convenience method to create an open message
|
|
||||||
func newOpenMessage(topic string) *message {
|
|
||||||
return newMessage(openEvent, topic, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
// newKeepaliveMessage is a convenience method to create a keepalive message
|
|
||||||
func newKeepaliveMessage(topic string) *message {
|
|
||||||
return newMessage(keepaliveEvent, topic, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
// newDefaultMessage is a convenience method to create a notification message
|
|
||||||
func newDefaultMessage(topic, msg string) *message {
|
|
||||||
return newMessage(messageEvent, topic, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// newPollRequestMessage is a convenience method to create a poll request message
|
|
||||||
func newPollRequestMessage(topic, pollID string) *message {
|
|
||||||
m := newMessage(pollRequestEvent, topic, newMessageBody)
|
|
||||||
m.PollID = pollID
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// newActionMessage creates a new action message (message_delete or message_clear)
|
|
||||||
func newActionMessage(event, topic, sequenceID string) *message {
|
|
||||||
m := newMessage(event, topic, "")
|
|
||||||
m.SequenceID = sequenceID
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
func validMessageID(s string) bool {
|
|
||||||
return util.ValidRandomString(s, messageIDLength)
|
|
||||||
}
|
|
||||||
|
|
||||||
type sinceMarker struct {
|
|
||||||
time time.Time
|
|
||||||
id string
|
|
||||||
}
|
|
||||||
|
|
||||||
func newSinceTime(timestamp int64) sinceMarker {
|
|
||||||
return sinceMarker{time.Unix(timestamp, 0), ""}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newSinceID(id string) sinceMarker {
|
|
||||||
return sinceMarker{time.Unix(0, 0), id}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t sinceMarker) IsAll() bool {
|
|
||||||
return t == sinceAllMessages
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t sinceMarker) IsNone() bool {
|
|
||||||
return t == sinceNoMessages
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t sinceMarker) IsLatest() bool {
|
|
||||||
return t == sinceLatestMessage
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t sinceMarker) IsID() bool {
|
|
||||||
return t.id != "" && t.id != "latest"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t sinceMarker) Time() time.Time {
|
|
||||||
return t.time
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t sinceMarker) ID() string {
|
|
||||||
return t.id
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
sinceAllMessages = sinceMarker{time.Unix(0, 0), ""}
|
|
||||||
sinceNoMessages = sinceMarker{time.Unix(1, 0), ""}
|
|
||||||
sinceLatestMessage = sinceMarker{time.Unix(0, 0), "latest"}
|
|
||||||
)
|
|
||||||
|
|
||||||
type queryFilter struct {
|
type queryFilter struct {
|
||||||
ID string
|
ID string
|
||||||
@@ -246,8 +62,8 @@ func parseQueryFilters(r *http.Request) (*queryFilter, error) {
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *queryFilter) Pass(msg *message) bool {
|
func (q *queryFilter) Pass(msg *model.Message) bool {
|
||||||
if msg.Event != messageEvent && msg.Event != messageDeleteEvent && msg.Event != messageClearEvent {
|
if msg.Event != model.MessageEvent && msg.Event != model.MessageDeleteEvent && msg.Event != model.MessageClearEvent {
|
||||||
return true // filters only apply to messages
|
return true // filters only apply to messages
|
||||||
} else if q.ID != "" && msg.ID != q.ID {
|
} else if q.ID != "" && msg.ID != q.ID {
|
||||||
return false
|
return false
|
||||||
@@ -572,10 +388,10 @@ const (
|
|||||||
type webPushPayload struct {
|
type webPushPayload struct {
|
||||||
Event string `json:"event"`
|
Event string `json:"event"`
|
||||||
SubscriptionID string `json:"subscription_id"`
|
SubscriptionID string `json:"subscription_id"`
|
||||||
Message *message `json:"message"`
|
Message *model.Message `json:"message"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func newWebPushPayload(subscriptionID string, message *message) *webPushPayload {
|
func newWebPushPayload(subscriptionID string, message *model.Message) *webPushPayload {
|
||||||
return &webPushPayload{
|
return &webPushPayload{
|
||||||
Event: webPushMessageEvent,
|
Event: webPushMessageEvent,
|
||||||
SubscriptionID: subscriptionID,
|
SubscriptionID: subscriptionID,
|
||||||
@@ -593,22 +409,6 @@ func newWebPushSubscriptionExpiringPayload() *webPushControlMessagePayload {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type webPushSubscription struct {
|
|
||||||
ID string
|
|
||||||
Endpoint string
|
|
||||||
Auth string
|
|
||||||
P256dh string
|
|
||||||
UserID string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *webPushSubscription) Context() log.Context {
|
|
||||||
return map[string]any{
|
|
||||||
"web_push_subscription_id": w.ID,
|
|
||||||
"web_push_subscription_user_id": w.UserID,
|
|
||||||
"web_push_subscription_endpoint": w.Endpoint,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// https://developer.mozilla.org/en-US/docs/Web/Manifest
|
// https://developer.mozilla.org/en-US/docs/Web/Manifest
|
||||||
type webManifestResponse struct {
|
type webManifestResponse struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
|
|
||||||
"golang.org/x/time/rate"
|
"golang.org/x/time/rate"
|
||||||
"heckel.io/ntfy/v2/log"
|
"heckel.io/ntfy/v2/log"
|
||||||
|
"heckel.io/ntfy/v2/message"
|
||||||
"heckel.io/ntfy/v2/user"
|
"heckel.io/ntfy/v2/user"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
)
|
)
|
||||||
@@ -53,7 +54,7 @@ const (
|
|||||||
// visitor represents an API user, and its associated rate.Limiter used for rate limiting
|
// visitor represents an API user, and its associated rate.Limiter used for rate limiting
|
||||||
type visitor struct {
|
type visitor struct {
|
||||||
config *Config
|
config *Config
|
||||||
messageCache *messageCache
|
messageCache *message.Cache
|
||||||
userManager *user.Manager // May be nil
|
userManager *user.Manager // May be nil
|
||||||
ip netip.Addr // Visitor IP address
|
ip netip.Addr // Visitor IP address
|
||||||
user *user.User // Only set if authenticated user, otherwise nil
|
user *user.User // Only set if authenticated user, otherwise nil
|
||||||
@@ -114,7 +115,7 @@ const (
|
|||||||
visitorLimitBasisTier = visitorLimitBasis("tier")
|
visitorLimitBasisTier = visitorLimitBasis("tier")
|
||||||
)
|
)
|
||||||
|
|
||||||
func newVisitor(conf *Config, messageCache *messageCache, userManager *user.Manager, ip netip.Addr, user *user.User) *visitor {
|
func newVisitor(conf *Config, messageCache *message.Cache, userManager *user.Manager, ip netip.Addr, user *user.User) *visitor {
|
||||||
var messages, emails, calls int64
|
var messages, emails, calls int64
|
||||||
if user != nil {
|
if user != nil {
|
||||||
messages = user.Stats.Messages
|
messages = user.Stats.Messages
|
||||||
|
|||||||
@@ -1,285 +0,0 @@
|
|||||||
package server
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql"
|
|
||||||
"errors"
|
|
||||||
"heckel.io/ntfy/v2/util"
|
|
||||||
"net/netip"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
_ "github.com/mattn/go-sqlite3" // SQLite driver
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
subscriptionIDPrefix = "wps_"
|
|
||||||
subscriptionIDLength = 10
|
|
||||||
subscriptionEndpointLimitPerSubscriberIP = 10
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errWebPushNoRows = errors.New("no rows found")
|
|
||||||
errWebPushTooManySubscriptions = errors.New("too many subscriptions")
|
|
||||||
errWebPushUserIDCannotBeEmpty = errors.New("user ID cannot be empty")
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
createWebPushSubscriptionsTableQuery = `
|
|
||||||
BEGIN;
|
|
||||||
CREATE TABLE IF NOT EXISTS subscription (
|
|
||||||
id TEXT PRIMARY KEY,
|
|
||||||
endpoint TEXT NOT NULL,
|
|
||||||
key_auth TEXT NOT NULL,
|
|
||||||
key_p256dh TEXT NOT NULL,
|
|
||||||
user_id TEXT NOT NULL,
|
|
||||||
subscriber_ip TEXT NOT NULL,
|
|
||||||
updated_at INT NOT NULL,
|
|
||||||
warned_at INT NOT NULL DEFAULT 0
|
|
||||||
);
|
|
||||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_endpoint ON subscription (endpoint);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_subscriber_ip ON subscription (subscriber_ip);
|
|
||||||
CREATE TABLE IF NOT EXISTS subscription_topic (
|
|
||||||
subscription_id TEXT NOT NULL,
|
|
||||||
topic TEXT NOT NULL,
|
|
||||||
PRIMARY KEY (subscription_id, topic),
|
|
||||||
FOREIGN KEY (subscription_id) REFERENCES subscription (id) ON DELETE CASCADE
|
|
||||||
);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_topic ON subscription_topic (topic);
|
|
||||||
CREATE TABLE IF NOT EXISTS schemaVersion (
|
|
||||||
id INT PRIMARY KEY,
|
|
||||||
version INT NOT NULL
|
|
||||||
);
|
|
||||||
COMMIT;
|
|
||||||
`
|
|
||||||
builtinStartupQueries = `
|
|
||||||
PRAGMA foreign_keys = ON;
|
|
||||||
`
|
|
||||||
|
|
||||||
selectWebPushSubscriptionIDByEndpoint = `SELECT id FROM subscription WHERE endpoint = ?`
|
|
||||||
selectWebPushSubscriptionCountBySubscriberIP = `SELECT COUNT(*) FROM subscription WHERE subscriber_ip = ?`
|
|
||||||
selectWebPushSubscriptionsForTopicQuery = `
|
|
||||||
SELECT id, endpoint, key_auth, key_p256dh, user_id
|
|
||||||
FROM subscription_topic st
|
|
||||||
JOIN subscription s ON s.id = st.subscription_id
|
|
||||||
WHERE st.topic = ?
|
|
||||||
ORDER BY endpoint
|
|
||||||
`
|
|
||||||
selectWebPushSubscriptionsExpiringSoonQuery = `
|
|
||||||
SELECT id, endpoint, key_auth, key_p256dh, user_id
|
|
||||||
FROM subscription
|
|
||||||
WHERE warned_at = 0 AND updated_at <= ?
|
|
||||||
`
|
|
||||||
insertWebPushSubscriptionQuery = `
|
|
||||||
INSERT INTO subscription (id, endpoint, key_auth, key_p256dh, user_id, subscriber_ip, updated_at, warned_at)
|
|
||||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
|
||||||
ON CONFLICT (endpoint)
|
|
||||||
DO UPDATE SET key_auth = excluded.key_auth, key_p256dh = excluded.key_p256dh, user_id = excluded.user_id, subscriber_ip = excluded.subscriber_ip, updated_at = excluded.updated_at, warned_at = excluded.warned_at
|
|
||||||
`
|
|
||||||
updateWebPushSubscriptionWarningSentQuery = `UPDATE subscription SET warned_at = ? WHERE id = ?`
|
|
||||||
deleteWebPushSubscriptionByEndpointQuery = `DELETE FROM subscription WHERE endpoint = ?`
|
|
||||||
deleteWebPushSubscriptionByUserIDQuery = `DELETE FROM subscription WHERE user_id = ?`
|
|
||||||
deleteWebPushSubscriptionByAgeQuery = `DELETE FROM subscription WHERE updated_at <= ?` // Full table scan!
|
|
||||||
|
|
||||||
insertWebPushSubscriptionTopicQuery = `INSERT INTO subscription_topic (subscription_id, topic) VALUES (?, ?)`
|
|
||||||
deleteWebPushSubscriptionTopicAllQuery = `DELETE FROM subscription_topic WHERE subscription_id = ?`
|
|
||||||
deleteWebPushSubscriptionTopicWithoutSubscription = `DELETE FROM subscription_topic WHERE subscription_id NOT IN (SELECT id FROM subscription)`
|
|
||||||
)
|
|
||||||
|
|
||||||
// Schema management queries
|
|
||||||
const (
|
|
||||||
currentWebPushSchemaVersion = 1
|
|
||||||
insertWebPushSchemaVersion = `INSERT INTO schemaVersion VALUES (1, ?)`
|
|
||||||
selectWebPushSchemaVersionQuery = `SELECT version FROM schemaVersion WHERE id = 1`
|
|
||||||
)
|
|
||||||
|
|
||||||
type webPushStore struct {
|
|
||||||
db *sql.DB
|
|
||||||
}
|
|
||||||
|
|
||||||
func newWebPushStore(filename, startupQueries string) (*webPushStore, error) {
|
|
||||||
db, err := sql.Open("sqlite3", filename)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := setupWebPushDB(db); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := runWebPushStartupQueries(db, startupQueries); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &webPushStore{
|
|
||||||
db: db,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func setupWebPushDB(db *sql.DB) error {
|
|
||||||
// If 'schemaVersion' table does not exist, this must be a new database
|
|
||||||
rows, err := db.Query(selectWebPushSchemaVersionQuery)
|
|
||||||
if err != nil {
|
|
||||||
return setupNewWebPushDB(db)
|
|
||||||
}
|
|
||||||
return rows.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func setupNewWebPushDB(db *sql.DB) error {
|
|
||||||
if _, err := db.Exec(createWebPushSubscriptionsTableQuery); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err := db.Exec(insertWebPushSchemaVersion, currentWebPushSchemaVersion); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func runWebPushStartupQueries(db *sql.DB, startupQueries string) error {
|
|
||||||
if _, err := db.Exec(startupQueries); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err := db.Exec(builtinStartupQueries); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpsertSubscription adds or updates Web Push subscriptions for the given topics and user ID. It always first deletes all
|
|
||||||
// existing entries for a given endpoint.
|
|
||||||
func (c *webPushStore) UpsertSubscription(endpoint string, auth, p256dh, userID string, subscriberIP netip.Addr, topics []string) error {
|
|
||||||
tx, err := c.db.Begin()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer tx.Rollback()
|
|
||||||
// Read number of subscriptions for subscriber IP address
|
|
||||||
rowsCount, err := tx.Query(selectWebPushSubscriptionCountBySubscriberIP, subscriberIP.String())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer rowsCount.Close()
|
|
||||||
var subscriptionCount int
|
|
||||||
if !rowsCount.Next() {
|
|
||||||
return errWebPushNoRows
|
|
||||||
}
|
|
||||||
if err := rowsCount.Scan(&subscriptionCount); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := rowsCount.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Read existing subscription ID for endpoint (or create new ID)
|
|
||||||
rows, err := tx.Query(selectWebPushSubscriptionIDByEndpoint, endpoint)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
var subscriptionID string
|
|
||||||
if rows.Next() {
|
|
||||||
if err := rows.Scan(&subscriptionID); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if subscriptionCount >= subscriptionEndpointLimitPerSubscriberIP {
|
|
||||||
return errWebPushTooManySubscriptions
|
|
||||||
}
|
|
||||||
subscriptionID = util.RandomStringPrefix(subscriptionIDPrefix, subscriptionIDLength)
|
|
||||||
}
|
|
||||||
if err := rows.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Insert or update subscription
|
|
||||||
updatedAt, warnedAt := time.Now().Unix(), 0
|
|
||||||
if _, err = tx.Exec(insertWebPushSubscriptionQuery, subscriptionID, endpoint, auth, p256dh, userID, subscriberIP.String(), updatedAt, warnedAt); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Replace all subscription topics
|
|
||||||
if _, err := tx.Exec(deleteWebPushSubscriptionTopicAllQuery, subscriptionID); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, topic := range topics {
|
|
||||||
if _, err = tx.Exec(insertWebPushSubscriptionTopicQuery, subscriptionID, topic); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return tx.Commit()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SubscriptionsForTopic returns all subscriptions for the given topic
|
|
||||||
func (c *webPushStore) SubscriptionsForTopic(topic string) ([]*webPushSubscription, error) {
|
|
||||||
rows, err := c.db.Query(selectWebPushSubscriptionsForTopicQuery, topic)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
return c.subscriptionsFromRows(rows)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SubscriptionsExpiring returns all subscriptions that have not been updated for a given time period
|
|
||||||
func (c *webPushStore) SubscriptionsExpiring(warnAfter time.Duration) ([]*webPushSubscription, error) {
|
|
||||||
rows, err := c.db.Query(selectWebPushSubscriptionsExpiringSoonQuery, time.Now().Add(-warnAfter).Unix())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
return c.subscriptionsFromRows(rows)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarkExpiryWarningSent marks the given subscriptions as having received a warning about expiring soon
|
|
||||||
func (c *webPushStore) MarkExpiryWarningSent(subscriptions []*webPushSubscription) error {
|
|
||||||
tx, err := c.db.Begin()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer tx.Rollback()
|
|
||||||
for _, subscription := range subscriptions {
|
|
||||||
if _, err := tx.Exec(updateWebPushSubscriptionWarningSentQuery, time.Now().Unix(), subscription.ID); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return tx.Commit()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *webPushStore) subscriptionsFromRows(rows *sql.Rows) ([]*webPushSubscription, error) {
|
|
||||||
subscriptions := make([]*webPushSubscription, 0)
|
|
||||||
for rows.Next() {
|
|
||||||
var id, endpoint, auth, p256dh, userID string
|
|
||||||
if err := rows.Scan(&id, &endpoint, &auth, &p256dh, &userID); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
subscriptions = append(subscriptions, &webPushSubscription{
|
|
||||||
ID: id,
|
|
||||||
Endpoint: endpoint,
|
|
||||||
Auth: auth,
|
|
||||||
P256dh: p256dh,
|
|
||||||
UserID: userID,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return subscriptions, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveSubscriptionsByEndpoint removes the subscription for the given endpoint
|
|
||||||
func (c *webPushStore) RemoveSubscriptionsByEndpoint(endpoint string) error {
|
|
||||||
_, err := c.db.Exec(deleteWebPushSubscriptionByEndpointQuery, endpoint)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveSubscriptionsByUserID removes all subscriptions for the given user ID
|
|
||||||
func (c *webPushStore) RemoveSubscriptionsByUserID(userID string) error {
|
|
||||||
if userID == "" {
|
|
||||||
return errWebPushUserIDCannotBeEmpty
|
|
||||||
}
|
|
||||||
_, err := c.db.Exec(deleteWebPushSubscriptionByUserIDQuery, userID)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveExpiredSubscriptions removes all subscriptions that have not been updated for a given time period
|
|
||||||
func (c *webPushStore) RemoveExpiredSubscriptions(expireAfter time.Duration) error {
|
|
||||||
_, err := c.db.Exec(deleteWebPushSubscriptionByAgeQuery, time.Now().Add(-expireAfter).Unix())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = c.db.Exec(deleteWebPushSubscriptionTopicWithoutSubscription)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the underlying database connection
|
|
||||||
func (c *webPushStore) Close() error {
|
|
||||||
return c.db.Close()
|
|
||||||
}
|
|
||||||
@@ -1,199 +0,0 @@
|
|||||||
package server
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"net/netip"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestWebPushStore_UpsertSubscription_SubscriptionsForTopic(t *testing.T) {
|
|
||||||
webPush := newTestWebPushStore(t)
|
|
||||||
defer webPush.Close()
|
|
||||||
|
|
||||||
require.Nil(t, webPush.UpsertSubscription(testWebPushEndpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"test-topic", "mytopic"}))
|
|
||||||
|
|
||||||
subs, err := webPush.SubscriptionsForTopic("test-topic")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 1)
|
|
||||||
require.Equal(t, subs[0].Endpoint, testWebPushEndpoint)
|
|
||||||
require.Equal(t, subs[0].P256dh, "p256dh-key")
|
|
||||||
require.Equal(t, subs[0].Auth, "auth-key")
|
|
||||||
require.Equal(t, subs[0].UserID, "u_1234")
|
|
||||||
|
|
||||||
subs2, err := webPush.SubscriptionsForTopic("mytopic")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs2, 1)
|
|
||||||
require.Equal(t, subs[0].Endpoint, subs2[0].Endpoint)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWebPushStore_UpsertSubscription_SubscriberIPLimitReached(t *testing.T) {
|
|
||||||
webPush := newTestWebPushStore(t)
|
|
||||||
defer webPush.Close()
|
|
||||||
|
|
||||||
// Insert 10 subscriptions with the same IP address
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
endpoint := fmt.Sprintf(testWebPushEndpoint+"%d", i)
|
|
||||||
require.Nil(t, webPush.UpsertSubscription(endpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"test-topic", "mytopic"}))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Another one for the same endpoint should be fine
|
|
||||||
require.Nil(t, webPush.UpsertSubscription(testWebPushEndpoint+"0", "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"test-topic", "mytopic"}))
|
|
||||||
|
|
||||||
// But with a different endpoint it should fail
|
|
||||||
require.Equal(t, errWebPushTooManySubscriptions, webPush.UpsertSubscription(testWebPushEndpoint+"11", "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"test-topic", "mytopic"}))
|
|
||||||
|
|
||||||
// But with a different IP address it should be fine again
|
|
||||||
require.Nil(t, webPush.UpsertSubscription(testWebPushEndpoint+"99", "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("9.9.9.9"), []string{"test-topic", "mytopic"}))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWebPushStore_UpsertSubscription_UpdateTopics(t *testing.T) {
|
|
||||||
webPush := newTestWebPushStore(t)
|
|
||||||
defer webPush.Close()
|
|
||||||
|
|
||||||
// Insert subscription with two topics, and another with one topic
|
|
||||||
require.Nil(t, webPush.UpsertSubscription(testWebPushEndpoint+"0", "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1", "topic2"}))
|
|
||||||
require.Nil(t, webPush.UpsertSubscription(testWebPushEndpoint+"1", "auth-key", "p256dh-key", "", netip.MustParseAddr("9.9.9.9"), []string{"topic1"}))
|
|
||||||
|
|
||||||
subs, err := webPush.SubscriptionsForTopic("topic1")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 2)
|
|
||||||
require.Equal(t, testWebPushEndpoint+"0", subs[0].Endpoint)
|
|
||||||
require.Equal(t, testWebPushEndpoint+"1", subs[1].Endpoint)
|
|
||||||
|
|
||||||
subs, err = webPush.SubscriptionsForTopic("topic2")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 1)
|
|
||||||
require.Equal(t, testWebPushEndpoint+"0", subs[0].Endpoint)
|
|
||||||
|
|
||||||
// Update the first subscription to have only one topic
|
|
||||||
require.Nil(t, webPush.UpsertSubscription(testWebPushEndpoint+"0", "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1"}))
|
|
||||||
|
|
||||||
subs, err = webPush.SubscriptionsForTopic("topic1")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 2)
|
|
||||||
require.Equal(t, testWebPushEndpoint+"0", subs[0].Endpoint)
|
|
||||||
|
|
||||||
subs, err = webPush.SubscriptionsForTopic("topic2")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWebPushStore_RemoveSubscriptionsByEndpoint(t *testing.T) {
|
|
||||||
webPush := newTestWebPushStore(t)
|
|
||||||
defer webPush.Close()
|
|
||||||
|
|
||||||
// Insert subscription with two topics
|
|
||||||
require.Nil(t, webPush.UpsertSubscription(testWebPushEndpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1", "topic2"}))
|
|
||||||
subs, err := webPush.SubscriptionsForTopic("topic1")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 1)
|
|
||||||
|
|
||||||
// And remove it again
|
|
||||||
require.Nil(t, webPush.RemoveSubscriptionsByEndpoint(testWebPushEndpoint))
|
|
||||||
subs, err = webPush.SubscriptionsForTopic("topic1")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWebPushStore_RemoveSubscriptionsByUserID(t *testing.T) {
|
|
||||||
webPush := newTestWebPushStore(t)
|
|
||||||
defer webPush.Close()
|
|
||||||
|
|
||||||
// Insert subscription with two topics
|
|
||||||
require.Nil(t, webPush.UpsertSubscription(testWebPushEndpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1", "topic2"}))
|
|
||||||
subs, err := webPush.SubscriptionsForTopic("topic1")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 1)
|
|
||||||
|
|
||||||
// And remove it again
|
|
||||||
require.Nil(t, webPush.RemoveSubscriptionsByUserID("u_1234"))
|
|
||||||
subs, err = webPush.SubscriptionsForTopic("topic1")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWebPushStore_RemoveSubscriptionsByUserID_Empty(t *testing.T) {
|
|
||||||
webPush := newTestWebPushStore(t)
|
|
||||||
defer webPush.Close()
|
|
||||||
require.Equal(t, errWebPushUserIDCannotBeEmpty, webPush.RemoveSubscriptionsByUserID(""))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWebPushStore_MarkExpiryWarningSent(t *testing.T) {
|
|
||||||
webPush := newTestWebPushStore(t)
|
|
||||||
defer webPush.Close()
|
|
||||||
|
|
||||||
// Insert subscription with two topics
|
|
||||||
require.Nil(t, webPush.UpsertSubscription(testWebPushEndpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1", "topic2"}))
|
|
||||||
subs, err := webPush.SubscriptionsForTopic("topic1")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 1)
|
|
||||||
|
|
||||||
// Mark them as warning sent
|
|
||||||
require.Nil(t, webPush.MarkExpiryWarningSent(subs))
|
|
||||||
|
|
||||||
rows, err := webPush.db.Query("SELECT endpoint FROM subscription WHERE warned_at > 0")
|
|
||||||
require.Nil(t, err)
|
|
||||||
defer rows.Close()
|
|
||||||
var endpoint string
|
|
||||||
require.True(t, rows.Next())
|
|
||||||
require.Nil(t, rows.Scan(&endpoint))
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Equal(t, testWebPushEndpoint, endpoint)
|
|
||||||
require.False(t, rows.Next())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWebPushStore_SubscriptionsExpiring(t *testing.T) {
|
|
||||||
webPush := newTestWebPushStore(t)
|
|
||||||
defer webPush.Close()
|
|
||||||
|
|
||||||
// Insert subscription with two topics
|
|
||||||
require.Nil(t, webPush.UpsertSubscription(testWebPushEndpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1", "topic2"}))
|
|
||||||
subs, err := webPush.SubscriptionsForTopic("topic1")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 1)
|
|
||||||
|
|
||||||
// Fake-mark them as soon-to-expire
|
|
||||||
_, err = webPush.db.Exec("UPDATE subscription SET updated_at = ? WHERE endpoint = ?", time.Now().Add(-8*24*time.Hour).Unix(), testWebPushEndpoint)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
// Should not be cleaned up yet
|
|
||||||
require.Nil(t, webPush.RemoveExpiredSubscriptions(9*24*time.Hour))
|
|
||||||
|
|
||||||
// Run expiration
|
|
||||||
subs, err = webPush.SubscriptionsExpiring(7 * 24 * time.Hour)
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 1)
|
|
||||||
require.Equal(t, testWebPushEndpoint, subs[0].Endpoint)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWebPushStore_RemoveExpiredSubscriptions(t *testing.T) {
|
|
||||||
webPush := newTestWebPushStore(t)
|
|
||||||
defer webPush.Close()
|
|
||||||
|
|
||||||
// Insert subscription with two topics
|
|
||||||
require.Nil(t, webPush.UpsertSubscription(testWebPushEndpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1", "topic2"}))
|
|
||||||
subs, err := webPush.SubscriptionsForTopic("topic1")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 1)
|
|
||||||
|
|
||||||
// Fake-mark them as expired
|
|
||||||
_, err = webPush.db.Exec("UPDATE subscription SET updated_at = ? WHERE endpoint = ?", time.Now().Add(-10*24*time.Hour).Unix(), testWebPushEndpoint)
|
|
||||||
require.Nil(t, err)
|
|
||||||
|
|
||||||
// Run expiration
|
|
||||||
require.Nil(t, webPush.RemoveExpiredSubscriptions(9*24*time.Hour))
|
|
||||||
|
|
||||||
// List again, should be 0
|
|
||||||
subs, err = webPush.SubscriptionsForTopic("topic1")
|
|
||||||
require.Nil(t, err)
|
|
||||||
require.Len(t, subs, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newTestWebPushStore(t *testing.T) *webPushStore {
|
|
||||||
webPush, err := newWebPushStore(filepath.Join(t.TempDir(), "webpush.db"), "")
|
|
||||||
require.Nil(t, err)
|
|
||||||
return webPush
|
|
||||||
}
|
|
||||||
35
tools/pgimport/README.md
Normal file
35
tools/pgimport/README.md
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
# pgimport
|
||||||
|
|
||||||
|
Migrates ntfy data from SQLite to PostgreSQL.
|
||||||
|
|
||||||
|
## Build
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go build -o pgimport ./tools/pgimport/
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Using CLI flags
|
||||||
|
pgimport \
|
||||||
|
--database-url "postgres://user:pass@host:5432/ntfy?sslmode=require" \
|
||||||
|
--cache-file /var/cache/ntfy/cache.db \
|
||||||
|
--auth-file /var/lib/ntfy/user.db \
|
||||||
|
--web-push-file /var/lib/ntfy/webpush.db
|
||||||
|
|
||||||
|
# Using server.yml (flags override config values)
|
||||||
|
pgimport --config /etc/ntfy/server.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- PostgreSQL schema must already be set up (run ntfy with `database-url` once)
|
||||||
|
- ntfy must not be running during the import
|
||||||
|
- All three SQLite files are optional; only the ones specified will be imported
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- The tool is idempotent and safe to re-run
|
||||||
|
- After importing, row counts and content are verified against the SQLite sources
|
||||||
|
- Invalid UTF-8 in messages is replaced with the Unicode replacement character
|
||||||
888
tools/pgimport/main.go
Normal file
888
tools/pgimport/main.go
Normal file
@@ -0,0 +1,888 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
_ "github.com/mattn/go-sqlite3"
|
||||||
|
"github.com/urfave/cli/v2"
|
||||||
|
"github.com/urfave/cli/v2/altsrc"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
|
"heckel.io/ntfy/v2/db"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// batchSize is the number of message rows inserted per transaction
	// during the message import.
	batchSize = 1000

	// Expected schema versions of the target PostgreSQL stores; the import
	// refuses to run against a schema version it was not written for.
	expectedMessageSchemaVersion = 14
	expectedUserSchemaVersion    = 6
	expectedWebPushSchemaVersion = 1
)
|
||||||
|
|
||||||
|
// flags defines the CLI flags. The altsrc-wrapped flags can also be read
// from a server.yml config file; the underscore aliases match the YAML keys
// (see newYamlSourceFromFile, which maps aliases to the primary names).
var flags = []cli.Flag{
	&cli.StringFlag{Name: "config", Aliases: []string{"c"}, Usage: "path to server.yml config file"},
	altsrc.NewStringFlag(&cli.StringFlag{Name: "database-url", Aliases: []string{"database_url"}, Usage: "PostgreSQL connection string"}),
	altsrc.NewStringFlag(&cli.StringFlag{Name: "cache-file", Aliases: []string{"cache_file"}, Usage: "SQLite message cache file path"}),
	altsrc.NewStringFlag(&cli.StringFlag{Name: "auth-file", Aliases: []string{"auth_file"}, Usage: "SQLite user/auth database file path"}),
	altsrc.NewStringFlag(&cli.StringFlag{Name: "web-push-file", Aliases: []string{"web_push_file"}, Usage: "SQLite web push database file path"}),
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
app := &cli.App{
|
||||||
|
Name: "pgimport",
|
||||||
|
Usage: "SQLite to PostgreSQL migration tool for ntfy",
|
||||||
|
UsageText: "pgimport [OPTIONS]",
|
||||||
|
Flags: flags,
|
||||||
|
Before: loadConfigFile("config", flags),
|
||||||
|
Action: execImport,
|
||||||
|
}
|
||||||
|
if err := app.Run(os.Args); err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// execImport is the CLI action. It validates the flags, shows a summary and
// asks for interactive confirmation, connects to PostgreSQL, imports each
// configured SQLite source (checking the target schema version first), and
// finally verifies the imported data against the SQLite sources. All three
// source files are optional; only the ones set are imported and verified.
func execImport(c *cli.Context) error {
	databaseURL := c.String("database-url")
	cacheFile := c.String("cache-file")
	authFile := c.String("auth-file")
	webPushFile := c.String("web-push-file")

	if databaseURL == "" {
		return fmt.Errorf("database-url must be set (via --database-url or config file)")
	}
	if cacheFile == "" && authFile == "" && webPushFile == "" {
		return fmt.Errorf("at least one of --cache-file, --auth-file, or --web-push-file must be set")
	}

	// Print a summary of sources and target; the password in the database
	// URL is masked before printing
	fmt.Println("pgimport - SQLite to PostgreSQL migration tool for ntfy")
	fmt.Println()
	fmt.Println("Sources:")
	printSource(" Cache file: ", cacheFile)
	printSource(" Auth file: ", authFile)
	printSource(" Web push file: ", webPushFile)
	fmt.Println()
	fmt.Println("Target:")
	fmt.Printf(" Database URL: %s\n", maskPassword(databaseURL))
	fmt.Println()
	fmt.Println("This will import data from the SQLite databases into PostgreSQL.")
	fmt.Print("Make sure ntfy is not running. Continue? (y/n): ")

	// Interactive confirmation; anything but "y" (case-insensitive) aborts.
	// The Scanln error is deliberately ignored: an empty/failed read leaves
	// answer empty, which aborts.
	var answer string
	fmt.Scanln(&answer)
	if strings.TrimSpace(strings.ToLower(answer)) != "y" {
		fmt.Println("Aborted.")
		return nil
	}
	fmt.Println()

	pgDB, err := db.OpenPostgres(databaseURL)
	if err != nil {
		return fmt.Errorf("cannot connect to PostgreSQL: %w", err)
	}
	defer pgDB.Close()

	// Each source is imported only after the matching PostgreSQL store
	// schema version has been verified, so we never write into a schema
	// this tool was not built for
	if authFile != "" {
		if err := verifySchemaVersion(pgDB, "user", expectedUserSchemaVersion); err != nil {
			return err
		}
		if err := importUsers(authFile, pgDB); err != nil {
			return fmt.Errorf("cannot import users: %w", err)
		}
	}
	if cacheFile != "" {
		if err := verifySchemaVersion(pgDB, "message", expectedMessageSchemaVersion); err != nil {
			return err
		}
		if err := importMessages(cacheFile, pgDB); err != nil {
			return fmt.Errorf("cannot import messages: %w", err)
		}
	}
	if webPushFile != "" {
		if err := verifySchemaVersion(pgDB, "webpush", expectedWebPushSchemaVersion); err != nil {
			return err
		}
		if err := importWebPush(webPushFile, pgDB); err != nil {
			return fmt.Errorf("cannot import web push subscriptions: %w", err)
		}
	}

	// Verification pass: each verify* function compares PostgreSQL against
	// the SQLite source and flips "failed" on mismatch (a hard error is
	// only returned if verification itself cannot run)
	fmt.Println()
	fmt.Println("Verifying migration ...")
	failed := false
	if authFile != "" {
		if err := verifyUsers(authFile, pgDB, &failed); err != nil {
			return fmt.Errorf("cannot verify users: %w", err)
		}
	}
	if cacheFile != "" {
		if err := verifyMessages(cacheFile, pgDB, &failed); err != nil {
			return fmt.Errorf("cannot verify messages: %w", err)
		}
	}
	if webPushFile != "" {
		if err := verifyWebPush(webPushFile, pgDB, &failed); err != nil {
			return fmt.Errorf("cannot verify web push: %w", err)
		}
	}
	fmt.Println()
	if failed {
		return fmt.Errorf("verification FAILED, see above for details")
	}
	fmt.Println("Verification successful. Migration complete.")
	return nil
}
|
||||||
|
|
||||||
|
func loadConfigFile(configFlag string, flags []cli.Flag) cli.BeforeFunc {
|
||||||
|
return func(c *cli.Context) error {
|
||||||
|
configFile := c.String(configFlag)
|
||||||
|
if configFile == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if _, err := os.Stat(configFile); os.IsNotExist(err) {
|
||||||
|
return fmt.Errorf("config file %s does not exist", configFile)
|
||||||
|
}
|
||||||
|
inputSource, err := newYamlSourceFromFile(configFile, flags)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return altsrc.ApplyInputSourceValues(c, inputSource, flags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newYamlSourceFromFile(file string, flags []cli.Flag) (altsrc.InputSourceContext, error) {
|
||||||
|
var rawConfig map[any]any
|
||||||
|
b, err := os.ReadFile(file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := yaml.Unmarshal(b, &rawConfig); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, f := range flags {
|
||||||
|
flagName := f.Names()[0]
|
||||||
|
for _, flagAlias := range f.Names()[1:] {
|
||||||
|
if _, ok := rawConfig[flagAlias]; ok {
|
||||||
|
rawConfig[flagName] = rawConfig[flagAlias]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return altsrc.NewMapInputSource(file, rawConfig), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// verifySchemaVersion checks that the PostgreSQL schema_version table
// reports the expected version for the given store, and returns a
// descriptive error otherwise.
func verifySchemaVersion(pgDB *sql.DB, store string, expected int) error {
	var actual int
	if err := pgDB.QueryRow(`SELECT version FROM schema_version WHERE store = $1`, store).Scan(&actual); err != nil {
		return fmt.Errorf("cannot read %s schema version from PostgreSQL (is the schema set up?): %w", store, err)
	}
	if actual != expected {
		return fmt.Errorf("%s schema version mismatch: expected %d, got %d", store, expected, actual)
	}
	return nil
}
|
||||||
|
|
||||||
|
// printSource prints one labeled source file line for the summary, noting
// when the path is unset or the file cannot be found (both are skipped).
func printSource(label, path string) {
	if path == "" {
		fmt.Printf("%s(not set, skipping)\n", label)
		return
	}
	if _, err := os.Stat(path); os.IsNotExist(err) {
		fmt.Printf("%s%s (NOT FOUND, skipping)\n", label, path)
		return
	}
	fmt.Printf("%s%s\n", label, path)
}
|
||||||
|
|
||||||
|
func maskPassword(databaseURL string) string {
|
||||||
|
u, err := url.Parse(databaseURL)
|
||||||
|
if err != nil {
|
||||||
|
return databaseURL
|
||||||
|
}
|
||||||
|
if u.User != nil {
|
||||||
|
if _, hasPass := u.User.Password(); hasPass {
|
||||||
|
masked := u.Scheme + "://" + u.User.Username() + ":****@" + u.Host + u.Path
|
||||||
|
if u.RawQuery != "" {
|
||||||
|
masked += "?" + u.RawQuery
|
||||||
|
}
|
||||||
|
return masked
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return u.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// openSQLite opens the given SQLite file in read-only mode (mode=ro DSN
// parameter), failing fast with a clear error if the file is missing.
func openSQLite(filename string) (*sql.DB, error) {
	_, statErr := os.Stat(filename)
	if os.IsNotExist(statErr) {
		return nil, fmt.Errorf("file %s does not exist", filename)
	}
	dsn := filename + "?mode=ro"
	return sql.Open("sqlite3", dsn)
}
|
||||||
|
|
||||||
|
// User import
|
||||||
|
|
||||||
|
func importUsers(sqliteFile string, pgDB *sql.DB) error {
|
||||||
|
sqlDB, err := openSQLite(sqliteFile)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Skipping user import: %s\n", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
defer sqlDB.Close()
|
||||||
|
fmt.Printf("Importing users from %s ...\n", sqliteFile)
|
||||||
|
|
||||||
|
count, err := importTiers(sqlDB, pgDB)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("importing tiers: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Printf(" Imported %d tiers\n", count)
|
||||||
|
|
||||||
|
count, err = importUserRows(sqlDB, pgDB)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("importing users: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Printf(" Imported %d users\n", count)
|
||||||
|
|
||||||
|
count, err = importUserAccess(sqlDB, pgDB)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("importing user access: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Printf(" Imported %d access entries\n", count)
|
||||||
|
|
||||||
|
count, err = importUserTokens(sqlDB, pgDB)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("importing user tokens: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Printf(" Imported %d tokens\n", count)
|
||||||
|
|
||||||
|
count, err = importUserPhones(sqlDB, pgDB)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("importing user phones: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Printf(" Imported %d phone numbers\n", count)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// importTiers copies all rows of the SQLite "tier" table into the PostgreSQL
// "tier" table in a single transaction. Rows with an existing id are left
// untouched (ON CONFLICT DO NOTHING), which makes the import idempotent.
// Returns the number of rows read from SQLite.
func importTiers(sqlDB, pgDB *sql.DB) (int, error) {
	rows, err := sqlDB.Query(`SELECT id, code, name, messages_limit, messages_expiry_duration, emails_limit, calls_limit, reservations_limit, attachment_file_size_limit, attachment_total_size_limit, attachment_expiry_duration, attachment_bandwidth_limit, stripe_monthly_price_id, stripe_yearly_price_id FROM tier`)
	if err != nil {
		return 0, err
	}
	defer rows.Close()

	tx, err := pgDB.Begin()
	if err != nil {
		return 0, err
	}
	defer tx.Rollback() // no-op after a successful Commit

	stmt, err := tx.Prepare(`INSERT INTO tier (id, code, name, messages_limit, messages_expiry_duration, emails_limit, calls_limit, reservations_limit, attachment_file_size_limit, attachment_total_size_limit, attachment_expiry_duration, attachment_bandwidth_limit, stripe_monthly_price_id, stripe_yearly_price_id) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14) ON CONFLICT (id) DO NOTHING`)
	if err != nil {
		return 0, err
	}
	defer stmt.Close()

	count := 0
	for rows.Next() {
		var id, code, name string
		var messagesLimit, messagesExpiryDuration, emailsLimit, callsLimit, reservationsLimit int64
		var attachmentFileSizeLimit, attachmentTotalSizeLimit, attachmentExpiryDuration, attachmentBandwidthLimit int64
		var stripeMonthlyPriceID, stripeYearlyPriceID sql.NullString
		if err := rows.Scan(&id, &code, &name, &messagesLimit, &messagesExpiryDuration, &emailsLimit, &callsLimit, &reservationsLimit, &attachmentFileSizeLimit, &attachmentTotalSizeLimit, &attachmentExpiryDuration, &attachmentBandwidthLimit, &stripeMonthlyPriceID, &stripeYearlyPriceID); err != nil {
			return 0, err
		}
		if _, err := stmt.Exec(id, code, name, messagesLimit, messagesExpiryDuration, emailsLimit, callsLimit, reservationsLimit, attachmentFileSizeLimit, attachmentTotalSizeLimit, attachmentExpiryDuration, attachmentBandwidthLimit, stripeMonthlyPriceID, stripeYearlyPriceID); err != nil {
			return 0, err
		}
		count++
	}
	// rows.Next may return false because of an iteration error, not EOF;
	// without this check a partial read would be committed as a success
	if err := rows.Err(); err != nil {
		return 0, err
	}
	return count, tx.Commit()
}
|
||||||
|
|
||||||
|
// importUserRows copies all rows of the SQLite "user" table into the
// PostgreSQL "user" table (quoted, as "user" is a reserved word) in a single
// transaction. SQLite's 0/1 "provisioned" flag becomes a real boolean. Rows
// with an existing id are kept (ON CONFLICT DO NOTHING), making the import
// idempotent. Returns the number of rows read from SQLite.
func importUserRows(sqlDB, pgDB *sql.DB) (int, error) {
	rows, err := sqlDB.Query(`SELECT id, user, pass, role, prefs, sync_topic, provisioned, stats_messages, stats_emails, stats_calls, stripe_customer_id, stripe_subscription_id, stripe_subscription_status, stripe_subscription_interval, stripe_subscription_paid_until, stripe_subscription_cancel_at, created, deleted, tier_id FROM user`)
	if err != nil {
		return 0, err
	}
	defer rows.Close()

	tx, err := pgDB.Begin()
	if err != nil {
		return 0, err
	}
	defer tx.Rollback() // no-op after a successful Commit

	stmt, err := tx.Prepare(`
		INSERT INTO "user" (id, user_name, pass, role, prefs, sync_topic, provisioned, stats_messages, stats_emails, stats_calls, stripe_customer_id, stripe_subscription_id, stripe_subscription_status, stripe_subscription_interval, stripe_subscription_paid_until, stripe_subscription_cancel_at, created, deleted, tier_id)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19)
		ON CONFLICT (id) DO NOTHING
	`)
	if err != nil {
		return 0, err
	}
	defer stmt.Close()

	count := 0
	for rows.Next() {
		var id, userName, pass, role, prefs, syncTopic string
		var provisioned int
		var statsMessages, statsEmails, statsCalls int64
		var stripeCustomerID, stripeSubscriptionID, stripeSubscriptionStatus, stripeSubscriptionInterval sql.NullString
		var stripeSubscriptionPaidUntil, stripeSubscriptionCancelAt sql.NullInt64
		var created int64
		var deleted sql.NullInt64
		var tierID sql.NullString
		if err := rows.Scan(&id, &userName, &pass, &role, &prefs, &syncTopic, &provisioned, &statsMessages, &statsEmails, &statsCalls, &stripeCustomerID, &stripeSubscriptionID, &stripeSubscriptionStatus, &stripeSubscriptionInterval, &stripeSubscriptionPaidUntil, &stripeSubscriptionCancelAt, &created, &deleted, &tierID); err != nil {
			return 0, err
		}
		provisionedBool := provisioned != 0 // SQLite stores booleans as 0/1 integers
		if _, err := stmt.Exec(id, userName, pass, role, prefs, syncTopic, provisionedBool, statsMessages, statsEmails, statsCalls, stripeCustomerID, stripeSubscriptionID, stripeSubscriptionStatus, stripeSubscriptionInterval, stripeSubscriptionPaidUntil, stripeSubscriptionCancelAt, created, deleted, tierID); err != nil {
			return 0, err
		}
		count++
	}
	// Surface iteration errors; otherwise a partial read would be committed
	// as a successful import
	if err := rows.Err(); err != nil {
		return 0, err
	}
	return count, tx.Commit()
}
|
||||||
|
|
||||||
|
// importUserAccess copies user_access rows into PostgreSQL in a single
// transaction. The JOIN against "user" skips access rows whose user no
// longer exists (presumably to satisfy referential integrity in the target
// schema — confirm against the PostgreSQL DDL). SQLite's 0/1 flags become
// real booleans. Returns the number of rows read from SQLite.
func importUserAccess(sqlDB, pgDB *sql.DB) (int, error) {
	rows, err := sqlDB.Query(`SELECT a.user_id, a.topic, a.read, a.write, a.owner_user_id, a.provisioned FROM user_access a JOIN user u ON u.id = a.user_id`)
	if err != nil {
		return 0, err
	}
	defer rows.Close()

	tx, err := pgDB.Begin()
	if err != nil {
		return 0, err
	}
	defer tx.Rollback() // no-op after a successful Commit

	stmt, err := tx.Prepare(`INSERT INTO user_access (user_id, topic, read, write, owner_user_id, provisioned) VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (user_id, topic) DO NOTHING`)
	if err != nil {
		return 0, err
	}
	defer stmt.Close()

	count := 0
	for rows.Next() {
		var userID, topic string
		var read, write, provisioned int
		var ownerUserID sql.NullString
		if err := rows.Scan(&userID, &topic, &read, &write, &ownerUserID, &provisioned); err != nil {
			return 0, err
		}
		// SQLite stores booleans as 0/1 integers
		if _, err := stmt.Exec(userID, topic, read != 0, write != 0, ownerUserID, provisioned != 0); err != nil {
			return 0, err
		}
		count++
	}
	// Surface iteration errors; otherwise a partial read would be committed
	// as a successful import
	if err := rows.Err(); err != nil {
		return 0, err
	}
	return count, tx.Commit()
}
|
||||||
|
|
||||||
|
// importUserTokens copies user_token rows into PostgreSQL in a single
// transaction. The JOIN against "user" skips tokens whose user no longer
// exists. SQLite's 0/1 "provisioned" flag becomes a real boolean. Returns
// the number of rows read from SQLite.
func importUserTokens(sqlDB, pgDB *sql.DB) (int, error) {
	rows, err := sqlDB.Query(`SELECT t.user_id, t.token, t.label, t.last_access, t.last_origin, t.expires, t.provisioned FROM user_token t JOIN user u ON u.id = t.user_id`)
	if err != nil {
		return 0, err
	}
	defer rows.Close()

	tx, err := pgDB.Begin()
	if err != nil {
		return 0, err
	}
	defer tx.Rollback() // no-op after a successful Commit

	stmt, err := tx.Prepare(`INSERT INTO user_token (user_id, token, label, last_access, last_origin, expires, provisioned) VALUES ($1, $2, $3, $4, $5, $6, $7) ON CONFLICT (user_id, token) DO NOTHING`)
	if err != nil {
		return 0, err
	}
	defer stmt.Close()

	count := 0
	for rows.Next() {
		var userID, token, label, lastOrigin string
		var lastAccess, expires int64
		var provisioned int
		if err := rows.Scan(&userID, &token, &label, &lastAccess, &lastOrigin, &expires, &provisioned); err != nil {
			return 0, err
		}
		provisionedBool := provisioned != 0 // SQLite stores booleans as 0/1 integers
		if _, err := stmt.Exec(userID, token, label, lastAccess, lastOrigin, expires, provisionedBool); err != nil {
			return 0, err
		}
		count++
	}
	// Surface iteration errors; otherwise a partial read would be committed
	// as a successful import
	if err := rows.Err(); err != nil {
		return 0, err
	}
	return count, tx.Commit()
}
|
||||||
|
|
||||||
|
// importUserPhones copies user_phone rows into PostgreSQL in a single
// transaction. The JOIN against "user" skips phone numbers whose user no
// longer exists. Returns the number of rows read from SQLite.
func importUserPhones(sqlDB, pgDB *sql.DB) (int, error) {
	rows, err := sqlDB.Query(`SELECT p.user_id, p.phone_number FROM user_phone p JOIN user u ON u.id = p.user_id`)
	if err != nil {
		return 0, err
	}
	defer rows.Close()

	tx, err := pgDB.Begin()
	if err != nil {
		return 0, err
	}
	defer tx.Rollback() // no-op after a successful Commit

	stmt, err := tx.Prepare(`INSERT INTO user_phone (user_id, phone_number) VALUES ($1, $2) ON CONFLICT (user_id, phone_number) DO NOTHING`)
	if err != nil {
		return 0, err
	}
	defer stmt.Close()

	count := 0
	for rows.Next() {
		var userID, phoneNumber string
		if err := rows.Scan(&userID, &phoneNumber); err != nil {
			return 0, err
		}
		if _, err := stmt.Exec(userID, phoneNumber); err != nil {
			return 0, err
		}
		count++
	}
	// Surface iteration errors; otherwise a partial read would be committed
	// as a successful import
	if err := rows.Err(); err != nil {
		return 0, err
	}
	return count, tx.Commit()
}
|
||||||
|
|
||||||
|
// Message import
|
||||||
|
|
||||||
|
func importMessages(sqliteFile string, pgDB *sql.DB) error {
|
||||||
|
sqlDB, err := openSQLite(sqliteFile)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Skipping message import: %s\n", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
defer sqlDB.Close()
|
||||||
|
fmt.Printf("Importing messages from %s ...\n", sqliteFile)
|
||||||
|
|
||||||
|
rows, err := sqlDB.Query(`SELECT mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, attachment_deleted, sender, user, content_type, encoding, published FROM messages`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("querying messages: %w", err)
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
if _, err := pgDB.Exec(`CREATE UNIQUE INDEX IF NOT EXISTS idx_message_mid_unique ON message (mid)`); err != nil {
|
||||||
|
return fmt.Errorf("creating unique index on mid: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
insertQuery := `INSERT INTO message (mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, attachment_deleted, sender, user_id, content_type, encoding, published) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24) ON CONFLICT (mid) DO NOTHING`
|
||||||
|
|
||||||
|
count := 0
|
||||||
|
batchCount := 0
|
||||||
|
tx, err := pgDB.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
stmt, err := tx.Prepare(insertQuery)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer stmt.Close()
|
||||||
|
|
||||||
|
for rows.Next() {
|
||||||
|
var mid, sequenceID, event, topic, message, title, tags, click, icon, actions string
|
||||||
|
var attachmentName, attachmentType, attachmentURL, sender, userID, contentType, encoding string
|
||||||
|
var msgTime, expires, attachmentExpires int64
|
||||||
|
var priority int
|
||||||
|
var attachmentSize int64
|
||||||
|
var attachmentDeleted, published int
|
||||||
|
if err := rows.Scan(&mid, &sequenceID, &msgTime, &event, &expires, &topic, &message, &title, &priority, &tags, &click, &icon, &actions, &attachmentName, &attachmentType, &attachmentSize, &attachmentExpires, &attachmentURL, &attachmentDeleted, &sender, &userID, &contentType, &encoding, &published); err != nil {
|
||||||
|
return fmt.Errorf("scanning message: %w", err)
|
||||||
|
}
|
||||||
|
mid = toUTF8(mid)
|
||||||
|
sequenceID = toUTF8(sequenceID)
|
||||||
|
event = toUTF8(event)
|
||||||
|
topic = toUTF8(topic)
|
||||||
|
message = toUTF8(message)
|
||||||
|
title = toUTF8(title)
|
||||||
|
tags = toUTF8(tags)
|
||||||
|
click = toUTF8(click)
|
||||||
|
icon = toUTF8(icon)
|
||||||
|
actions = toUTF8(actions)
|
||||||
|
attachmentName = toUTF8(attachmentName)
|
||||||
|
attachmentType = toUTF8(attachmentType)
|
||||||
|
attachmentURL = toUTF8(attachmentURL)
|
||||||
|
sender = toUTF8(sender)
|
||||||
|
userID = toUTF8(userID)
|
||||||
|
contentType = toUTF8(contentType)
|
||||||
|
encoding = toUTF8(encoding)
|
||||||
|
attachmentDeletedBool := attachmentDeleted != 0
|
||||||
|
publishedBool := published != 0
|
||||||
|
if _, err := stmt.Exec(mid, sequenceID, msgTime, event, expires, topic, message, title, priority, tags, click, icon, actions, attachmentName, attachmentType, attachmentSize, attachmentExpires, attachmentURL, attachmentDeletedBool, sender, userID, contentType, encoding, publishedBool); err != nil {
|
||||||
|
return fmt.Errorf("inserting message: %w", err)
|
||||||
|
}
|
||||||
|
count++
|
||||||
|
batchCount++
|
||||||
|
if batchCount >= batchSize {
|
||||||
|
stmt.Close()
|
||||||
|
if err := tx.Commit(); err != nil {
|
||||||
|
return fmt.Errorf("committing message batch: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Printf(" ... %d messages\n", count)
|
||||||
|
tx, err = pgDB.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
stmt, err = tx.Prepare(insertQuery)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
batchCount = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if batchCount > 0 {
|
||||||
|
stmt.Close()
|
||||||
|
if err := tx.Commit(); err != nil {
|
||||||
|
return fmt.Errorf("committing final message batch: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Printf(" Imported %d messages\n", count)
|
||||||
|
|
||||||
|
var statsValue int64
|
||||||
|
err = sqlDB.QueryRow(`SELECT value FROM stats WHERE key = 'messages'`).Scan(&statsValue)
|
||||||
|
if err == nil {
|
||||||
|
if _, err := pgDB.Exec(`UPDATE message_stats SET value = $1 WHERE key = 'messages'`, statsValue); err != nil {
|
||||||
|
return fmt.Errorf("updating message stats: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Printf(" Updated message stats (count: %d)\n", statsValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Web push import
|
||||||
|
|
||||||
|
func importWebPush(sqliteFile string, pgDB *sql.DB) error {
|
||||||
|
sqlDB, err := openSQLite(sqliteFile)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Skipping web push import: %s\n", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
defer sqlDB.Close()
|
||||||
|
fmt.Printf("Importing web push subscriptions from %s ...\n", sqliteFile)
|
||||||
|
|
||||||
|
rows, err := sqlDB.Query(`SELECT id, endpoint, key_auth, key_p256dh, user_id, subscriber_ip, updated_at, warned_at FROM subscription`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("querying subscriptions: %w", err)
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
tx, err := pgDB.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
stmt, err := tx.Prepare(`INSERT INTO webpush_subscription (id, endpoint, key_auth, key_p256dh, user_id, subscriber_ip, updated_at, warned_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) ON CONFLICT (id) DO NOTHING`)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer stmt.Close()
|
||||||
|
|
||||||
|
count := 0
|
||||||
|
for rows.Next() {
|
||||||
|
var id, endpoint, keyAuth, keyP256dh, userID, subscriberIP string
|
||||||
|
var updatedAt, warnedAt int64
|
||||||
|
if err := rows.Scan(&id, &endpoint, &keyAuth, &keyP256dh, &userID, &subscriberIP, &updatedAt, &warnedAt); err != nil {
|
||||||
|
return fmt.Errorf("scanning subscription: %w", err)
|
||||||
|
}
|
||||||
|
if _, err := stmt.Exec(id, endpoint, keyAuth, keyP256dh, userID, subscriberIP, updatedAt, warnedAt); err != nil {
|
||||||
|
return fmt.Errorf("inserting subscription: %w", err)
|
||||||
|
}
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
stmt.Close()
|
||||||
|
if err := tx.Commit(); err != nil {
|
||||||
|
return fmt.Errorf("committing subscriptions: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Printf(" Imported %d subscriptions\n", count)
|
||||||
|
|
||||||
|
topicRows, err := sqlDB.Query(`SELECT subscription_id, topic FROM subscription_topic`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("querying subscription topics: %w", err)
|
||||||
|
}
|
||||||
|
defer topicRows.Close()
|
||||||
|
|
||||||
|
tx, err = pgDB.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
stmt, err = tx.Prepare(`INSERT INTO webpush_subscription_topic (subscription_id, topic) VALUES ($1, $2) ON CONFLICT (subscription_id, topic) DO NOTHING`)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer stmt.Close()
|
||||||
|
|
||||||
|
topicCount := 0
|
||||||
|
for topicRows.Next() {
|
||||||
|
var subscriptionID, topic string
|
||||||
|
if err := topicRows.Scan(&subscriptionID, &topic); err != nil {
|
||||||
|
return fmt.Errorf("scanning subscription topic: %w", err)
|
||||||
|
}
|
||||||
|
if _, err := stmt.Exec(subscriptionID, topic); err != nil {
|
||||||
|
return fmt.Errorf("inserting subscription topic: %w", err)
|
||||||
|
}
|
||||||
|
topicCount++
|
||||||
|
}
|
||||||
|
stmt.Close()
|
||||||
|
if err := tx.Commit(); err != nil {
|
||||||
|
return fmt.Errorf("committing subscription topics: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Printf(" Imported %d subscription topics\n", topicCount)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// toUTF8 returns s with every run of invalid UTF-8 bytes replaced by the
// Unicode replacement character (U+FFFD). PostgreSQL rejects text values
// containing invalid UTF-8, while SQLite stores them verbatim, so imported
// strings are sanitized through this helper.
func toUTF8(s string) string {
	const replacement = "\uFFFD"
	return strings.ToValidUTF8(s, replacement)
}
|
||||||
|
|
||||||
|
// Verification
|
||||||
|
|
||||||
|
func verifyUsers(sqliteFile string, pgDB *sql.DB, failed *bool) error {
|
||||||
|
sqlDB, err := openSQLite(sqliteFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
defer sqlDB.Close()
|
||||||
|
|
||||||
|
verifyCount(sqlDB, pgDB, "tier", `SELECT COUNT(*) FROM tier`, `SELECT COUNT(*) FROM tier`, failed)
|
||||||
|
verifyContent(sqlDB, pgDB, "tier",
|
||||||
|
`SELECT id, code, name FROM tier ORDER BY id`,
|
||||||
|
`SELECT id, code, name FROM tier ORDER BY id COLLATE "C"`,
|
||||||
|
failed)
|
||||||
|
|
||||||
|
verifyCount(sqlDB, pgDB, "user", `SELECT COUNT(*) FROM user`, `SELECT COUNT(*) FROM "user"`, failed)
|
||||||
|
verifyContent(sqlDB, pgDB, "user",
|
||||||
|
`SELECT id, user, role, sync_topic FROM user ORDER BY id`,
|
||||||
|
`SELECT id, user_name, role, sync_topic FROM "user" ORDER BY id COLLATE "C"`,
|
||||||
|
failed)
|
||||||
|
|
||||||
|
verifyCount(sqlDB, pgDB, "user_access", `SELECT COUNT(*) FROM user_access a JOIN user u ON u.id = a.user_id`, `SELECT COUNT(*) FROM user_access`, failed)
|
||||||
|
verifyContent(sqlDB, pgDB, "user_access",
|
||||||
|
`SELECT a.user_id, a.topic FROM user_access a JOIN user u ON u.id = a.user_id ORDER BY a.user_id, a.topic`,
|
||||||
|
`SELECT user_id, topic FROM user_access ORDER BY user_id COLLATE "C", topic COLLATE "C"`,
|
||||||
|
failed)
|
||||||
|
|
||||||
|
verifyCount(sqlDB, pgDB, "user_token", `SELECT COUNT(*) FROM user_token t JOIN user u ON u.id = t.user_id`, `SELECT COUNT(*) FROM user_token`, failed)
|
||||||
|
verifyContent(sqlDB, pgDB, "user_token",
|
||||||
|
`SELECT t.user_id, t.token, t.label FROM user_token t JOIN user u ON u.id = t.user_id ORDER BY t.user_id, t.token`,
|
||||||
|
`SELECT user_id, token, label FROM user_token ORDER BY user_id COLLATE "C", token COLLATE "C"`,
|
||||||
|
failed)
|
||||||
|
|
||||||
|
verifyCount(sqlDB, pgDB, "user_phone", `SELECT COUNT(*) FROM user_phone p JOIN user u ON u.id = p.user_id`, `SELECT COUNT(*) FROM user_phone`, failed)
|
||||||
|
verifyContent(sqlDB, pgDB, "user_phone",
|
||||||
|
`SELECT p.user_id, p.phone_number FROM user_phone p JOIN user u ON u.id = p.user_id ORDER BY p.user_id, p.phone_number`,
|
||||||
|
`SELECT user_id, phone_number FROM user_phone ORDER BY user_id COLLATE "C", phone_number COLLATE "C"`,
|
||||||
|
failed)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyMessages(sqliteFile string, pgDB *sql.DB, failed *bool) error {
|
||||||
|
sqlDB, err := openSQLite(sqliteFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
defer sqlDB.Close()
|
||||||
|
|
||||||
|
verifyCount(sqlDB, pgDB, "messages", `SELECT COUNT(*) FROM messages`, `SELECT COUNT(*) FROM message`, failed)
|
||||||
|
verifySampledMessages(sqlDB, pgDB, failed)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyWebPush(sqliteFile string, pgDB *sql.DB, failed *bool) error {
|
||||||
|
sqlDB, err := openSQLite(sqliteFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
defer sqlDB.Close()
|
||||||
|
|
||||||
|
verifyCount(sqlDB, pgDB, "subscription", `SELECT COUNT(*) FROM subscription`, `SELECT COUNT(*) FROM webpush_subscription`, failed)
|
||||||
|
verifyContent(sqlDB, pgDB, "subscription",
|
||||||
|
`SELECT id, endpoint, key_auth, key_p256dh, user_id FROM subscription ORDER BY id`,
|
||||||
|
`SELECT id, endpoint, key_auth, key_p256dh, user_id FROM webpush_subscription ORDER BY id COLLATE "C"`,
|
||||||
|
failed)
|
||||||
|
|
||||||
|
verifyCount(sqlDB, pgDB, "subscription_topic", `SELECT COUNT(*) FROM subscription_topic`, `SELECT COUNT(*) FROM webpush_subscription_topic`, failed)
|
||||||
|
verifyContent(sqlDB, pgDB, "subscription_topic",
|
||||||
|
`SELECT subscription_id, topic FROM subscription_topic ORDER BY subscription_id, topic`,
|
||||||
|
`SELECT subscription_id, topic FROM webpush_subscription_topic ORDER BY subscription_id COLLATE "C", topic COLLATE "C"`,
|
||||||
|
failed)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// verifyCount runs one COUNT(*) query against each database and compares
// the results, printing an OK/MISMATCH/ERROR line for the given table.
// Any error or mismatch sets *failed to true.
func verifyCount(sqlDB, pgDB *sql.DB, table, sqliteQuery, pgQuery string, failed *bool) {
	var sqliteCount int64
	err := sqlDB.QueryRow(sqliteQuery).Scan(&sqliteCount)
	if err != nil {
		fmt.Printf(" %-25s count ERROR reading SQLite: %s\n", table, err)
		*failed = true
		return
	}
	var pgCount int64
	err = pgDB.QueryRow(pgQuery).Scan(&pgCount)
	if err != nil {
		fmt.Printf(" %-25s count ERROR reading PostgreSQL: %s\n", table, err)
		*failed = true
		return
	}
	if sqliteCount != pgCount {
		fmt.Printf(" %-25s count MISMATCH: SQLite=%d, PostgreSQL=%d\n", table, sqliteCount, pgCount)
		*failed = true
		return
	}
	fmt.Printf(" %-25s count OK (%d rows)\n", table, pgCount)
}
|
||||||
|
|
||||||
|
func verifyContent(sqlDB, pgDB *sql.DB, table, sqliteQuery, pgQuery string, failed *bool) {
|
||||||
|
sqliteRows, err := sqlDB.Query(sqliteQuery)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf(" %-25s content ERROR reading SQLite: %s\n", table, err)
|
||||||
|
*failed = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer sqliteRows.Close()
|
||||||
|
|
||||||
|
pgRows, err := pgDB.Query(pgQuery)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf(" %-25s content ERROR reading PostgreSQL: %s\n", table, err)
|
||||||
|
*failed = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer pgRows.Close()
|
||||||
|
|
||||||
|
cols, err := sqliteRows.Columns()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf(" %-25s content ERROR reading columns: %s\n", table, err)
|
||||||
|
*failed = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
numCols := len(cols)
|
||||||
|
|
||||||
|
rowNum := 0
|
||||||
|
mismatches := 0
|
||||||
|
for sqliteRows.Next() {
|
||||||
|
rowNum++
|
||||||
|
if !pgRows.Next() {
|
||||||
|
fmt.Printf(" %-25s content MISMATCH: PostgreSQL has fewer rows (at row %d)\n", table, rowNum)
|
||||||
|
*failed = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sqliteVals := makeStringSlice(numCols)
|
||||||
|
pgVals := makeStringSlice(numCols)
|
||||||
|
if err := sqliteRows.Scan(sqliteVals...); err != nil {
|
||||||
|
fmt.Printf(" %-25s content ERROR scanning SQLite row %d: %s\n", table, rowNum, err)
|
||||||
|
*failed = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := pgRows.Scan(pgVals...); err != nil {
|
||||||
|
fmt.Printf(" %-25s content ERROR scanning PostgreSQL row %d: %s\n", table, rowNum, err)
|
||||||
|
*failed = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for i := 0; i < numCols; i++ {
|
||||||
|
sv := *(sqliteVals[i].(*sql.NullString))
|
||||||
|
pv := *(pgVals[i].(*sql.NullString))
|
||||||
|
if sv != pv {
|
||||||
|
mismatches++
|
||||||
|
if mismatches <= 3 {
|
||||||
|
fmt.Printf(" %-25s content MISMATCH at row %d, col %s: SQLite=%q, PostgreSQL=%q\n", table, rowNum, cols[i], sv.String, pv.String)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if pgRows.Next() {
|
||||||
|
fmt.Printf(" %-25s content MISMATCH: PostgreSQL has more rows than SQLite\n", table)
|
||||||
|
*failed = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if mismatches > 0 {
|
||||||
|
if mismatches > 3 {
|
||||||
|
fmt.Printf(" %-25s content ... and %d more mismatches\n", table, mismatches-3)
|
||||||
|
}
|
||||||
|
*failed = true
|
||||||
|
} else {
|
||||||
|
fmt.Printf(" %-25s content OK\n", table)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifySampledMessages(sqlDB, pgDB *sql.DB, failed *bool) {
|
||||||
|
rows, err := sqlDB.Query(`SELECT mid, topic, time, message, title, tags, priority FROM messages ORDER BY mid`)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf(" %-25s content ERROR reading SQLite: %s\n", "messages (sampled)", err)
|
||||||
|
*failed = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
rowNum := 0
|
||||||
|
checked := 0
|
||||||
|
mismatches := 0
|
||||||
|
for rows.Next() {
|
||||||
|
rowNum++
|
||||||
|
var mid, topic, message, title, tags string
|
||||||
|
var msgTime int64
|
||||||
|
var priority int
|
||||||
|
if err := rows.Scan(&mid, &topic, &msgTime, &message, &title, &tags, &priority); err != nil {
|
||||||
|
fmt.Printf(" %-25s content ERROR scanning SQLite row %d: %s\n", "messages (sampled)", rowNum, err)
|
||||||
|
*failed = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if rowNum%100 != 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
checked++
|
||||||
|
var pgTopic, pgMessage, pgTitle, pgTags string
|
||||||
|
var pgTime int64
|
||||||
|
var pgPriority int
|
||||||
|
err := pgDB.QueryRow(`SELECT topic, time, message, title, tags, priority FROM message WHERE mid = $1`, mid).
|
||||||
|
Scan(&pgTopic, &pgTime, &pgMessage, &pgTitle, &pgTags, &pgPriority)
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
mismatches++
|
||||||
|
if mismatches <= 3 {
|
||||||
|
fmt.Printf(" %-25s content MISMATCH: mid=%s not found in PostgreSQL\n", "messages (sampled)", mid)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
} else if err != nil {
|
||||||
|
fmt.Printf(" %-25s content ERROR querying PostgreSQL for mid=%s: %s\n", "messages (sampled)", mid, err)
|
||||||
|
*failed = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
topic = toUTF8(topic)
|
||||||
|
message = toUTF8(message)
|
||||||
|
title = toUTF8(title)
|
||||||
|
tags = toUTF8(tags)
|
||||||
|
if topic != pgTopic || msgTime != pgTime || message != pgMessage || title != pgTitle || tags != pgTags || priority != pgPriority {
|
||||||
|
mismatches++
|
||||||
|
if mismatches <= 3 {
|
||||||
|
fmt.Printf(" %-25s content MISMATCH at mid=%s\n", "messages (sampled)", mid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if mismatches > 0 {
|
||||||
|
if mismatches > 3 {
|
||||||
|
fmt.Printf(" %-25s content ... and %d more mismatches\n", "messages (sampled)", mismatches-3)
|
||||||
|
}
|
||||||
|
*failed = true
|
||||||
|
} else {
|
||||||
|
fmt.Printf(" %-25s content OK (%d samples checked)\n", "messages (sampled)", checked)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeStringSlice(n int) []any {
|
||||||
|
vals := make([]any, n)
|
||||||
|
for i := range vals {
|
||||||
|
vals[i] = &sql.NullString{}
|
||||||
|
}
|
||||||
|
return vals
|
||||||
|
}
|
||||||
1730
user/manager.go
1730
user/manager.go
File diff suppressed because it is too large
Load Diff
270
user/manager_postgres.go
Normal file
270
user/manager_postgres.go
Normal file
@@ -0,0 +1,270 @@
|
|||||||
|
package user
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PostgreSQL queries
//
// These mirror the SQLite queries used by the user manager, rewritten for
// PostgreSQL: positional parameters use $N instead of ?, and the reserved
// table name "user" is double-quoted.
const (
	// User queries
	//
	// The four SELECT ... FROM "user" queries below return the same column
	// list (user fields followed by the LEFT JOINed tier fields), presumably
	// so one row-scanning routine can handle all of them — confirm against
	// the shared scan helper.
	postgresSelectUserByIDQuery = `
		SELECT u.id, u.user_name, u.pass, u.role, u.prefs, u.sync_topic, u.provisioned, u.stats_messages, u.stats_emails, u.stats_calls, u.stripe_customer_id, u.stripe_subscription_id, u.stripe_subscription_status, u.stripe_subscription_interval, u.stripe_subscription_paid_until, u.stripe_subscription_cancel_at, u.deleted, t.id, t.code, t.name, t.messages_limit, t.messages_expiry_duration, t.emails_limit, t.calls_limit, t.reservations_limit, t.attachment_file_size_limit, t.attachment_total_size_limit, t.attachment_expiry_duration, t.attachment_bandwidth_limit, t.stripe_monthly_price_id, t.stripe_yearly_price_id
		FROM "user" u
		LEFT JOIN tier t on t.id = u.tier_id
		WHERE u.id = $1
	`
	postgresSelectUserByNameQuery = `
		SELECT u.id, u.user_name, u.pass, u.role, u.prefs, u.sync_topic, u.provisioned, u.stats_messages, u.stats_emails, u.stats_calls, u.stripe_customer_id, u.stripe_subscription_id, u.stripe_subscription_status, u.stripe_subscription_interval, u.stripe_subscription_paid_until, u.stripe_subscription_cancel_at, u.deleted, t.id, t.code, t.name, t.messages_limit, t.messages_expiry_duration, t.emails_limit, t.calls_limit, t.reservations_limit, t.attachment_file_size_limit, t.attachment_total_size_limit, t.attachment_expiry_duration, t.attachment_bandwidth_limit, t.stripe_monthly_price_id, t.stripe_yearly_price_id
		FROM "user" u
		LEFT JOIN tier t on t.id = u.tier_id
		WHERE user_name = $1
	`
	// A token matches if it has not expired; expires = 0 means "never
	// expires". $2 is the current time.
	postgresSelectUserByTokenQuery = `
		SELECT u.id, u.user_name, u.pass, u.role, u.prefs, u.sync_topic, u.provisioned, u.stats_messages, u.stats_emails, u.stats_calls, u.stripe_customer_id, u.stripe_subscription_id, u.stripe_subscription_status, u.stripe_subscription_interval, u.stripe_subscription_paid_until, u.stripe_subscription_cancel_at, u.deleted, t.id, t.code, t.name, t.messages_limit, t.messages_expiry_duration, t.emails_limit, t.calls_limit, t.reservations_limit, t.attachment_file_size_limit, t.attachment_total_size_limit, t.attachment_expiry_duration, t.attachment_bandwidth_limit, t.stripe_monthly_price_id, t.stripe_yearly_price_id
		FROM "user" u
		JOIN user_token tk on u.id = tk.user_id
		LEFT JOIN tier t on t.id = u.tier_id
		WHERE tk.token = $1 AND (tk.expires = 0 OR tk.expires >= $2)
	`
	postgresSelectUserByStripeCustomerIDQuery = `
		SELECT u.id, u.user_name, u.pass, u.role, u.prefs, u.sync_topic, u.provisioned, u.stats_messages, u.stats_emails, u.stats_calls, u.stripe_customer_id, u.stripe_subscription_id, u.stripe_subscription_status, u.stripe_subscription_interval, u.stripe_subscription_paid_until, u.stripe_subscription_cancel_at, u.deleted, t.id, t.code, t.name, t.messages_limit, t.messages_expiry_duration, t.emails_limit, t.calls_limit, t.reservations_limit, t.attachment_file_size_limit, t.attachment_total_size_limit, t.attachment_expiry_duration, t.attachment_bandwidth_limit, t.stripe_monthly_price_id, t.stripe_yearly_price_id
		FROM "user" u
		LEFT JOIN tier t on t.id = u.tier_id
		WHERE u.stripe_customer_id = $1
	`
	// Admins first, the anonymous user last, everyone else alphabetically
	// in between.
	postgresSelectUsernamesQuery = `
		SELECT user_name
		FROM "user"
		ORDER BY
			CASE role
				WHEN 'admin' THEN 1
				WHEN 'anonymous' THEN 3
				ELSE 2
			END, user_name
	`
	postgresSelectUserCountQuery          = `SELECT COUNT(*) FROM "user"`
	postgresSelectUserIDFromUsernameQuery = `SELECT id FROM "user" WHERE user_name = $1`
	postgresInsertUserQuery               = `INSERT INTO "user" (id, user_name, pass, role, sync_topic, provisioned, created) VALUES ($1, $2, $3, $4, $5, $6, $7)`
	postgresUpdateUserPassQuery           = `UPDATE "user" SET pass = $1 WHERE user_name = $2`
	postgresUpdateUserRoleQuery           = `UPDATE "user" SET role = $1 WHERE user_name = $2`
	postgresUpdateUserProvisionedQuery    = `UPDATE "user" SET provisioned = $1 WHERE user_name = $2`
	postgresUpdateUserPrefsQuery          = `UPDATE "user" SET prefs = $1 WHERE id = $2`
	postgresUpdateUserStatsQuery          = `UPDATE "user" SET stats_messages = $1, stats_emails = $2, stats_calls = $3 WHERE id = $4`
	postgresUpdateUserStatsResetAllQuery  = `UPDATE "user" SET stats_messages = 0, stats_emails = 0, stats_calls = 0`
	postgresUpdateUserTierQuery           = `UPDATE "user" SET tier_id = (SELECT id FROM tier WHERE code = $1) WHERE user_name = $2`
	// "deleted" is a timestamp marking soft-deletion; rows older than $1 are
	// purged by postgresDeleteUsersMarkedQuery below.
	postgresUpdateUserDeletedQuery = `UPDATE "user" SET deleted = $1 WHERE id = $2`
	postgresDeleteUserQuery        = `DELETE FROM "user" WHERE user_name = $1`
	postgresDeleteUserTierQuery    = `UPDATE "user" SET tier_id = null WHERE user_name = $1`
	postgresDeleteUsersMarkedQuery = `DELETE FROM "user" WHERE deleted < $1`

	// Access queries
	//
	// NOTE(review): where the same logical value appears under two
	// placeholders (e.g. $1/$2 below), callers presumably pass it twice in
	// the argument list — confirm against the call sites.
	//
	// Most-specific topic pattern wins: longer patterns sort first, and
	// write access breaks ties.
	postgresSelectTopicPermsQuery = `
		SELECT read, write
		FROM user_access a
		JOIN "user" u ON u.id = a.user_id
		WHERE (u.user_name = $1 OR u.user_name = $2) AND $3 LIKE a.topic ESCAPE '\'
		ORDER BY u.user_name DESC, LENGTH(a.topic) DESC, CASE WHEN a.write THEN 1 ELSE 0 END DESC
	`
	postgresSelectUserAllAccessQuery = `
		SELECT user_id, topic, read, write, provisioned
		FROM user_access
		ORDER BY LENGTH(topic) DESC, CASE WHEN write THEN 1 ELSE 0 END DESC, CASE WHEN read THEN 1 ELSE 0 END DESC, topic
	`
	postgresSelectUserAccessQuery = `
		SELECT topic, read, write, provisioned
		FROM user_access
		WHERE user_id = (SELECT id FROM "user" WHERE user_name = $1)
		ORDER BY LENGTH(topic) DESC, CASE WHEN write THEN 1 ELSE 0 END DESC, CASE WHEN read THEN 1 ELSE 0 END DESC, topic
	`
	// A reservation is an access row owned by the user itself
	// (user_id = owner_user_id). The LEFT JOIN pulls in the everyone-user's
	// permissions for the same topic; $1 is presumably the everyone user and
	// $2 the owner — confirm against the caller.
	postgresSelectUserReservationsQuery = `
		SELECT a_user.topic, a_user.read, a_user.write, a_everyone.read AS everyone_read, a_everyone.write AS everyone_write
		FROM user_access a_user
		LEFT JOIN user_access a_everyone ON a_user.topic = a_everyone.topic AND a_everyone.user_id = (SELECT id FROM "user" WHERE user_name = $1)
		WHERE a_user.user_id = a_user.owner_user_id
			AND a_user.owner_user_id = (SELECT id FROM "user" WHERE user_name = $2)
		ORDER BY a_user.topic
	`
	postgresSelectUserReservationsCountQuery = `
		SELECT COUNT(*)
		FROM user_access
		WHERE user_id = owner_user_id
			AND owner_user_id = (SELECT id FROM "user" WHERE user_name = $1)
	`
	postgresSelectUserReservationsOwnerQuery = `
		SELECT owner_user_id
		FROM user_access
		WHERE topic = $1
			AND user_id = owner_user_id
	`
	postgresSelectUserHasReservationQuery = `
		SELECT COUNT(*)
		FROM user_access
		WHERE user_id = owner_user_id
			AND owner_user_id = (SELECT id FROM "user" WHERE user_name = $1)
			AND topic = $2
	`
	// Counts access entries for a topic (exact match or pattern match) that
	// belong to someone other than the given user.
	postgresSelectOtherAccessCountQuery = `
		SELECT COUNT(*)
		FROM user_access
		WHERE (topic = $1 OR $2 LIKE topic ESCAPE '\')
			AND (owner_user_id IS NULL OR owner_user_id != (SELECT id FROM "user" WHERE user_name = $3))
	`
	// $5/$6 both receive the owner username: $5 selects the no-owner case,
	// $6 resolves the owner's id.
	postgresUpsertUserAccessQuery = `
		INSERT INTO user_access (user_id, topic, read, write, owner_user_id, provisioned)
		VALUES (
			(SELECT id FROM "user" WHERE user_name = $1),
			$2,
			$3,
			$4,
			CASE WHEN $5 = '' THEN NULL ELSE (SELECT id FROM "user" WHERE user_name = $6) END,
			$7
		)
		ON CONFLICT (user_id, topic)
		DO UPDATE SET read=excluded.read, write=excluded.write, owner_user_id=excluded.owner_user_id, provisioned=excluded.provisioned
	`
	postgresDeleteUserAccessQuery = `
		DELETE FROM user_access
		WHERE user_id = (SELECT id FROM "user" WHERE user_name = $1)
			OR owner_user_id = (SELECT id FROM "user" WHERE user_name = $2)
	`
	postgresDeleteUserAccessProvisionedQuery = `DELETE FROM user_access WHERE provisioned = true`
	postgresDeleteTopicAccessQuery           = `
		DELETE FROM user_access
		WHERE (user_id = (SELECT id FROM "user" WHERE user_name = $1) OR owner_user_id = (SELECT id FROM "user" WHERE user_name = $2))
			AND topic = $3
	`
	postgresDeleteAllAccessQuery = `DELETE FROM user_access`

	// Token queries
	postgresSelectTokenQuery                = `SELECT token, label, last_access, last_origin, expires, provisioned FROM user_token WHERE user_id = $1 AND token = $2`
	postgresSelectTokensQuery               = `SELECT token, label, last_access, last_origin, expires, provisioned FROM user_token WHERE user_id = $1`
	postgresSelectTokenCountQuery           = `SELECT COUNT(*) FROM user_token WHERE user_id = $1`
	postgresSelectAllProvisionedTokensQuery = `SELECT token, label, last_access, last_origin, expires, provisioned FROM user_token WHERE provisioned = true`
	postgresUpsertTokenQuery                = `
		INSERT INTO user_token (user_id, token, label, last_access, last_origin, expires, provisioned)
		VALUES ($1, $2, $3, $4, $5, $6, $7)
		ON CONFLICT (user_id, token)
		DO UPDATE SET label = excluded.label, expires = excluded.expires, provisioned = excluded.provisioned
	`
	postgresUpdateTokenQuery            = `UPDATE user_token SET label = $1, expires = $2 WHERE user_id = $3 AND token = $4`
	postgresUpdateTokenLastAccessQuery  = `UPDATE user_token SET last_access = $1, last_origin = $2 WHERE token = $3`
	postgresDeleteTokenQuery            = `DELETE FROM user_token WHERE user_id = $1 AND token = $2`
	postgresDeleteProvisionedTokenQuery = `DELETE FROM user_token WHERE token = $1`
	postgresDeleteAllTokenQuery         = `DELETE FROM user_token WHERE user_id = $1`
	postgresDeleteExpiredTokensQuery    = `DELETE FROM user_token WHERE expires > 0 AND expires < $1`
	// Keeps only the $3 latest-expiring tokens for a user; $1 and $2 both
	// receive the same user id.
	postgresDeleteExcessTokensQuery = `
		DELETE FROM user_token
		WHERE user_id = $1
			AND (user_id, token) NOT IN (
				SELECT user_id, token
				FROM user_token
				WHERE user_id = $2
				ORDER BY expires DESC
				LIMIT $3
			)
	`

	// Tier queries
	postgresInsertTierQuery = `
		INSERT INTO tier (id, code, name, messages_limit, messages_expiry_duration, emails_limit, calls_limit, reservations_limit, attachment_file_size_limit, attachment_total_size_limit, attachment_expiry_duration, attachment_bandwidth_limit, stripe_monthly_price_id, stripe_yearly_price_id)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
	`
	postgresUpdateTierQuery = `
		UPDATE tier
		SET name = $1, messages_limit = $2, messages_expiry_duration = $3, emails_limit = $4, calls_limit = $5, reservations_limit = $6, attachment_file_size_limit = $7, attachment_total_size_limit = $8, attachment_expiry_duration = $9, attachment_bandwidth_limit = $10, stripe_monthly_price_id = $11, stripe_yearly_price_id = $12
		WHERE code = $13
	`
	postgresSelectTiersQuery = `
		SELECT id, code, name, messages_limit, messages_expiry_duration, emails_limit, calls_limit, reservations_limit, attachment_file_size_limit, attachment_total_size_limit, attachment_expiry_duration, attachment_bandwidth_limit, stripe_monthly_price_id, stripe_yearly_price_id
		FROM tier
	`
	postgresSelectTierByCodeQuery = `
		SELECT id, code, name, messages_limit, messages_expiry_duration, emails_limit, calls_limit, reservations_limit, attachment_file_size_limit, attachment_total_size_limit, attachment_expiry_duration, attachment_bandwidth_limit, stripe_monthly_price_id, stripe_yearly_price_id
		FROM tier
		WHERE code = $1
	`
	// $1 and $2 both receive the same Stripe price id (monthly or yearly).
	postgresSelectTierByPriceIDQuery = `
		SELECT id, code, name, messages_limit, messages_expiry_duration, emails_limit, calls_limit, reservations_limit, attachment_file_size_limit, attachment_total_size_limit, attachment_expiry_duration, attachment_bandwidth_limit, stripe_monthly_price_id, stripe_yearly_price_id
		FROM tier
		WHERE (stripe_monthly_price_id = $1 OR stripe_yearly_price_id = $2)
	`
	postgresDeleteTierQuery = `DELETE FROM tier WHERE code = $1`

	// Phone queries
	postgresSelectPhoneNumbersQuery = `SELECT phone_number FROM user_phone WHERE user_id = $1`
	postgresInsertPhoneNumberQuery  = `INSERT INTO user_phone (user_id, phone_number) VALUES ($1, $2)`
	postgresDeletePhoneNumberQuery  = `DELETE FROM user_phone WHERE user_id = $1 AND phone_number = $2`

	// Billing queries
	postgresUpdateBillingQuery = `
		UPDATE "user"
		SET stripe_customer_id = $1, stripe_subscription_id = $2, stripe_subscription_status = $3, stripe_subscription_interval = $4, stripe_subscription_paid_until = $5, stripe_subscription_cancel_at = $6
		WHERE user_name = $7
	`
)
|
||||||
|
|
||||||
|
// NewPostgresManager creates a new Manager backed by a PostgreSQL database using an existing connection pool.
|
||||||
|
var postgresQueries = queries{
|
||||||
|
selectUserByID: postgresSelectUserByIDQuery,
|
||||||
|
selectUserByName: postgresSelectUserByNameQuery,
|
||||||
|
selectUserByToken: postgresSelectUserByTokenQuery,
|
||||||
|
selectUserByStripeCustomerID: postgresSelectUserByStripeCustomerIDQuery,
|
||||||
|
selectUsernames: postgresSelectUsernamesQuery,
|
||||||
|
selectUserCount: postgresSelectUserCountQuery,
|
||||||
|
selectUserIDFromUsername: postgresSelectUserIDFromUsernameQuery,
|
||||||
|
insertUser: postgresInsertUserQuery,
|
||||||
|
updateUserPass: postgresUpdateUserPassQuery,
|
||||||
|
updateUserRole: postgresUpdateUserRoleQuery,
|
||||||
|
updateUserProvisioned: postgresUpdateUserProvisionedQuery,
|
||||||
|
updateUserPrefs: postgresUpdateUserPrefsQuery,
|
||||||
|
updateUserStats: postgresUpdateUserStatsQuery,
|
||||||
|
updateUserStatsResetAll: postgresUpdateUserStatsResetAllQuery,
|
||||||
|
updateUserTier: postgresUpdateUserTierQuery,
|
||||||
|
updateUserDeleted: postgresUpdateUserDeletedQuery,
|
||||||
|
deleteUser: postgresDeleteUserQuery,
|
||||||
|
deleteUserTier: postgresDeleteUserTierQuery,
|
||||||
|
deleteUsersMarked: postgresDeleteUsersMarkedQuery,
|
||||||
|
selectTopicPerms: postgresSelectTopicPermsQuery,
|
||||||
|
selectUserAllAccess: postgresSelectUserAllAccessQuery,
|
||||||
|
selectUserAccess: postgresSelectUserAccessQuery,
|
||||||
|
selectUserReservations: postgresSelectUserReservationsQuery,
|
||||||
|
selectUserReservationsCount: postgresSelectUserReservationsCountQuery,
|
||||||
|
selectUserReservationsOwner: postgresSelectUserReservationsOwnerQuery,
|
||||||
|
selectUserHasReservation: postgresSelectUserHasReservationQuery,
|
||||||
|
selectOtherAccessCount: postgresSelectOtherAccessCountQuery,
|
||||||
|
upsertUserAccess: postgresUpsertUserAccessQuery,
|
||||||
|
deleteUserAccess: postgresDeleteUserAccessQuery,
|
||||||
|
deleteUserAccessProvisioned: postgresDeleteUserAccessProvisionedQuery,
|
||||||
|
deleteTopicAccess: postgresDeleteTopicAccessQuery,
|
||||||
|
deleteAllAccess: postgresDeleteAllAccessQuery,
|
||||||
|
selectToken: postgresSelectTokenQuery,
|
||||||
|
selectTokens: postgresSelectTokensQuery,
|
||||||
|
selectTokenCount: postgresSelectTokenCountQuery,
|
||||||
|
selectAllProvisionedTokens: postgresSelectAllProvisionedTokensQuery,
|
||||||
|
upsertToken: postgresUpsertTokenQuery,
|
||||||
|
updateToken: postgresUpdateTokenQuery,
|
||||||
|
updateTokenLastAccess: postgresUpdateTokenLastAccessQuery,
|
||||||
|
deleteToken: postgresDeleteTokenQuery,
|
||||||
|
deleteProvisionedToken: postgresDeleteProvisionedTokenQuery,
|
||||||
|
deleteAllToken: postgresDeleteAllTokenQuery,
|
||||||
|
deleteExpiredTokens: postgresDeleteExpiredTokensQuery,
|
||||||
|
deleteExcessTokens: postgresDeleteExcessTokensQuery,
|
||||||
|
insertTier: postgresInsertTierQuery,
|
||||||
|
selectTiers: postgresSelectTiersQuery,
|
||||||
|
selectTierByCode: postgresSelectTierByCodeQuery,
|
||||||
|
selectTierByPriceID: postgresSelectTierByPriceIDQuery,
|
||||||
|
updateTier: postgresUpdateTierQuery,
|
||||||
|
deleteTier: postgresDeleteTierQuery,
|
||||||
|
selectPhoneNumbers: postgresSelectPhoneNumbersQuery,
|
||||||
|
insertPhoneNumber: postgresInsertPhoneNumberQuery,
|
||||||
|
deletePhoneNumber: postgresDeletePhoneNumberQuery,
|
||||||
|
updateBilling: postgresUpdateBillingQuery,
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPostgresManager creates a new Manager backed by a PostgreSQL database
|
||||||
|
func NewPostgresManager(db *sql.DB, config *Config) (*Manager, error) {
|
||||||
|
if err := setupPostgres(db); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newManager(db, postgresQueries, config)
|
||||||
|
}
|
||||||
113
user/manager_postgres_schema.go
Normal file
113
user/manager_postgres_schema.go
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
package user
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Initial PostgreSQL schema
|
||||||
|
const (
|
||||||
|
postgresCreateTablesQueries = `
|
||||||
|
CREATE TABLE IF NOT EXISTS tier (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
code TEXT NOT NULL,
|
||||||
|
name TEXT NOT NULL,
|
||||||
|
messages_limit BIGINT NOT NULL,
|
||||||
|
messages_expiry_duration BIGINT NOT NULL,
|
||||||
|
emails_limit BIGINT NOT NULL,
|
||||||
|
calls_limit BIGINT NOT NULL,
|
||||||
|
reservations_limit BIGINT NOT NULL,
|
||||||
|
attachment_file_size_limit BIGINT NOT NULL,
|
||||||
|
attachment_total_size_limit BIGINT NOT NULL,
|
||||||
|
attachment_expiry_duration BIGINT NOT NULL,
|
||||||
|
attachment_bandwidth_limit BIGINT NOT NULL,
|
||||||
|
stripe_monthly_price_id TEXT,
|
||||||
|
stripe_yearly_price_id TEXT,
|
||||||
|
UNIQUE(code),
|
||||||
|
UNIQUE(stripe_monthly_price_id),
|
||||||
|
UNIQUE(stripe_yearly_price_id)
|
||||||
|
);
|
||||||
|
CREATE TABLE IF NOT EXISTS "user" (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
tier_id TEXT REFERENCES tier(id),
|
||||||
|
user_name TEXT NOT NULL UNIQUE,
|
||||||
|
pass TEXT NOT NULL,
|
||||||
|
role TEXT NOT NULL CHECK (role IN ('anonymous', 'admin', 'user')),
|
||||||
|
prefs JSONB NOT NULL DEFAULT '{}',
|
||||||
|
sync_topic TEXT NOT NULL,
|
||||||
|
provisioned BOOLEAN NOT NULL,
|
||||||
|
stats_messages BIGINT NOT NULL DEFAULT 0,
|
||||||
|
stats_emails BIGINT NOT NULL DEFAULT 0,
|
||||||
|
stats_calls BIGINT NOT NULL DEFAULT 0,
|
||||||
|
stripe_customer_id TEXT UNIQUE,
|
||||||
|
stripe_subscription_id TEXT UNIQUE,
|
||||||
|
stripe_subscription_status TEXT,
|
||||||
|
stripe_subscription_interval TEXT,
|
||||||
|
stripe_subscription_paid_until BIGINT,
|
||||||
|
stripe_subscription_cancel_at BIGINT,
|
||||||
|
created BIGINT NOT NULL,
|
||||||
|
deleted BIGINT
|
||||||
|
);
|
||||||
|
CREATE TABLE IF NOT EXISTS user_access (
|
||||||
|
user_id TEXT NOT NULL REFERENCES "user"(id) ON DELETE CASCADE,
|
||||||
|
topic TEXT NOT NULL,
|
||||||
|
read BOOLEAN NOT NULL,
|
||||||
|
write BOOLEAN NOT NULL,
|
||||||
|
owner_user_id TEXT REFERENCES "user"(id) ON DELETE CASCADE,
|
||||||
|
provisioned BOOLEAN NOT NULL,
|
||||||
|
PRIMARY KEY (user_id, topic)
|
||||||
|
);
|
||||||
|
CREATE TABLE IF NOT EXISTS user_token (
|
||||||
|
user_id TEXT NOT NULL REFERENCES "user"(id) ON DELETE CASCADE,
|
||||||
|
token TEXT NOT NULL UNIQUE,
|
||||||
|
label TEXT NOT NULL,
|
||||||
|
last_access BIGINT NOT NULL,
|
||||||
|
last_origin TEXT NOT NULL,
|
||||||
|
expires BIGINT NOT NULL,
|
||||||
|
provisioned BOOLEAN NOT NULL,
|
||||||
|
PRIMARY KEY (user_id, token)
|
||||||
|
);
|
||||||
|
CREATE TABLE IF NOT EXISTS user_phone (
|
||||||
|
user_id TEXT NOT NULL REFERENCES "user"(id) ON DELETE CASCADE,
|
||||||
|
phone_number TEXT NOT NULL,
|
||||||
|
PRIMARY KEY (user_id, phone_number)
|
||||||
|
);
|
||||||
|
CREATE TABLE IF NOT EXISTS schema_version (
|
||||||
|
store TEXT PRIMARY KEY,
|
||||||
|
version INT NOT NULL
|
||||||
|
);
|
||||||
|
INSERT INTO "user" (id, user_name, pass, role, sync_topic, provisioned, created)
|
||||||
|
VALUES ('` + everyoneID + `', '*', '', 'anonymous', '', false, EXTRACT(EPOCH FROM NOW())::BIGINT)
|
||||||
|
ON CONFLICT (id) DO NOTHING;
|
||||||
|
`
|
||||||
|
)
|
||||||
|
|
||||||
|
// Schema table management queries for Postgres
|
||||||
|
const (
|
||||||
|
postgresCurrentSchemaVersion = 6
|
||||||
|
postgresSelectSchemaVersionQuery = `SELECT version FROM schema_version WHERE store = 'user'`
|
||||||
|
postgresInsertSchemaVersionQuery = `INSERT INTO schema_version (store, version) VALUES ('user', $1)`
|
||||||
|
)
|
||||||
|
|
||||||
|
func setupPostgres(db *sql.DB) error {
|
||||||
|
var schemaVersion int
|
||||||
|
err := db.QueryRow(postgresSelectSchemaVersionQuery).Scan(&schemaVersion)
|
||||||
|
if err != nil {
|
||||||
|
return setupNewPostgres(db)
|
||||||
|
}
|
||||||
|
if schemaVersion > postgresCurrentSchemaVersion {
|
||||||
|
return fmt.Errorf("unexpected schema version: version %d is higher than current version %d", schemaVersion, postgresCurrentSchemaVersion)
|
||||||
|
}
|
||||||
|
// Note: PostgreSQL migrations will be added when needed
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupNewPostgres(db *sql.DB) error {
|
||||||
|
if _, err := db.Exec(postgresCreateTablesQueries); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(postgresInsertSchemaVersionQuery, postgresCurrentSchemaVersion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
278
user/manager_sqlite.go
Normal file
278
user/manager_sqlite.go
Normal file
@@ -0,0 +1,278 @@
|
|||||||
|
package user
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
_ "github.com/mattn/go-sqlite3" // SQLite driver
|
||||||
|
|
||||||
|
"heckel.io/ntfy/v2/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// User queries
|
||||||
|
sqliteSelectUserByIDQuery = `
|
||||||
|
SELECT u.id, u.user, u.pass, u.role, u.prefs, u.sync_topic, u.provisioned, u.stats_messages, u.stats_emails, u.stats_calls, u.stripe_customer_id, u.stripe_subscription_id, u.stripe_subscription_status, u.stripe_subscription_interval, u.stripe_subscription_paid_until, u.stripe_subscription_cancel_at, deleted, t.id, t.code, t.name, t.messages_limit, t.messages_expiry_duration, t.emails_limit, t.calls_limit, t.reservations_limit, t.attachment_file_size_limit, t.attachment_total_size_limit, t.attachment_expiry_duration, t.attachment_bandwidth_limit, t.stripe_monthly_price_id, t.stripe_yearly_price_id
|
||||||
|
FROM user u
|
||||||
|
LEFT JOIN tier t on t.id = u.tier_id
|
||||||
|
WHERE u.id = ?
|
||||||
|
`
|
||||||
|
sqliteSelectUserByNameQuery = `
|
||||||
|
SELECT u.id, u.user, u.pass, u.role, u.prefs, u.sync_topic, u.provisioned, u.stats_messages, u.stats_emails, u.stats_calls, u.stripe_customer_id, u.stripe_subscription_id, u.stripe_subscription_status, u.stripe_subscription_interval, u.stripe_subscription_paid_until, u.stripe_subscription_cancel_at, deleted, t.id, t.code, t.name, t.messages_limit, t.messages_expiry_duration, t.emails_limit, t.calls_limit, t.reservations_limit, t.attachment_file_size_limit, t.attachment_total_size_limit, t.attachment_expiry_duration, t.attachment_bandwidth_limit, t.stripe_monthly_price_id, t.stripe_yearly_price_id
|
||||||
|
FROM user u
|
||||||
|
LEFT JOIN tier t on t.id = u.tier_id
|
||||||
|
WHERE user = ?
|
||||||
|
`
|
||||||
|
sqliteSelectUserByTokenQuery = `
|
||||||
|
SELECT u.id, u.user, u.pass, u.role, u.prefs, u.sync_topic, u.provisioned, u.stats_messages, u.stats_emails, u.stats_calls, u.stripe_customer_id, u.stripe_subscription_id, u.stripe_subscription_status, u.stripe_subscription_interval, u.stripe_subscription_paid_until, u.stripe_subscription_cancel_at, deleted, t.id, t.code, t.name, t.messages_limit, t.messages_expiry_duration, t.emails_limit, t.calls_limit, t.reservations_limit, t.attachment_file_size_limit, t.attachment_total_size_limit, t.attachment_expiry_duration, t.attachment_bandwidth_limit, t.stripe_monthly_price_id, t.stripe_yearly_price_id
|
||||||
|
FROM user u
|
||||||
|
JOIN user_token tk on u.id = tk.user_id
|
||||||
|
LEFT JOIN tier t on t.id = u.tier_id
|
||||||
|
WHERE tk.token = ? AND (tk.expires = 0 OR tk.expires >= ?)
|
||||||
|
`
|
||||||
|
sqliteSelectUserByStripeCustomerIDQuery = `
|
||||||
|
SELECT u.id, u.user, u.pass, u.role, u.prefs, u.sync_topic, u.provisioned, u.stats_messages, u.stats_emails, u.stats_calls, u.stripe_customer_id, u.stripe_subscription_id, u.stripe_subscription_status, u.stripe_subscription_interval, u.stripe_subscription_paid_until, u.stripe_subscription_cancel_at, deleted, t.id, t.code, t.name, t.messages_limit, t.messages_expiry_duration, t.emails_limit, t.calls_limit, t.reservations_limit, t.attachment_file_size_limit, t.attachment_total_size_limit, t.attachment_expiry_duration, t.attachment_bandwidth_limit, t.stripe_monthly_price_id, t.stripe_yearly_price_id
|
||||||
|
FROM user u
|
||||||
|
LEFT JOIN tier t on t.id = u.tier_id
|
||||||
|
WHERE u.stripe_customer_id = ?
|
||||||
|
`
|
||||||
|
sqliteSelectUsernamesQuery = `
|
||||||
|
SELECT user
|
||||||
|
FROM user
|
||||||
|
ORDER BY
|
||||||
|
CASE role
|
||||||
|
WHEN 'admin' THEN 1
|
||||||
|
WHEN 'anonymous' THEN 3
|
||||||
|
ELSE 2
|
||||||
|
END, user
|
||||||
|
`
|
||||||
|
sqliteSelectUserCountQuery = `SELECT COUNT(*) FROM user`
|
||||||
|
sqliteSelectUserIDFromUsernameQuery = `SELECT id FROM user WHERE user = ?`
|
||||||
|
sqliteInsertUserQuery = `INSERT INTO user (id, user, pass, role, sync_topic, provisioned, created) VALUES (?, ?, ?, ?, ?, ?, ?)`
|
||||||
|
sqliteUpdateUserPassQuery = `UPDATE user SET pass = ? WHERE user = ?`
|
||||||
|
sqliteUpdateUserRoleQuery = `UPDATE user SET role = ? WHERE user = ?`
|
||||||
|
sqliteUpdateUserProvisionedQuery = `UPDATE user SET provisioned = ? WHERE user = ?`
|
||||||
|
sqliteUpdateUserPrefsQuery = `UPDATE user SET prefs = ? WHERE id = ?`
|
||||||
|
sqliteUpdateUserStatsQuery = `UPDATE user SET stats_messages = ?, stats_emails = ?, stats_calls = ? WHERE id = ?`
|
||||||
|
sqliteUpdateUserStatsResetAllQuery = `UPDATE user SET stats_messages = 0, stats_emails = 0, stats_calls = 0`
|
||||||
|
sqliteUpdateUserTierQuery = `UPDATE user SET tier_id = (SELECT id FROM tier WHERE code = ?) WHERE user = ?`
|
||||||
|
sqliteUpdateUserDeletedQuery = `UPDATE user SET deleted = ? WHERE id = ?`
|
||||||
|
sqliteDeleteUserQuery = `DELETE FROM user WHERE user = ?`
|
||||||
|
sqliteDeleteUserTierQuery = `UPDATE user SET tier_id = null WHERE user = ?`
|
||||||
|
sqliteDeleteUsersMarkedQuery = `DELETE FROM user WHERE deleted < ?`
|
||||||
|
|
||||||
|
// Access queries
|
||||||
|
sqliteSelectTopicPermsQuery = `
|
||||||
|
SELECT read, write
|
||||||
|
FROM user_access a
|
||||||
|
JOIN user u ON u.id = a.user_id
|
||||||
|
WHERE (u.user = ? OR u.user = ?) AND ? LIKE a.topic ESCAPE '\'
|
||||||
|
ORDER BY u.user DESC, LENGTH(a.topic) DESC, a.write DESC
|
||||||
|
`
|
||||||
|
sqliteSelectUserAllAccessQuery = `
|
||||||
|
SELECT user_id, topic, read, write, provisioned
|
||||||
|
FROM user_access
|
||||||
|
ORDER BY LENGTH(topic) DESC, write DESC, read DESC, topic
|
||||||
|
`
|
||||||
|
sqliteSelectUserAccessQuery = `
|
||||||
|
SELECT topic, read, write, provisioned
|
||||||
|
FROM user_access
|
||||||
|
WHERE user_id = (SELECT id FROM user WHERE user = ?)
|
||||||
|
ORDER BY LENGTH(topic) DESC, write DESC, read DESC, topic
|
||||||
|
`
|
||||||
|
sqliteSelectUserReservationsQuery = `
|
||||||
|
SELECT a_user.topic, a_user.read, a_user.write, a_everyone.read AS everyone_read, a_everyone.write AS everyone_write
|
||||||
|
FROM user_access a_user
|
||||||
|
LEFT JOIN user_access a_everyone ON a_user.topic = a_everyone.topic AND a_everyone.user_id = (SELECT id FROM user WHERE user = ?)
|
||||||
|
WHERE a_user.user_id = a_user.owner_user_id
|
||||||
|
AND a_user.owner_user_id = (SELECT id FROM user WHERE user = ?)
|
||||||
|
ORDER BY a_user.topic
|
||||||
|
`
|
||||||
|
sqliteSelectUserReservationsCountQuery = `
|
||||||
|
SELECT COUNT(*)
|
||||||
|
FROM user_access
|
||||||
|
WHERE user_id = owner_user_id
|
||||||
|
AND owner_user_id = (SELECT id FROM user WHERE user = ?)
|
||||||
|
`
|
||||||
|
sqliteSelectUserReservationsOwnerQuery = `
|
||||||
|
SELECT owner_user_id
|
||||||
|
FROM user_access
|
||||||
|
WHERE topic = ?
|
||||||
|
AND user_id = owner_user_id
|
||||||
|
`
|
||||||
|
sqliteSelectUserHasReservationQuery = `
|
||||||
|
SELECT COUNT(*)
|
||||||
|
FROM user_access
|
||||||
|
WHERE user_id = owner_user_id
|
||||||
|
AND owner_user_id = (SELECT id FROM user WHERE user = ?)
|
||||||
|
AND topic = ?
|
||||||
|
`
|
||||||
|
sqliteSelectOtherAccessCountQuery = `
|
||||||
|
SELECT COUNT(*)
|
||||||
|
FROM user_access
|
||||||
|
WHERE (topic = ? OR ? LIKE topic ESCAPE '\')
|
||||||
|
AND (owner_user_id IS NULL OR owner_user_id != (SELECT id FROM user WHERE user = ?))
|
||||||
|
`
|
||||||
|
sqliteUpsertUserAccessQuery = `
|
||||||
|
INSERT INTO user_access (user_id, topic, read, write, owner_user_id, provisioned)
|
||||||
|
VALUES ((SELECT id FROM user WHERE user = ?), ?, ?, ?, (SELECT IIF(?='',NULL,(SELECT id FROM user WHERE user=?))), ?)
|
||||||
|
ON CONFLICT (user_id, topic)
|
||||||
|
DO UPDATE SET read=excluded.read, write=excluded.write, owner_user_id=excluded.owner_user_id, provisioned=excluded.provisioned
|
||||||
|
`
|
||||||
|
sqliteDeleteUserAccessQuery = `
|
||||||
|
DELETE FROM user_access
|
||||||
|
WHERE user_id = (SELECT id FROM user WHERE user = ?)
|
||||||
|
OR owner_user_id = (SELECT id FROM user WHERE user = ?)
|
||||||
|
`
|
||||||
|
sqliteDeleteUserAccessProvisionedQuery = `DELETE FROM user_access WHERE provisioned = 1`
|
||||||
|
sqliteDeleteTopicAccessQuery = `
|
||||||
|
DELETE FROM user_access
|
||||||
|
WHERE (user_id = (SELECT id FROM user WHERE user = ?) OR owner_user_id = (SELECT id FROM user WHERE user = ?))
|
||||||
|
AND topic = ?
|
||||||
|
`
|
||||||
|
sqliteDeleteAllAccessQuery = `DELETE FROM user_access`
|
||||||
|
|
||||||
|
// Token queries
|
||||||
|
sqliteSelectTokenQuery = `SELECT token, label, last_access, last_origin, expires, provisioned FROM user_token WHERE user_id = ? AND token = ?`
|
||||||
|
sqliteSelectTokensQuery = `SELECT token, label, last_access, last_origin, expires, provisioned FROM user_token WHERE user_id = ?`
|
||||||
|
sqliteSelectTokenCountQuery = `SELECT COUNT(*) FROM user_token WHERE user_id = ?`
|
||||||
|
sqliteSelectAllProvisionedTokensQuery = `SELECT token, label, last_access, last_origin, expires, provisioned FROM user_token WHERE provisioned = 1`
|
||||||
|
sqliteUpsertTokenQuery = `
|
||||||
|
INSERT INTO user_token (user_id, token, label, last_access, last_origin, expires, provisioned)
|
||||||
|
VALUES (?, ?, ?, ?, ?, ?, ?)
|
||||||
|
ON CONFLICT (user_id, token)
|
||||||
|
DO UPDATE SET label = excluded.label, expires = excluded.expires, provisioned = excluded.provisioned
|
||||||
|
`
|
||||||
|
sqliteUpdateTokenQuery = `UPDATE user_token SET label = ?, expires = ? WHERE user_id = ? AND token = ?`
|
||||||
|
sqliteUpdateTokenLastAccessQuery = `UPDATE user_token SET last_access = ?, last_origin = ? WHERE token = ?`
|
||||||
|
sqliteDeleteTokenQuery = `DELETE FROM user_token WHERE user_id = ? AND token = ?`
|
||||||
|
sqliteDeleteProvisionedTokenQuery = `DELETE FROM user_token WHERE token = ?`
|
||||||
|
sqliteDeleteAllTokenQuery = `DELETE FROM user_token WHERE user_id = ?`
|
||||||
|
sqliteDeleteExpiredTokensQuery = `DELETE FROM user_token WHERE expires > 0 AND expires < ?`
|
||||||
|
sqliteDeleteExcessTokensQuery = `
|
||||||
|
DELETE FROM user_token
|
||||||
|
WHERE user_id = ?
|
||||||
|
AND (user_id, token) NOT IN (
|
||||||
|
SELECT user_id, token
|
||||||
|
FROM user_token
|
||||||
|
WHERE user_id = ?
|
||||||
|
ORDER BY expires DESC
|
||||||
|
LIMIT ?
|
||||||
|
)
|
||||||
|
`
|
||||||
|
|
||||||
|
// Tier queries
|
||||||
|
sqliteInsertTierQuery = `
|
||||||
|
INSERT INTO tier (id, code, name, messages_limit, messages_expiry_duration, emails_limit, calls_limit, reservations_limit, attachment_file_size_limit, attachment_total_size_limit, attachment_expiry_duration, attachment_bandwidth_limit, stripe_monthly_price_id, stripe_yearly_price_id)
|
||||||
|
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||||
|
`
|
||||||
|
sqliteUpdateTierQuery = `
|
||||||
|
UPDATE tier
|
||||||
|
SET name = ?, messages_limit = ?, messages_expiry_duration = ?, emails_limit = ?, calls_limit = ?, reservations_limit = ?, attachment_file_size_limit = ?, attachment_total_size_limit = ?, attachment_expiry_duration = ?, attachment_bandwidth_limit = ?, stripe_monthly_price_id = ?, stripe_yearly_price_id = ?
|
||||||
|
WHERE code = ?
|
||||||
|
`
|
||||||
|
sqliteSelectTiersQuery = `
|
||||||
|
SELECT id, code, name, messages_limit, messages_expiry_duration, emails_limit, calls_limit, reservations_limit, attachment_file_size_limit, attachment_total_size_limit, attachment_expiry_duration, attachment_bandwidth_limit, stripe_monthly_price_id, stripe_yearly_price_id
|
||||||
|
FROM tier
|
||||||
|
`
|
||||||
|
sqliteSelectTierByCodeQuery = `
|
||||||
|
SELECT id, code, name, messages_limit, messages_expiry_duration, emails_limit, calls_limit, reservations_limit, attachment_file_size_limit, attachment_total_size_limit, attachment_expiry_duration, attachment_bandwidth_limit, stripe_monthly_price_id, stripe_yearly_price_id
|
||||||
|
FROM tier
|
||||||
|
WHERE code = ?
|
||||||
|
`
|
||||||
|
sqliteSelectTierByPriceIDQuery = `
|
||||||
|
SELECT id, code, name, messages_limit, messages_expiry_duration, emails_limit, calls_limit, reservations_limit, attachment_file_size_limit, attachment_total_size_limit, attachment_expiry_duration, attachment_bandwidth_limit, stripe_monthly_price_id, stripe_yearly_price_id
|
||||||
|
FROM tier
|
||||||
|
WHERE (stripe_monthly_price_id = ? OR stripe_yearly_price_id = ?)
|
||||||
|
`
|
||||||
|
sqliteDeleteTierQuery = `DELETE FROM tier WHERE code = ?`
|
||||||
|
|
||||||
|
// Phone queries
|
||||||
|
sqliteSelectPhoneNumbersQuery = `SELECT phone_number FROM user_phone WHERE user_id = ?`
|
||||||
|
sqliteInsertPhoneNumberQuery = `INSERT INTO user_phone (user_id, phone_number) VALUES (?, ?)`
|
||||||
|
sqliteDeletePhoneNumberQuery = `DELETE FROM user_phone WHERE user_id = ? AND phone_number = ?`
|
||||||
|
|
||||||
|
// Billing queries
|
||||||
|
sqliteUpdateBillingQuery = `
|
||||||
|
UPDATE user
|
||||||
|
SET stripe_customer_id = ?, stripe_subscription_id = ?, stripe_subscription_status = ?, stripe_subscription_interval = ?, stripe_subscription_paid_until = ?, stripe_subscription_cancel_at = ?
|
||||||
|
WHERE user = ?
|
||||||
|
`
|
||||||
|
)
|
||||||
|
|
||||||
|
var sqliteQueries = queries{
|
||||||
|
selectUserByID: sqliteSelectUserByIDQuery,
|
||||||
|
selectUserByName: sqliteSelectUserByNameQuery,
|
||||||
|
selectUserByToken: sqliteSelectUserByTokenQuery,
|
||||||
|
selectUserByStripeCustomerID: sqliteSelectUserByStripeCustomerIDQuery,
|
||||||
|
selectUsernames: sqliteSelectUsernamesQuery,
|
||||||
|
selectUserCount: sqliteSelectUserCountQuery,
|
||||||
|
selectUserIDFromUsername: sqliteSelectUserIDFromUsernameQuery,
|
||||||
|
insertUser: sqliteInsertUserQuery,
|
||||||
|
updateUserPass: sqliteUpdateUserPassQuery,
|
||||||
|
updateUserRole: sqliteUpdateUserRoleQuery,
|
||||||
|
updateUserProvisioned: sqliteUpdateUserProvisionedQuery,
|
||||||
|
updateUserPrefs: sqliteUpdateUserPrefsQuery,
|
||||||
|
updateUserStats: sqliteUpdateUserStatsQuery,
|
||||||
|
updateUserStatsResetAll: sqliteUpdateUserStatsResetAllQuery,
|
||||||
|
updateUserTier: sqliteUpdateUserTierQuery,
|
||||||
|
updateUserDeleted: sqliteUpdateUserDeletedQuery,
|
||||||
|
deleteUser: sqliteDeleteUserQuery,
|
||||||
|
deleteUserTier: sqliteDeleteUserTierQuery,
|
||||||
|
deleteUsersMarked: sqliteDeleteUsersMarkedQuery,
|
||||||
|
selectTopicPerms: sqliteSelectTopicPermsQuery,
|
||||||
|
selectUserAllAccess: sqliteSelectUserAllAccessQuery,
|
||||||
|
selectUserAccess: sqliteSelectUserAccessQuery,
|
||||||
|
selectUserReservations: sqliteSelectUserReservationsQuery,
|
||||||
|
selectUserReservationsCount: sqliteSelectUserReservationsCountQuery,
|
||||||
|
selectUserReservationsOwner: sqliteSelectUserReservationsOwnerQuery,
|
||||||
|
selectUserHasReservation: sqliteSelectUserHasReservationQuery,
|
||||||
|
selectOtherAccessCount: sqliteSelectOtherAccessCountQuery,
|
||||||
|
upsertUserAccess: sqliteUpsertUserAccessQuery,
|
||||||
|
deleteUserAccess: sqliteDeleteUserAccessQuery,
|
||||||
|
deleteUserAccessProvisioned: sqliteDeleteUserAccessProvisionedQuery,
|
||||||
|
deleteTopicAccess: sqliteDeleteTopicAccessQuery,
|
||||||
|
deleteAllAccess: sqliteDeleteAllAccessQuery,
|
||||||
|
selectToken: sqliteSelectTokenQuery,
|
||||||
|
selectTokens: sqliteSelectTokensQuery,
|
||||||
|
selectTokenCount: sqliteSelectTokenCountQuery,
|
||||||
|
selectAllProvisionedTokens: sqliteSelectAllProvisionedTokensQuery,
|
||||||
|
upsertToken: sqliteUpsertTokenQuery,
|
||||||
|
updateToken: sqliteUpdateTokenQuery,
|
||||||
|
updateTokenLastAccess: sqliteUpdateTokenLastAccessQuery,
|
||||||
|
deleteToken: sqliteDeleteTokenQuery,
|
||||||
|
deleteProvisionedToken: sqliteDeleteProvisionedTokenQuery,
|
||||||
|
deleteAllToken: sqliteDeleteAllTokenQuery,
|
||||||
|
deleteExpiredTokens: sqliteDeleteExpiredTokensQuery,
|
||||||
|
deleteExcessTokens: sqliteDeleteExcessTokensQuery,
|
||||||
|
insertTier: sqliteInsertTierQuery,
|
||||||
|
selectTiers: sqliteSelectTiersQuery,
|
||||||
|
selectTierByCode: sqliteSelectTierByCodeQuery,
|
||||||
|
selectTierByPriceID: sqliteSelectTierByPriceIDQuery,
|
||||||
|
updateTier: sqliteUpdateTierQuery,
|
||||||
|
deleteTier: sqliteDeleteTierQuery,
|
||||||
|
selectPhoneNumbers: sqliteSelectPhoneNumbersQuery,
|
||||||
|
insertPhoneNumber: sqliteInsertPhoneNumberQuery,
|
||||||
|
deletePhoneNumber: sqliteDeletePhoneNumberQuery,
|
||||||
|
updateBilling: sqliteUpdateBillingQuery,
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSQLiteManager creates a new Manager backed by a SQLite database
|
||||||
|
func NewSQLiteManager(filename, startupQueries string, config *Config) (*Manager, error) {
|
||||||
|
parentDir := filepath.Dir(filename)
|
||||||
|
if !util.FileExists(parentDir) {
|
||||||
|
return nil, fmt.Errorf("user database directory %s does not exist or is not accessible", parentDir)
|
||||||
|
}
|
||||||
|
db, err := sql.Open("sqlite3", filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := setupSQLite(db); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := runSQLiteStartupQueries(db, startupQueries); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newManager(db, sqliteQueries, config)
|
||||||
|
}
|
||||||
@@ -2,19 +2,116 @@ package user
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"database/sql"
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
"heckel.io/ntfy/v2/log"
|
"heckel.io/ntfy/v2/log"
|
||||||
"heckel.io/ntfy/v2/util"
|
"heckel.io/ntfy/v2/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Schema management queries
|
// Initial SQLite schema
|
||||||
const (
|
const (
|
||||||
currentSchemaVersion = 6
|
sqliteCreateTablesQueries = `
|
||||||
insertSchemaVersion = `INSERT INTO schemaVersion VALUES (1, ?)`
|
BEGIN;
|
||||||
updateSchemaVersion = `UPDATE schemaVersion SET version = ? WHERE id = 1`
|
CREATE TABLE IF NOT EXISTS tier (
|
||||||
selectSchemaVersionQuery = `SELECT version FROM schemaVersion WHERE id = 1`
|
id TEXT PRIMARY KEY,
|
||||||
|
code TEXT NOT NULL,
|
||||||
|
name TEXT NOT NULL,
|
||||||
|
messages_limit INT NOT NULL,
|
||||||
|
messages_expiry_duration INT NOT NULL,
|
||||||
|
emails_limit INT NOT NULL,
|
||||||
|
calls_limit INT NOT NULL,
|
||||||
|
reservations_limit INT NOT NULL,
|
||||||
|
attachment_file_size_limit INT NOT NULL,
|
||||||
|
attachment_total_size_limit INT NOT NULL,
|
||||||
|
attachment_expiry_duration INT NOT NULL,
|
||||||
|
attachment_bandwidth_limit INT NOT NULL,
|
||||||
|
stripe_monthly_price_id TEXT,
|
||||||
|
stripe_yearly_price_id TEXT
|
||||||
|
);
|
||||||
|
CREATE UNIQUE INDEX idx_tier_code ON tier (code);
|
||||||
|
CREATE UNIQUE INDEX idx_tier_stripe_monthly_price_id ON tier (stripe_monthly_price_id);
|
||||||
|
CREATE UNIQUE INDEX idx_tier_stripe_yearly_price_id ON tier (stripe_yearly_price_id);
|
||||||
|
CREATE TABLE IF NOT EXISTS user (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
tier_id TEXT,
|
||||||
|
user TEXT NOT NULL,
|
||||||
|
pass TEXT NOT NULL,
|
||||||
|
role TEXT CHECK (role IN ('anonymous', 'admin', 'user')) NOT NULL,
|
||||||
|
prefs JSON NOT NULL DEFAULT '{}',
|
||||||
|
sync_topic TEXT NOT NULL,
|
||||||
|
provisioned INT NOT NULL,
|
||||||
|
stats_messages INT NOT NULL DEFAULT (0),
|
||||||
|
stats_emails INT NOT NULL DEFAULT (0),
|
||||||
|
stats_calls INT NOT NULL DEFAULT (0),
|
||||||
|
stripe_customer_id TEXT,
|
||||||
|
stripe_subscription_id TEXT,
|
||||||
|
stripe_subscription_status TEXT,
|
||||||
|
stripe_subscription_interval TEXT,
|
||||||
|
stripe_subscription_paid_until INT,
|
||||||
|
stripe_subscription_cancel_at INT,
|
||||||
|
created INT NOT NULL,
|
||||||
|
deleted INT,
|
||||||
|
FOREIGN KEY (tier_id) REFERENCES tier (id)
|
||||||
|
);
|
||||||
|
CREATE UNIQUE INDEX idx_user ON user (user);
|
||||||
|
CREATE UNIQUE INDEX idx_user_stripe_customer_id ON user (stripe_customer_id);
|
||||||
|
CREATE UNIQUE INDEX idx_user_stripe_subscription_id ON user (stripe_subscription_id);
|
||||||
|
CREATE TABLE IF NOT EXISTS user_access (
|
||||||
|
user_id TEXT NOT NULL,
|
||||||
|
topic TEXT NOT NULL,
|
||||||
|
read INT NOT NULL,
|
||||||
|
write INT NOT NULL,
|
||||||
|
owner_user_id INT,
|
||||||
|
provisioned INT NOT NULL,
|
||||||
|
PRIMARY KEY (user_id, topic),
|
||||||
|
FOREIGN KEY (user_id) REFERENCES user (id) ON DELETE CASCADE,
|
||||||
|
FOREIGN KEY (owner_user_id) REFERENCES user (id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
CREATE TABLE IF NOT EXISTS user_token (
|
||||||
|
user_id TEXT NOT NULL,
|
||||||
|
token TEXT NOT NULL,
|
||||||
|
label TEXT NOT NULL,
|
||||||
|
last_access INT NOT NULL,
|
||||||
|
last_origin TEXT NOT NULL,
|
||||||
|
expires INT NOT NULL,
|
||||||
|
provisioned INT NOT NULL,
|
||||||
|
PRIMARY KEY (user_id, token),
|
||||||
|
FOREIGN KEY (user_id) REFERENCES user (id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
CREATE UNIQUE INDEX idx_user_token ON user_token (token);
|
||||||
|
CREATE TABLE IF NOT EXISTS user_phone (
|
||||||
|
user_id TEXT NOT NULL,
|
||||||
|
phone_number TEXT NOT NULL,
|
||||||
|
PRIMARY KEY (user_id, phone_number),
|
||||||
|
FOREIGN KEY (user_id) REFERENCES user (id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
CREATE TABLE IF NOT EXISTS schemaVersion (
|
||||||
|
id INT PRIMARY KEY,
|
||||||
|
version INT NOT NULL
|
||||||
|
);
|
||||||
|
INSERT INTO user (id, user, pass, role, sync_topic, provisioned, created)
|
||||||
|
VALUES ('` + everyoneID + `', '*', '', 'anonymous', '', false, UNIXEPOCH())
|
||||||
|
ON CONFLICT (id) DO NOTHING;
|
||||||
|
COMMIT;
|
||||||
|
`
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
sqliteBuiltinStartupQueries = `PRAGMA foreign_keys = ON;`
|
||||||
|
)
|
||||||
|
|
||||||
|
// Schema version table management for SQLite
|
||||||
|
const (
|
||||||
|
sqliteCurrentSchemaVersion = 6
|
||||||
|
sqliteInsertSchemaVersionQuery = `INSERT INTO schemaVersion VALUES (1, ?)`
|
||||||
|
sqliteUpdateSchemaVersionQuery = `UPDATE schemaVersion SET version = ? WHERE id = 1`
|
||||||
|
sqliteSelectSchemaVersionQuery = `SELECT version FROM schemaVersion WHERE id = 1`
|
||||||
|
)
|
||||||
|
|
||||||
|
// Schema migrations for SQLite
|
||||||
|
const (
|
||||||
// 1 -> 2 (complex migration!)
|
// 1 -> 2 (complex migration!)
|
||||||
migrate1To2CreateTablesQueries = `
|
sqliteMigrate1To2CreateTablesQueries = `
|
||||||
ALTER TABLE user RENAME TO user_old;
|
ALTER TABLE user RENAME TO user_old;
|
||||||
CREATE TABLE IF NOT EXISTS tier (
|
CREATE TABLE IF NOT EXISTS tier (
|
||||||
id TEXT PRIMARY KEY,
|
id TEXT PRIMARY KEY,
|
||||||
@@ -82,12 +179,12 @@ const (
|
|||||||
VALUES ('u_everyone', '*', '', 'anonymous', '', UNIXEPOCH())
|
VALUES ('u_everyone', '*', '', 'anonymous', '', UNIXEPOCH())
|
||||||
ON CONFLICT (id) DO NOTHING;
|
ON CONFLICT (id) DO NOTHING;
|
||||||
`
|
`
|
||||||
migrate1To2SelectAllOldUsernamesNoTx = `SELECT user FROM user_old`
|
sqliteMigrate1To2SelectAllOldUsernamesNoTxQuery = `SELECT user FROM user_old`
|
||||||
migrate1To2InsertUserNoTx = `
|
sqliteMigrate1To2InsertUserNoTxQuery = `
|
||||||
INSERT INTO user (id, user, pass, role, sync_topic, created)
|
INSERT INTO user (id, user, pass, role, sync_topic, created)
|
||||||
SELECT ?, user, pass, role, ?, UNIXEPOCH() FROM user_old WHERE user = ?
|
SELECT ?, user, pass, role, ?, UNIXEPOCH() FROM user_old WHERE user = ?
|
||||||
`
|
`
|
||||||
migrate1To2InsertFromOldTablesAndDropNoTx = `
|
sqliteMigrate1To2InsertFromOldTablesAndDropNoTxQuery = `
|
||||||
INSERT INTO user_access (user_id, topic, read, write)
|
INSERT INTO user_access (user_id, topic, read, write)
|
||||||
SELECT u.id, a.topic, a.read, a.write
|
SELECT u.id, a.topic, a.read, a.write
|
||||||
FROM user u
|
FROM user u
|
||||||
@@ -98,7 +195,7 @@ const (
|
|||||||
`
|
`
|
||||||
|
|
||||||
// 2 -> 3
|
// 2 -> 3
|
||||||
migrate2To3UpdateQueries = `
|
sqliteMigrate2To3UpdateQueries = `
|
||||||
ALTER TABLE user ADD COLUMN stripe_subscription_interval TEXT;
|
ALTER TABLE user ADD COLUMN stripe_subscription_interval TEXT;
|
||||||
ALTER TABLE tier RENAME COLUMN stripe_price_id TO stripe_monthly_price_id;
|
ALTER TABLE tier RENAME COLUMN stripe_price_id TO stripe_monthly_price_id;
|
||||||
ALTER TABLE tier ADD COLUMN stripe_yearly_price_id TEXT;
|
ALTER TABLE tier ADD COLUMN stripe_yearly_price_id TEXT;
|
||||||
@@ -108,7 +205,7 @@ const (
|
|||||||
`
|
`
|
||||||
|
|
||||||
// 3 -> 4
|
// 3 -> 4
|
||||||
migrate3To4UpdateQueries = `
|
sqliteMigrate3To4UpdateQueries = `
|
||||||
ALTER TABLE tier ADD COLUMN calls_limit INT NOT NULL DEFAULT (0);
|
ALTER TABLE tier ADD COLUMN calls_limit INT NOT NULL DEFAULT (0);
|
||||||
ALTER TABLE user ADD COLUMN stats_calls INT NOT NULL DEFAULT (0);
|
ALTER TABLE user ADD COLUMN stats_calls INT NOT NULL DEFAULT (0);
|
||||||
CREATE TABLE IF NOT EXISTS user_phone (
|
CREATE TABLE IF NOT EXISTS user_phone (
|
||||||
@@ -120,12 +217,12 @@ const (
|
|||||||
`
|
`
|
||||||
|
|
||||||
// 4 -> 5
|
// 4 -> 5
|
||||||
migrate4To5UpdateQueries = `
|
sqliteMigrate4To5UpdateQueries = `
|
||||||
UPDATE user_access SET topic = REPLACE(topic, '_', '\_');
|
UPDATE user_access SET topic = REPLACE(topic, '_', '\_');
|
||||||
`
|
`
|
||||||
|
|
||||||
// 5 -> 6
|
// 5 -> 6
|
||||||
migrate5To6UpdateQueries = `
|
sqliteMigrate5To6UpdateQueries = `
|
||||||
PRAGMA foreign_keys=off;
|
PRAGMA foreign_keys=off;
|
||||||
|
|
||||||
-- Alter user table: Add provisioned column
|
-- Alter user table: Add provisioned column
|
||||||
@@ -220,16 +317,60 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
migrations = map[int]func(db *sql.DB) error{
|
sqliteMigrations = map[int]func(db *sql.DB) error{
|
||||||
1: migrateFrom1,
|
1: sqliteMigrateFrom1,
|
||||||
2: migrateFrom2,
|
2: sqliteMigrateFrom2,
|
||||||
3: migrateFrom3,
|
3: sqliteMigrateFrom3,
|
||||||
4: migrateFrom4,
|
4: sqliteMigrateFrom4,
|
||||||
5: migrateFrom5,
|
5: sqliteMigrateFrom5,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func migrateFrom1(db *sql.DB) error {
|
func setupSQLite(db *sql.DB) error {
|
||||||
|
var schemaVersion int
|
||||||
|
err := db.QueryRow(sqliteSelectSchemaVersionQuery).Scan(&schemaVersion)
|
||||||
|
if err != nil {
|
||||||
|
return setupNewSQLite(db)
|
||||||
|
}
|
||||||
|
if schemaVersion == sqliteCurrentSchemaVersion {
|
||||||
|
return nil
|
||||||
|
} else if schemaVersion > sqliteCurrentSchemaVersion {
|
||||||
|
return fmt.Errorf("unexpected schema version: version %d is higher than current version %d", schemaVersion, sqliteCurrentSchemaVersion)
|
||||||
|
}
|
||||||
|
for i := schemaVersion; i < sqliteCurrentSchemaVersion; i++ {
|
||||||
|
fn, ok := sqliteMigrations[i]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("cannot find migration step from schema version %d to %d", i, i+1)
|
||||||
|
} else if err := fn(db); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupNewSQLite(db *sql.DB) error {
|
||||||
|
if _, err := db.Exec(sqliteCreateTablesQueries); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteInsertSchemaVersionQuery, sqliteCurrentSchemaVersion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func runSQLiteStartupQueries(db *sql.DB, startupQueries string) error {
|
||||||
|
if _, err := db.Exec(sqliteBuiltinStartupQueries); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if startupQueries != "" {
|
||||||
|
if _, err := db.Exec(startupQueries); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func sqliteMigrateFrom1(db *sql.DB) error {
|
||||||
log.Tag(tag).Info("Migrating user database schema: from 1 to 2")
|
log.Tag(tag).Info("Migrating user database schema: from 1 to 2")
|
||||||
tx, err := db.Begin()
|
tx, err := db.Begin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -237,11 +378,11 @@ func migrateFrom1(db *sql.DB) error {
|
|||||||
}
|
}
|
||||||
defer tx.Rollback()
|
defer tx.Rollback()
|
||||||
// Rename user -> user_old, and create new tables
|
// Rename user -> user_old, and create new tables
|
||||||
if _, err := tx.Exec(migrate1To2CreateTablesQueries); err != nil {
|
if _, err := tx.Exec(sqliteMigrate1To2CreateTablesQueries); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// Insert users from user_old into new user table, with ID and sync_topic
|
// Insert users from user_old into new user table, with ID and sync_topic
|
||||||
rows, err := tx.Query(migrate1To2SelectAllOldUsernamesNoTx)
|
rows, err := tx.Query(sqliteMigrate1To2SelectAllOldUsernamesNoTxQuery)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -260,15 +401,15 @@ func migrateFrom1(db *sql.DB) error {
|
|||||||
for _, username := range usernames {
|
for _, username := range usernames {
|
||||||
userID := util.RandomStringPrefix(userIDPrefix, userIDLength)
|
userID := util.RandomStringPrefix(userIDPrefix, userIDLength)
|
||||||
syncTopic := util.RandomStringPrefix(syncTopicPrefix, syncTopicLength)
|
syncTopic := util.RandomStringPrefix(syncTopicPrefix, syncTopicLength)
|
||||||
if _, err := tx.Exec(migrate1To2InsertUserNoTx, userID, syncTopic, username); err != nil {
|
if _, err := tx.Exec(sqliteMigrate1To2InsertUserNoTxQuery, userID, syncTopic, username); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Migrate old "access" table to "user_access" and drop "access" and "user_old"
|
// Migrate old "access" table to "user_access" and drop "access" and "user_old"
|
||||||
if _, err := tx.Exec(migrate1To2InsertFromOldTablesAndDropNoTx); err != nil {
|
if _, err := tx.Exec(sqliteMigrate1To2InsertFromOldTablesAndDropNoTxQuery); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if _, err := tx.Exec(updateSchemaVersion, 2); err != nil {
|
if _, err := tx.Exec(sqliteUpdateSchemaVersionQuery, 2); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := tx.Commit(); err != nil {
|
if err := tx.Commit(); err != nil {
|
||||||
@@ -277,65 +418,65 @@ func migrateFrom1(db *sql.DB) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func migrateFrom2(db *sql.DB) error {
|
func sqliteMigrateFrom2(db *sql.DB) error {
|
||||||
log.Tag(tag).Info("Migrating user database schema: from 2 to 3")
|
log.Tag(tag).Info("Migrating user database schema: from 2 to 3")
|
||||||
tx, err := db.Begin()
|
tx, err := db.Begin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer tx.Rollback()
|
defer tx.Rollback()
|
||||||
if _, err := tx.Exec(migrate2To3UpdateQueries); err != nil {
|
if _, err := tx.Exec(sqliteMigrate2To3UpdateQueries); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if _, err := tx.Exec(updateSchemaVersion, 3); err != nil {
|
if _, err := tx.Exec(sqliteUpdateSchemaVersionQuery, 3); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return tx.Commit()
|
return tx.Commit()
|
||||||
}
|
}
|
||||||
|
|
||||||
func migrateFrom3(db *sql.DB) error {
|
func sqliteMigrateFrom3(db *sql.DB) error {
|
||||||
log.Tag(tag).Info("Migrating user database schema: from 3 to 4")
|
log.Tag(tag).Info("Migrating user database schema: from 3 to 4")
|
||||||
tx, err := db.Begin()
|
tx, err := db.Begin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer tx.Rollback()
|
defer tx.Rollback()
|
||||||
if _, err := tx.Exec(migrate3To4UpdateQueries); err != nil {
|
if _, err := tx.Exec(sqliteMigrate3To4UpdateQueries); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if _, err := tx.Exec(updateSchemaVersion, 4); err != nil {
|
if _, err := tx.Exec(sqliteUpdateSchemaVersionQuery, 4); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return tx.Commit()
|
return tx.Commit()
|
||||||
}
|
}
|
||||||
|
|
||||||
func migrateFrom4(db *sql.DB) error {
|
func sqliteMigrateFrom4(db *sql.DB) error {
|
||||||
log.Tag(tag).Info("Migrating user database schema: from 4 to 5")
|
log.Tag(tag).Info("Migrating user database schema: from 4 to 5")
|
||||||
tx, err := db.Begin()
|
tx, err := db.Begin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer tx.Rollback()
|
defer tx.Rollback()
|
||||||
if _, err := tx.Exec(migrate4To5UpdateQueries); err != nil {
|
if _, err := tx.Exec(sqliteMigrate4To5UpdateQueries); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if _, err := tx.Exec(updateSchemaVersion, 5); err != nil {
|
if _, err := tx.Exec(sqliteUpdateSchemaVersionQuery, 5); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return tx.Commit()
|
return tx.Commit()
|
||||||
}
|
}
|
||||||
|
|
||||||
func migrateFrom5(db *sql.DB) error {
|
func sqliteMigrateFrom5(db *sql.DB) error {
|
||||||
log.Tag(tag).Info("Migrating user database schema: from 5 to 6")
|
log.Tag(tag).Info("Migrating user database schema: from 5 to 6")
|
||||||
tx, err := db.Begin()
|
tx, err := db.Begin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer tx.Rollback()
|
defer tx.Rollback()
|
||||||
if _, err := tx.Exec(migrate5To6UpdateQueries); err != nil {
|
if _, err := tx.Exec(sqliteMigrate5To6UpdateQueries); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if _, err := tx.Exec(updateSchemaVersion, 6); err != nil {
|
if _, err := tx.Exec(sqliteUpdateSchemaVersionQuery, 6); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return tx.Commit()
|
return tx.Commit()
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -2,11 +2,12 @@ package user
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"heckel.io/ntfy/v2/log"
|
|
||||||
"heckel.io/ntfy/v2/payments"
|
|
||||||
"net/netip"
|
"net/netip"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"heckel.io/ntfy/v2/log"
|
||||||
|
"heckel.io/ntfy/v2/payments"
|
||||||
)
|
)
|
||||||
|
|
||||||
// User is a struct that represents a user
|
// User is a struct that represents a user
|
||||||
@@ -242,6 +243,20 @@ const (
|
|||||||
everyoneID = "u_everyone"
|
everyoneID = "u_everyone"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Config holds the configuration for the user Manager
|
||||||
|
type Config struct {
|
||||||
|
Filename string // Database filename, e.g. "/var/lib/ntfy/user.db" (SQLite)
|
||||||
|
DatabaseURL string // Database connection string (PostgreSQL)
|
||||||
|
StartupQueries string // Queries to run on startup, e.g. to create initial users or tiers (SQLite only)
|
||||||
|
DefaultAccess Permission // Default permission if no ACL matches
|
||||||
|
ProvisionEnabled bool // Hack: Enable auto-provisioning of users and access grants, disabled for "ntfy user" commands
|
||||||
|
Users []*User // Predefined users to create on startup
|
||||||
|
Access map[string][]*Grant // Predefined access grants to create on startup (username -> []*Grant)
|
||||||
|
Tokens map[string][]*Token // Predefined users to create on startup (username -> []*Token)
|
||||||
|
QueueWriterInterval time.Duration // Interval for the async queue writer to flush stats and token updates to the database
|
||||||
|
BcryptCost int // Cost of generated passwords; lowering makes testing faster
|
||||||
|
}
|
||||||
|
|
||||||
// Error constants used by the package
|
// Error constants used by the package
|
||||||
var (
|
var (
|
||||||
ErrUnauthenticated = errors.New("unauthenticated")
|
ErrUnauthenticated = errors.New("unauthenticated")
|
||||||
@@ -259,3 +274,72 @@ var (
|
|||||||
ErrProvisionedUserChange = errors.New("cannot change or delete provisioned user")
|
ErrProvisionedUserChange = errors.New("cannot change or delete provisioned user")
|
||||||
ErrProvisionedTokenChange = errors.New("cannot change or delete provisioned token")
|
ErrProvisionedTokenChange = errors.New("cannot change or delete provisioned token")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// queries holds the database-specific SQL queries
|
||||||
|
type queries struct {
|
||||||
|
// User queries
|
||||||
|
selectUserByID string
|
||||||
|
selectUserByName string
|
||||||
|
selectUserByToken string
|
||||||
|
selectUserByStripeCustomerID string
|
||||||
|
selectUsernames string
|
||||||
|
selectUserCount string
|
||||||
|
selectUserIDFromUsername string
|
||||||
|
insertUser string
|
||||||
|
updateUserPass string
|
||||||
|
updateUserRole string
|
||||||
|
updateUserProvisioned string
|
||||||
|
updateUserPrefs string
|
||||||
|
updateUserStats string
|
||||||
|
updateUserStatsResetAll string
|
||||||
|
updateUserTier string
|
||||||
|
updateUserDeleted string
|
||||||
|
deleteUser string
|
||||||
|
deleteUserTier string
|
||||||
|
deleteUsersMarked string
|
||||||
|
|
||||||
|
// Access queries
|
||||||
|
selectTopicPerms string
|
||||||
|
selectUserAllAccess string
|
||||||
|
selectUserAccess string
|
||||||
|
selectUserReservations string
|
||||||
|
selectUserReservationsCount string
|
||||||
|
selectUserReservationsOwner string
|
||||||
|
selectUserHasReservation string
|
||||||
|
selectOtherAccessCount string
|
||||||
|
upsertUserAccess string
|
||||||
|
deleteUserAccess string
|
||||||
|
deleteUserAccessProvisioned string
|
||||||
|
deleteTopicAccess string
|
||||||
|
deleteAllAccess string
|
||||||
|
|
||||||
|
// Token queries
|
||||||
|
selectToken string
|
||||||
|
selectTokens string
|
||||||
|
selectTokenCount string
|
||||||
|
selectAllProvisionedTokens string
|
||||||
|
upsertToken string
|
||||||
|
updateToken string
|
||||||
|
updateTokenLastAccess string
|
||||||
|
deleteToken string
|
||||||
|
deleteProvisionedToken string
|
||||||
|
deleteAllToken string
|
||||||
|
deleteExpiredTokens string
|
||||||
|
deleteExcessTokens string
|
||||||
|
|
||||||
|
// Tier queries
|
||||||
|
insertTier string
|
||||||
|
selectTiers string
|
||||||
|
selectTierByCode string
|
||||||
|
selectTierByPriceID string
|
||||||
|
updateTier string
|
||||||
|
deleteTier string
|
||||||
|
|
||||||
|
// Phone queries
|
||||||
|
selectPhoneNumbers string
|
||||||
|
insertPhoneNumber string
|
||||||
|
deletePhoneNumber string
|
||||||
|
|
||||||
|
// Billing queries
|
||||||
|
updateBilling string
|
||||||
|
}
|
||||||
|
|||||||
40
user/util.go
40
user/util.go
@@ -94,6 +94,26 @@ func nullInt64(v int64) sql.NullInt64 {
|
|||||||
return sql.NullInt64{Int64: v, Valid: true}
|
return sql.NullInt64{Int64: v, Valid: true}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// toSQLWildcard converts a wildcard string to a SQL wildcard string. It only allows '*' as wildcards,
|
||||||
|
// and escapes '_', assuming '\' as escape character.
|
||||||
|
func toSQLWildcard(s string) string {
|
||||||
|
return escapeUnderscore(strings.ReplaceAll(s, "*", "%"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// fromSQLWildcard converts a SQL wildcard string to a wildcard string. It converts '%' to '*',
|
||||||
|
// and removes the '\_' escape character.
|
||||||
|
func fromSQLWildcard(s string) string {
|
||||||
|
return strings.ReplaceAll(unescapeUnderscore(s), "%", "*")
|
||||||
|
}
|
||||||
|
|
||||||
|
func escapeUnderscore(s string) string {
|
||||||
|
return strings.ReplaceAll(s, "_", "\\_")
|
||||||
|
}
|
||||||
|
|
||||||
|
func unescapeUnderscore(s string) string {
|
||||||
|
return strings.ReplaceAll(s, "\\_", "_")
|
||||||
|
}
|
||||||
|
|
||||||
// execTx executes a function in a transaction. If the function returns an error, the transaction is rolled back.
|
// execTx executes a function in a transaction. If the function returns an error, the transaction is rolled back.
|
||||||
func execTx(db *sql.DB, f func(tx *sql.Tx) error) error {
|
func execTx(db *sql.DB, f func(tx *sql.Tx) error) error {
|
||||||
tx, err := db.Begin()
|
tx, err := db.Begin()
|
||||||
@@ -125,23 +145,3 @@ func queryTx[T any](db *sql.DB, f func(tx *sql.Tx) (T, error)) (T, error) {
|
|||||||
}
|
}
|
||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// toSQLWildcard converts a wildcard string to a SQL wildcard string. It only allows '*' as wildcards,
|
|
||||||
// and escapes '_', assuming '\' as escape character.
|
|
||||||
func toSQLWildcard(s string) string {
|
|
||||||
return escapeUnderscore(strings.ReplaceAll(s, "*", "%"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// fromSQLWildcard converts a SQL wildcard string to a wildcard string. It converts '%' to '*',
|
|
||||||
// and removes the '\_' escape character.
|
|
||||||
func fromSQLWildcard(s string) string {
|
|
||||||
return strings.ReplaceAll(unescapeUnderscore(s), "%", "*")
|
|
||||||
}
|
|
||||||
|
|
||||||
func escapeUnderscore(s string) string {
|
|
||||||
return strings.ReplaceAll(s, "_", "\\_")
|
|
||||||
}
|
|
||||||
|
|
||||||
func unescapeUnderscore(s string) string {
|
|
||||||
return strings.ReplaceAll(s, "\\_", "_")
|
|
||||||
}
|
|
||||||
|
|||||||
174
webpush/store.go
Normal file
174
webpush/store.go
Normal file
@@ -0,0 +1,174 @@
|
|||||||
|
package webpush
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"errors"
|
||||||
|
"net/netip"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"heckel.io/ntfy/v2/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
subscriptionIDPrefix = "wps_"
|
||||||
|
subscriptionIDLength = 10
|
||||||
|
subscriptionEndpointLimitPerSubscriberIP = 10
|
||||||
|
)
|
||||||
|
|
||||||
|
// Errors returned by the store
|
||||||
|
var (
|
||||||
|
ErrWebPushTooManySubscriptions = errors.New("too many subscriptions")
|
||||||
|
ErrWebPushUserIDCannotBeEmpty = errors.New("user ID cannot be empty")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Store holds the database connection and queries for web push subscriptions.
|
||||||
|
type Store struct {
|
||||||
|
db *sql.DB
|
||||||
|
queries queries
|
||||||
|
}
|
||||||
|
|
||||||
|
// queries holds the database-specific SQL queries.
|
||||||
|
type queries struct {
|
||||||
|
selectSubscriptionIDByEndpoint string
|
||||||
|
selectSubscriptionCountBySubscriberIP string
|
||||||
|
selectSubscriptionsForTopic string
|
||||||
|
selectSubscriptionsExpiringSoon string
|
||||||
|
upsertSubscription string
|
||||||
|
updateSubscriptionWarningSent string
|
||||||
|
updateSubscriptionUpdatedAt string
|
||||||
|
deleteSubscriptionByEndpoint string
|
||||||
|
deleteSubscriptionByUserID string
|
||||||
|
deleteSubscriptionByAge string
|
||||||
|
insertSubscriptionTopic string
|
||||||
|
deleteSubscriptionTopicAll string
|
||||||
|
deleteSubscriptionTopicWithoutSubscription string
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpsertSubscription adds or updates Web Push subscriptions for the given topics and user ID.
|
||||||
|
func (s *Store) UpsertSubscription(endpoint string, auth, p256dh, userID string, subscriberIP netip.Addr, topics []string) error {
|
||||||
|
tx, err := s.db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
// Read number of subscriptions for subscriber IP address
|
||||||
|
var subscriptionCount int
|
||||||
|
if err := tx.QueryRow(s.queries.selectSubscriptionCountBySubscriberIP, subscriberIP.String()).Scan(&subscriptionCount); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Read existing subscription ID for endpoint (or create new ID)
|
||||||
|
var subscriptionID string
|
||||||
|
if err := tx.QueryRow(s.queries.selectSubscriptionIDByEndpoint, endpoint).Scan(&subscriptionID); errors.Is(err, sql.ErrNoRows) {
|
||||||
|
if subscriptionCount >= subscriptionEndpointLimitPerSubscriberIP {
|
||||||
|
return ErrWebPushTooManySubscriptions
|
||||||
|
}
|
||||||
|
subscriptionID = util.RandomStringPrefix(subscriptionIDPrefix, subscriptionIDLength)
|
||||||
|
} else if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Insert or update subscription
|
||||||
|
updatedAt, warnedAt := time.Now().Unix(), 0
|
||||||
|
if _, err := tx.Exec(s.queries.upsertSubscription, subscriptionID, endpoint, auth, p256dh, userID, subscriberIP.String(), updatedAt, warnedAt); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Replace all subscription topics
|
||||||
|
if _, err := tx.Exec(s.queries.deleteSubscriptionTopicAll, subscriptionID); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, topic := range topics {
|
||||||
|
if _, err = tx.Exec(s.queries.insertSubscriptionTopic, subscriptionID, topic); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubscriptionsForTopic returns all subscriptions for the given topic.
|
||||||
|
func (s *Store) SubscriptionsForTopic(topic string) ([]*Subscription, error) {
|
||||||
|
rows, err := s.db.Query(s.queries.selectSubscriptionsForTopic, topic)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return subscriptionsFromRows(rows)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubscriptionsExpiring returns all subscriptions that have not been updated for a given time period.
|
||||||
|
func (s *Store) SubscriptionsExpiring(warnAfter time.Duration) ([]*Subscription, error) {
|
||||||
|
rows, err := s.db.Query(s.queries.selectSubscriptionsExpiringSoon, time.Now().Add(-warnAfter).Unix())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return subscriptionsFromRows(rows)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarkExpiryWarningSent marks the given subscriptions as having received a warning about expiring soon.
|
||||||
|
func (s *Store) MarkExpiryWarningSent(subscriptions []*Subscription) error {
|
||||||
|
tx, err := s.db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
for _, subscription := range subscriptions {
|
||||||
|
if _, err := tx.Exec(s.queries.updateSubscriptionWarningSent, time.Now().Unix(), subscription.ID); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveSubscriptionsByEndpoint removes the subscription for the given endpoint.
|
||||||
|
func (s *Store) RemoveSubscriptionsByEndpoint(endpoint string) error {
|
||||||
|
_, err := s.db.Exec(s.queries.deleteSubscriptionByEndpoint, endpoint)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveSubscriptionsByUserID removes all subscriptions for the given user ID.
|
||||||
|
func (s *Store) RemoveSubscriptionsByUserID(userID string) error {
|
||||||
|
if userID == "" {
|
||||||
|
return ErrWebPushUserIDCannotBeEmpty
|
||||||
|
}
|
||||||
|
_, err := s.db.Exec(s.queries.deleteSubscriptionByUserID, userID)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveExpiredSubscriptions removes all subscriptions that have not been updated for a given time period.
|
||||||
|
func (s *Store) RemoveExpiredSubscriptions(expireAfter time.Duration) error {
|
||||||
|
_, err := s.db.Exec(s.queries.deleteSubscriptionByAge, time.Now().Add(-expireAfter).Unix())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = s.db.Exec(s.queries.deleteSubscriptionTopicWithoutSubscription)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSubscriptionUpdatedAt updates the updated_at timestamp for a subscription by endpoint. This is
|
||||||
|
// exported for testing purposes.
|
||||||
|
func (s *Store) SetSubscriptionUpdatedAt(endpoint string, updatedAt int64) error {
|
||||||
|
_, err := s.db.Exec(s.queries.updateSubscriptionUpdatedAt, updatedAt, endpoint)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the underlying database connection.
|
||||||
|
func (s *Store) Close() error {
|
||||||
|
return s.db.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func subscriptionsFromRows(rows *sql.Rows) ([]*Subscription, error) {
|
||||||
|
subscriptions := make([]*Subscription, 0)
|
||||||
|
for rows.Next() {
|
||||||
|
var id, endpoint, auth, p256dh, userID string
|
||||||
|
if err := rows.Scan(&id, &endpoint, &auth, &p256dh, &userID); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
subscriptions = append(subscriptions, &Subscription{
|
||||||
|
ID: id,
|
||||||
|
Endpoint: endpoint,
|
||||||
|
Auth: auth,
|
||||||
|
P256dh: p256dh,
|
||||||
|
UserID: userID,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return subscriptions, nil
|
||||||
|
}
|
||||||
123
webpush/store_postgres.go
Normal file
123
webpush/store_postgres.go
Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
package webpush
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
postgresCreateTablesQuery = `
|
||||||
|
CREATE TABLE IF NOT EXISTS webpush_subscription (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
endpoint TEXT NOT NULL UNIQUE,
|
||||||
|
key_auth TEXT NOT NULL,
|
||||||
|
key_p256dh TEXT NOT NULL,
|
||||||
|
user_id TEXT NOT NULL,
|
||||||
|
subscriber_ip TEXT NOT NULL,
|
||||||
|
updated_at BIGINT NOT NULL,
|
||||||
|
warned_at BIGINT NOT NULL DEFAULT 0
|
||||||
|
);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_webpush_subscriber_ip ON webpush_subscription (subscriber_ip);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_webpush_updated_at ON webpush_subscription (updated_at);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_webpush_user_id ON webpush_subscription (user_id);
|
||||||
|
CREATE TABLE IF NOT EXISTS webpush_subscription_topic (
|
||||||
|
subscription_id TEXT NOT NULL REFERENCES webpush_subscription (id) ON DELETE CASCADE,
|
||||||
|
topic TEXT NOT NULL,
|
||||||
|
PRIMARY KEY (subscription_id, topic)
|
||||||
|
);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_webpush_topic ON webpush_subscription_topic (topic);
|
||||||
|
CREATE TABLE IF NOT EXISTS schema_version (
|
||||||
|
store TEXT PRIMARY KEY,
|
||||||
|
version INT NOT NULL
|
||||||
|
);
|
||||||
|
`
|
||||||
|
|
||||||
|
postgresSelectSubscriptionIDByEndpointQuery = `SELECT id FROM webpush_subscription WHERE endpoint = $1`
|
||||||
|
postgresSelectSubscriptionCountBySubscriberIPQuery = `SELECT COUNT(*) FROM webpush_subscription WHERE subscriber_ip = $1`
|
||||||
|
postgresSelectSubscriptionsForTopicQuery = `
|
||||||
|
SELECT s.id, s.endpoint, s.key_auth, s.key_p256dh, s.user_id
|
||||||
|
FROM webpush_subscription_topic st
|
||||||
|
JOIN webpush_subscription s ON s.id = st.subscription_id
|
||||||
|
WHERE st.topic = $1
|
||||||
|
ORDER BY s.endpoint
|
||||||
|
`
|
||||||
|
postgresSelectSubscriptionsExpiringSoonQuery = `
|
||||||
|
SELECT id, endpoint, key_auth, key_p256dh, user_id
|
||||||
|
FROM webpush_subscription
|
||||||
|
WHERE warned_at = 0 AND updated_at <= $1
|
||||||
|
`
|
||||||
|
postgresUpsertSubscriptionQuery = `
|
||||||
|
INSERT INTO webpush_subscription (id, endpoint, key_auth, key_p256dh, user_id, subscriber_ip, updated_at, warned_at)
|
||||||
|
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
||||||
|
ON CONFLICT (endpoint)
|
||||||
|
DO UPDATE SET key_auth = excluded.key_auth, key_p256dh = excluded.key_p256dh, user_id = excluded.user_id, subscriber_ip = excluded.subscriber_ip, updated_at = excluded.updated_at, warned_at = excluded.warned_at
|
||||||
|
`
|
||||||
|
postgresUpdateSubscriptionWarningSentQuery = `UPDATE webpush_subscription SET warned_at = $1 WHERE id = $2`
|
||||||
|
postgresUpdateSubscriptionUpdatedAtQuery = `UPDATE webpush_subscription SET updated_at = $1 WHERE endpoint = $2`
|
||||||
|
postgresDeleteSubscriptionByEndpointQuery = `DELETE FROM webpush_subscription WHERE endpoint = $1`
|
||||||
|
postgresDeleteSubscriptionByUserIDQuery = `DELETE FROM webpush_subscription WHERE user_id = $1`
|
||||||
|
postgresDeleteSubscriptionByAgeQuery = `DELETE FROM webpush_subscription WHERE updated_at <= $1`
|
||||||
|
|
||||||
|
postgresInsertSubscriptionTopicQuery = `INSERT INTO webpush_subscription_topic (subscription_id, topic) VALUES ($1, $2)`
|
||||||
|
postgresDeleteSubscriptionTopicAllQuery = `DELETE FROM webpush_subscription_topic WHERE subscription_id = $1`
|
||||||
|
postgresDeleteSubscriptionTopicWithoutSubscriptionQuery = `DELETE FROM webpush_subscription_topic WHERE subscription_id NOT IN (SELECT id FROM webpush_subscription)`
|
||||||
|
)
|
||||||
|
|
||||||
|
// PostgreSQL schema management queries
|
||||||
|
const (
|
||||||
|
pgCurrentSchemaVersion = 1
|
||||||
|
postgresInsertSchemaVersionQuery = `INSERT INTO schema_version (store, version) VALUES ('webpush', $1)`
|
||||||
|
postgresSelectSchemaVersionQuery = `SELECT version FROM schema_version WHERE store = 'webpush'`
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewPostgresStore creates a new PostgreSQL-backed web push store using an existing database connection pool.
|
||||||
|
func NewPostgresStore(db *sql.DB) (*Store, error) {
|
||||||
|
if err := setupPostgresDB(db); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Store{
|
||||||
|
db: db,
|
||||||
|
queries: queries{
|
||||||
|
selectSubscriptionIDByEndpoint: postgresSelectSubscriptionIDByEndpointQuery,
|
||||||
|
selectSubscriptionCountBySubscriberIP: postgresSelectSubscriptionCountBySubscriberIPQuery,
|
||||||
|
selectSubscriptionsForTopic: postgresSelectSubscriptionsForTopicQuery,
|
||||||
|
selectSubscriptionsExpiringSoon: postgresSelectSubscriptionsExpiringSoonQuery,
|
||||||
|
upsertSubscription: postgresUpsertSubscriptionQuery,
|
||||||
|
updateSubscriptionWarningSent: postgresUpdateSubscriptionWarningSentQuery,
|
||||||
|
updateSubscriptionUpdatedAt: postgresUpdateSubscriptionUpdatedAtQuery,
|
||||||
|
deleteSubscriptionByEndpoint: postgresDeleteSubscriptionByEndpointQuery,
|
||||||
|
deleteSubscriptionByUserID: postgresDeleteSubscriptionByUserIDQuery,
|
||||||
|
deleteSubscriptionByAge: postgresDeleteSubscriptionByAgeQuery,
|
||||||
|
insertSubscriptionTopic: postgresInsertSubscriptionTopicQuery,
|
||||||
|
deleteSubscriptionTopicAll: postgresDeleteSubscriptionTopicAllQuery,
|
||||||
|
deleteSubscriptionTopicWithoutSubscription: postgresDeleteSubscriptionTopicWithoutSubscriptionQuery,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupPostgresDB(db *sql.DB) error {
|
||||||
|
var schemaVersion int
|
||||||
|
err := db.QueryRow(postgresSelectSchemaVersionQuery).Scan(&schemaVersion)
|
||||||
|
if err != nil {
|
||||||
|
return setupNewPostgresDB(db)
|
||||||
|
}
|
||||||
|
if schemaVersion > pgCurrentSchemaVersion {
|
||||||
|
return fmt.Errorf("unexpected schema version: version %d is higher than current version %d", schemaVersion, pgCurrentSchemaVersion)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupNewPostgresDB(db *sql.DB) error {
|
||||||
|
tx, err := db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
if _, err := tx.Exec(postgresCreateTablesQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := tx.Exec(postgresInsertSchemaVersionQuery, pgCurrentSchemaVersion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
145
webpush/store_sqlite.go
Normal file
145
webpush/store_sqlite.go
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
package webpush
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
_ "github.com/mattn/go-sqlite3" // SQLite driver
|
||||||
|
)
|
||||||
|
|
||||||
|
// SQLite schema and statement definitions for the web push subscription store.
const (
	// sqliteCreateTablesQuery creates all tables and indexes:
	//   - subscription: one row per push subscription, keyed by a generated ID,
	//     with a unique index on the endpoint URL and an index on the
	//     subscriber IP (used by the per-IP subscription count query below)
	//   - subscription_topic: (subscription, topic) pairs; rows are removed via
	//     ON DELETE CASCADE when the owning subscription is deleted (requires
	//     foreign keys to be enabled, see sqliteBuiltinStartupQueries)
	//   - schemaVersion: single-row table recording the schema revision
	sqliteCreateTablesQuery = `
		CREATE TABLE IF NOT EXISTS subscription (
			id TEXT PRIMARY KEY,
			endpoint TEXT NOT NULL,
			key_auth TEXT NOT NULL,
			key_p256dh TEXT NOT NULL,
			user_id TEXT NOT NULL,
			subscriber_ip TEXT NOT NULL,
			updated_at INT NOT NULL,
			warned_at INT NOT NULL DEFAULT 0
		);
		CREATE UNIQUE INDEX IF NOT EXISTS idx_endpoint ON subscription (endpoint);
		CREATE INDEX IF NOT EXISTS idx_subscriber_ip ON subscription (subscriber_ip);
		CREATE TABLE IF NOT EXISTS subscription_topic (
			subscription_id TEXT NOT NULL,
			topic TEXT NOT NULL,
			PRIMARY KEY (subscription_id, topic),
			FOREIGN KEY (subscription_id) REFERENCES subscription (id) ON DELETE CASCADE
		);
		CREATE INDEX IF NOT EXISTS idx_topic ON subscription_topic (topic);
		CREATE TABLE IF NOT EXISTS schemaVersion (
			id INT PRIMARY KEY,
			version INT NOT NULL
		);
	`
	// sqliteBuiltinStartupQueries is always executed on startup; SQLite does not
	// enforce foreign keys by default, and enforcement is needed for the
	// ON DELETE CASCADE above to take effect.
	sqliteBuiltinStartupQueries = `
		PRAGMA foreign_keys = ON;
	`

	sqliteSelectSubscriptionIDByEndpointQuery        = `SELECT id FROM subscription WHERE endpoint = ?`
	sqliteSelectSubscriptionCountBySubscriberIPQuery = `SELECT COUNT(*) FROM subscription WHERE subscriber_ip = ?`
	// All subscriptions for a topic, ordered by endpoint for deterministic results
	sqliteSelectSubscriptionsForTopicQuery = `
		SELECT id, endpoint, key_auth, key_p256dh, user_id
		FROM subscription_topic st
		JOIN subscription s ON s.id = st.subscription_id
		WHERE st.topic = ?
		ORDER BY endpoint
	`
	// Subscriptions not updated since the given timestamp that have not been
	// sent an expiry warning yet (warned_at = 0)
	sqliteSelectSubscriptionsExpiringSoonQuery = `
		SELECT id, endpoint, key_auth, key_p256dh, user_id
		FROM subscription
		WHERE warned_at = 0 AND updated_at <= ?
	`
	// Insert-or-update keyed on the unique endpoint index; all mutable fields
	// are overwritten from the excluded (incoming) row
	sqliteUpsertSubscriptionQuery = `
		INSERT INTO subscription (id, endpoint, key_auth, key_p256dh, user_id, subscriber_ip, updated_at, warned_at)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?)
		ON CONFLICT (endpoint)
		DO UPDATE SET key_auth = excluded.key_auth, key_p256dh = excluded.key_p256dh, user_id = excluded.user_id, subscriber_ip = excluded.subscriber_ip, updated_at = excluded.updated_at, warned_at = excluded.warned_at
	`
	sqliteUpdateSubscriptionWarningSentQuery = `UPDATE subscription SET warned_at = ? WHERE id = ?`
	sqliteUpdateSubscriptionUpdatedAtQuery   = `UPDATE subscription SET updated_at = ? WHERE endpoint = ?`
	sqliteDeleteSubscriptionByEndpointQuery  = `DELETE FROM subscription WHERE endpoint = ?`
	sqliteDeleteSubscriptionByUserIDQuery    = `DELETE FROM subscription WHERE user_id = ?`
	sqliteDeleteSubscriptionByAgeQuery       = `DELETE FROM subscription WHERE updated_at <= ?` // Full table scan!

	sqliteInsertSubscriptionTopicQuery                    = `INSERT INTO subscription_topic (subscription_id, topic) VALUES (?, ?)`
	sqliteDeleteSubscriptionTopicAllQuery                 = `DELETE FROM subscription_topic WHERE subscription_id = ?`
	// Orphan cleanup; only needed if foreign keys were disabled when rows were deleted
	sqliteDeleteSubscriptionTopicWithoutSubscriptionQuery = `DELETE FROM subscription_topic WHERE subscription_id NOT IN (SELECT id FROM subscription)`
)
|
||||||
|
|
||||||
|
// SQLite schema management queries. The schemaVersion table holds exactly one
// row (id = 1) whose version column tracks the schema revision; bump
// sqliteCurrentSchemaVersion when the schema changes.
const (
	sqliteCurrentSchemaVersion     = 1
	sqliteInsertSchemaVersionQuery = `INSERT INTO schemaVersion VALUES (1, ?)`
	sqliteSelectSchemaVersionQuery = `SELECT version FROM schemaVersion WHERE id = 1`
)
|
||||||
|
|
||||||
|
// NewSQLiteStore creates a new SQLite-backed web push store.
|
||||||
|
func NewSQLiteStore(filename, startupQueries string) (*Store, error) {
|
||||||
|
db, err := sql.Open("sqlite3", filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := setupSQLite(db); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := runSQLiteStartupQueries(db, startupQueries); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Store{
|
||||||
|
db: db,
|
||||||
|
queries: queries{
|
||||||
|
selectSubscriptionIDByEndpoint: sqliteSelectSubscriptionIDByEndpointQuery,
|
||||||
|
selectSubscriptionCountBySubscriberIP: sqliteSelectSubscriptionCountBySubscriberIPQuery,
|
||||||
|
selectSubscriptionsForTopic: sqliteSelectSubscriptionsForTopicQuery,
|
||||||
|
selectSubscriptionsExpiringSoon: sqliteSelectSubscriptionsExpiringSoonQuery,
|
||||||
|
upsertSubscription: sqliteUpsertSubscriptionQuery,
|
||||||
|
updateSubscriptionWarningSent: sqliteUpdateSubscriptionWarningSentQuery,
|
||||||
|
updateSubscriptionUpdatedAt: sqliteUpdateSubscriptionUpdatedAtQuery,
|
||||||
|
deleteSubscriptionByEndpoint: sqliteDeleteSubscriptionByEndpointQuery,
|
||||||
|
deleteSubscriptionByUserID: sqliteDeleteSubscriptionByUserIDQuery,
|
||||||
|
deleteSubscriptionByAge: sqliteDeleteSubscriptionByAgeQuery,
|
||||||
|
insertSubscriptionTopic: sqliteInsertSubscriptionTopicQuery,
|
||||||
|
deleteSubscriptionTopicAll: sqliteDeleteSubscriptionTopicAllQuery,
|
||||||
|
deleteSubscriptionTopicWithoutSubscription: sqliteDeleteSubscriptionTopicWithoutSubscriptionQuery,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupSQLite(db *sql.DB) error {
|
||||||
|
var schemaVersion int
|
||||||
|
err := db.QueryRow(sqliteSelectSchemaVersionQuery).Scan(&schemaVersion)
|
||||||
|
if err != nil {
|
||||||
|
return setupNewSQLite(db)
|
||||||
|
}
|
||||||
|
if schemaVersion > sqliteCurrentSchemaVersion {
|
||||||
|
return fmt.Errorf("unexpected schema version: version %d is higher than current version %d", schemaVersion, sqliteCurrentSchemaVersion)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupNewSQLite(db *sql.DB) error {
|
||||||
|
tx, err := db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
if _, err := tx.Exec(sqliteCreateTablesQuery); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := tx.Exec(sqliteInsertSchemaVersionQuery, sqliteCurrentSchemaVersion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
func runSQLiteStartupQueries(db *sql.DB, startupQueries string) error {
|
||||||
|
if _, err := db.Exec(startupQueries); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := db.Exec(sqliteBuiltinStartupQueries); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
252
webpush/store_test.go
Normal file
252
webpush/store_test.go
Normal file
@@ -0,0 +1,252 @@
|
|||||||
|
package webpush_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/netip"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
dbtest "heckel.io/ntfy/v2/db/test"
|
||||||
|
"heckel.io/ntfy/v2/webpush"
|
||||||
|
)
|
||||||
|
|
||||||
|
// testWebPushEndpoint is a realistic-looking push-service endpoint URL used as
// the base endpoint in tests; suffixes are appended to create distinct endpoints.
const testWebPushEndpoint = "https://updates.push.services.mozilla.com/wpush/v1/AAABBCCCDDEEEFFF"
|
||||||
|
|
||||||
|
func forEachBackend(t *testing.T, f func(t *testing.T, store *webpush.Store)) {
|
||||||
|
t.Run("sqlite", func(t *testing.T) {
|
||||||
|
store, err := webpush.NewSQLiteStore(filepath.Join(t.TempDir(), "webpush.db"), "")
|
||||||
|
require.Nil(t, err)
|
||||||
|
t.Cleanup(func() { store.Close() })
|
||||||
|
f(t, store)
|
||||||
|
})
|
||||||
|
t.Run("postgres", func(t *testing.T) {
|
||||||
|
testDB := dbtest.CreateTestPostgres(t)
|
||||||
|
store, err := webpush.NewPostgresStore(testDB)
|
||||||
|
require.Nil(t, err)
|
||||||
|
f(t, store)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStoreUpsertSubscriptionSubscriptionsForTopic(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, store *webpush.Store) {
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"test-topic", "mytopic"}))
|
||||||
|
|
||||||
|
subs, err := store.SubscriptionsForTopic("test-topic")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 1)
|
||||||
|
require.Equal(t, subs[0].Endpoint, testWebPushEndpoint)
|
||||||
|
require.Equal(t, subs[0].P256dh, "p256dh-key")
|
||||||
|
require.Equal(t, subs[0].Auth, "auth-key")
|
||||||
|
require.Equal(t, subs[0].UserID, "u_1234")
|
||||||
|
|
||||||
|
subs2, err := store.SubscriptionsForTopic("mytopic")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs2, 1)
|
||||||
|
require.Equal(t, subs[0].Endpoint, subs2[0].Endpoint)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStoreUpsertSubscriptionSubscriberIPLimitReached(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, store *webpush.Store) {
|
||||||
|
// Insert 10 subscriptions with the same IP address
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
endpoint := fmt.Sprintf(testWebPushEndpoint+"%d", i)
|
||||||
|
require.Nil(t, store.UpsertSubscription(endpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"test-topic", "mytopic"}))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Another one for the same endpoint should be fine
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint+"0", "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"test-topic", "mytopic"}))
|
||||||
|
|
||||||
|
// But with a different endpoint it should fail
|
||||||
|
require.Equal(t, webpush.ErrWebPushTooManySubscriptions, store.UpsertSubscription(testWebPushEndpoint+"11", "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"test-topic", "mytopic"}))
|
||||||
|
|
||||||
|
// But with a different IP address it should be fine again
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint+"99", "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("9.9.9.9"), []string{"test-topic", "mytopic"}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStoreUpsertSubscriptionUpdateTopics(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, store *webpush.Store) {
|
||||||
|
// Insert subscription with two topics, and another with one topic
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint+"0", "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1", "topic2"}))
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint+"1", "auth-key", "p256dh-key", "", netip.MustParseAddr("9.9.9.9"), []string{"topic1"}))
|
||||||
|
|
||||||
|
subs, err := store.SubscriptionsForTopic("topic1")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 2)
|
||||||
|
require.Equal(t, testWebPushEndpoint+"0", subs[0].Endpoint)
|
||||||
|
require.Equal(t, testWebPushEndpoint+"1", subs[1].Endpoint)
|
||||||
|
|
||||||
|
subs, err = store.SubscriptionsForTopic("topic2")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 1)
|
||||||
|
require.Equal(t, testWebPushEndpoint+"0", subs[0].Endpoint)
|
||||||
|
|
||||||
|
// Update the first subscription to have only one topic
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint+"0", "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1"}))
|
||||||
|
|
||||||
|
subs, err = store.SubscriptionsForTopic("topic1")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 2)
|
||||||
|
require.Equal(t, testWebPushEndpoint+"0", subs[0].Endpoint)
|
||||||
|
|
||||||
|
subs, err = store.SubscriptionsForTopic("topic2")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 0)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStoreUpsertSubscriptionUpdateFields(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, store *webpush.Store) {
|
||||||
|
// Insert a subscription
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1"}))
|
||||||
|
|
||||||
|
subs, err := store.SubscriptionsForTopic("topic1")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 1)
|
||||||
|
require.Equal(t, "auth-key", subs[0].Auth)
|
||||||
|
require.Equal(t, "p256dh-key", subs[0].P256dh)
|
||||||
|
require.Equal(t, "u_1234", subs[0].UserID)
|
||||||
|
|
||||||
|
// Re-upsert the same endpoint with different auth, p256dh, and userID
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint, "new-auth", "new-p256dh", "u_5678", netip.MustParseAddr("1.2.3.4"), []string{"topic1"}))
|
||||||
|
|
||||||
|
subs, err = store.SubscriptionsForTopic("topic1")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 1)
|
||||||
|
require.Equal(t, testWebPushEndpoint, subs[0].Endpoint)
|
||||||
|
require.Equal(t, "new-auth", subs[0].Auth)
|
||||||
|
require.Equal(t, "new-p256dh", subs[0].P256dh)
|
||||||
|
require.Equal(t, "u_5678", subs[0].UserID)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStoreRemoveByUserIDMultiple(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, store *webpush.Store) {
|
||||||
|
// Insert two subscriptions for u_1234 and one for u_5678
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint+"0", "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1"}))
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint+"1", "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1"}))
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint+"2", "auth-key", "p256dh-key", "u_5678", netip.MustParseAddr("9.9.9.9"), []string{"topic1"}))
|
||||||
|
|
||||||
|
subs, err := store.SubscriptionsForTopic("topic1")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 3)
|
||||||
|
|
||||||
|
// Remove all subscriptions for u_1234
|
||||||
|
require.Nil(t, store.RemoveSubscriptionsByUserID("u_1234"))
|
||||||
|
|
||||||
|
// Only u_5678's subscription should remain
|
||||||
|
subs, err = store.SubscriptionsForTopic("topic1")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 1)
|
||||||
|
require.Equal(t, testWebPushEndpoint+"2", subs[0].Endpoint)
|
||||||
|
require.Equal(t, "u_5678", subs[0].UserID)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStoreRemoveByEndpoint(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, store *webpush.Store) {
|
||||||
|
// Insert subscription with two topics
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1", "topic2"}))
|
||||||
|
subs, err := store.SubscriptionsForTopic("topic1")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 1)
|
||||||
|
|
||||||
|
// And remove it again
|
||||||
|
require.Nil(t, store.RemoveSubscriptionsByEndpoint(testWebPushEndpoint))
|
||||||
|
subs, err = store.SubscriptionsForTopic("topic1")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 0)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStoreRemoveByUserID(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, store *webpush.Store) {
|
||||||
|
// Insert subscription with two topics
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1", "topic2"}))
|
||||||
|
subs, err := store.SubscriptionsForTopic("topic1")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 1)
|
||||||
|
|
||||||
|
// And remove it again
|
||||||
|
require.Nil(t, store.RemoveSubscriptionsByUserID("u_1234"))
|
||||||
|
subs, err = store.SubscriptionsForTopic("topic1")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 0)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStoreRemoveByUserIDEmpty(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, store *webpush.Store) {
|
||||||
|
require.Equal(t, webpush.ErrWebPushUserIDCannotBeEmpty, store.RemoveSubscriptionsByUserID(""))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStoreExpiryWarningSent(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, store *webpush.Store) {
|
||||||
|
// Insert subscription with two topics
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1", "topic2"}))
|
||||||
|
|
||||||
|
// Set updated_at to the past so it shows up as expiring
|
||||||
|
require.Nil(t, store.SetSubscriptionUpdatedAt(testWebPushEndpoint, time.Now().Add(-8*24*time.Hour).Unix()))
|
||||||
|
|
||||||
|
// Verify subscription appears in expiring list (warned_at == 0)
|
||||||
|
subs, err := store.SubscriptionsExpiring(7 * 24 * time.Hour)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 1)
|
||||||
|
require.Equal(t, testWebPushEndpoint, subs[0].Endpoint)
|
||||||
|
|
||||||
|
// Mark them as warning sent
|
||||||
|
require.Nil(t, store.MarkExpiryWarningSent(subs))
|
||||||
|
|
||||||
|
// Verify subscription no longer appears in expiring list (warned_at > 0)
|
||||||
|
subs, err = store.SubscriptionsExpiring(7 * 24 * time.Hour)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 0)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStoreExpiring(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, store *webpush.Store) {
|
||||||
|
// Insert subscription with two topics
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1", "topic2"}))
|
||||||
|
subs, err := store.SubscriptionsForTopic("topic1")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 1)
|
||||||
|
|
||||||
|
// Fake-mark them as soon-to-expire
|
||||||
|
require.Nil(t, store.SetSubscriptionUpdatedAt(testWebPushEndpoint, time.Now().Add(-8*24*time.Hour).Unix()))
|
||||||
|
|
||||||
|
// Should not be cleaned up yet
|
||||||
|
require.Nil(t, store.RemoveExpiredSubscriptions(9*24*time.Hour))
|
||||||
|
|
||||||
|
// Run expiration
|
||||||
|
subs, err = store.SubscriptionsExpiring(7 * 24 * time.Hour)
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 1)
|
||||||
|
require.Equal(t, testWebPushEndpoint, subs[0].Endpoint)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStoreRemoveExpired(t *testing.T) {
|
||||||
|
forEachBackend(t, func(t *testing.T, store *webpush.Store) {
|
||||||
|
// Insert subscription with two topics
|
||||||
|
require.Nil(t, store.UpsertSubscription(testWebPushEndpoint, "auth-key", "p256dh-key", "u_1234", netip.MustParseAddr("1.2.3.4"), []string{"topic1", "topic2"}))
|
||||||
|
subs, err := store.SubscriptionsForTopic("topic1")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 1)
|
||||||
|
|
||||||
|
// Fake-mark them as expired
|
||||||
|
require.Nil(t, store.SetSubscriptionUpdatedAt(testWebPushEndpoint, time.Now().Add(-10*24*time.Hour).Unix()))
|
||||||
|
|
||||||
|
// Run expiration
|
||||||
|
require.Nil(t, store.RemoveExpiredSubscriptions(9*24*time.Hour))
|
||||||
|
|
||||||
|
// List again, should be 0
|
||||||
|
subs, err = store.SubscriptionsForTopic("topic1")
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Len(t, subs, 0)
|
||||||
|
})
|
||||||
|
}
|
||||||
21
webpush/types.go
Normal file
21
webpush/types.go
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
package webpush
|
||||||
|
|
||||||
|
import "heckel.io/ntfy/v2/log"
|
||||||
|
|
||||||
|
// Subscription represents a web push subscription.
type Subscription struct {
	ID       string // unique subscription identifier (primary key in the store)
	Endpoint string // push service endpoint URL; unique per subscription
	Auth     string // client auth secret (stored as key_auth)
	P256dh   string // client P-256 key material (stored as key_p256dh)
	UserID   string // owning user ID; may be empty for anonymous subscriptions
}
|
||||||
|
|
||||||
|
// Context returns the logging context for the subscription.
|
||||||
|
func (w *Subscription) Context() log.Context {
|
||||||
|
return map[string]any{
|
||||||
|
"web_push_subscription_id": w.ID,
|
||||||
|
"web_push_subscription_user_id": w.UserID,
|
||||||
|
"web_push_subscription_endpoint": w.Endpoint,
|
||||||
|
}
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user