mirror of https://github.com/bitmagnet-io/bitmagnet.git (synced 2025-12-28 06:34:17 +00:00)

Webui revamp (#280)

commit 309e3b892b (parent 30636861ac)
.github/workflows/checks.yml (12 changes, vendored)

@@ -22,6 +22,12 @@ jobs:
       - name: Run linters
         run: |
           nix develop --ignore-environment --keep HOME . --command task lint
+      # Adding golang-ci lint as a separate step as the Nix package is currently broken
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v6
+        with:
+          version: v1.61
+          args: --timeout=10m

   test:
     runs-on: ubuntu-latest
@@ -65,13 +71,15 @@ jobs:
       - uses: cachix/install-nix-action@v26
         with:
           github_access_token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install web UI, apply database migrations, generate code and build web app
+      - name: Install web UI, apply database migrations, generate code, extract translations and build web app
         run: |
           nix develop --ignore-environment --command task install-webui
           nix develop --ignore-environment --keep HOME --keep POSTGRES_PASSWORD . --command task migrate
           nix develop --ignore-environment --keep HOME --keep POSTGRES_PASSWORD . --command task gen
+          nix develop --ignore-environment . --command task i18n-extract
           nix develop --ignore-environment . --command task build-webui
         env:
           POSTGRES_PASSWORD: postgres
       - name: Check nothing changed
-        run: git diff --exit-code
+        # excluding the 3rdpartylicenses file in a horrible hack:
+        run: git diff --exit-code -- . ':(exclude)webui/dist/bitmagnet/3rdpartylicenses.txt'
@@ -1,3 +1,5 @@
 bitmagnet.io/schemas/**/*.*
 webui/dist/**/*.*
 webui/src/app/graphql/generated/**/*.*
+webui/src/app/i18n/translations/*.json
+webui/.angular
@@ -1,4 +1,4 @@
-FROM golang:alpine3.18 AS build
+FROM golang:1.23.2-alpine3.20 AS build

 RUN apk --update add \
     gcc \
@@ -13,7 +13,7 @@ WORKDIR /build

 RUN go build -ldflags "-s -w -X github.com/bitmagnet-io/bitmagnet/internal/version.GitTag=$(git describe --tags --always --dirty)"

-FROM alpine:3.18
+FROM alpine:3.20

 RUN apk --update add \
     curl \
Taskfile.yml (13 changes)

@@ -46,12 +46,19 @@ tasks:
     - go run . classifier schema --format json > ./bitmagnet.io/schemas/classifier-0.1.json

   gen-webui-graphql:
+    dir: ./webui
     cmds:
-      - cd webui && npm run graphql:codegen
+      - npm run graphql:codegen
+
+  i18n-extract:
+    dir: ./webui
+    cmds:
+      - npm run i18n:extract

   lint:
     cmds:
-      - task lint-golangci
+      # Removing golang-ci lint as the Nix package is currently broken
+      # - task lint-golangci
       - task lint-webui
       - task lint-prettier

@@ -93,7 +100,7 @@ tasks:
   build-webui:
     dir: ./webui
     cmds:
-      - npm run build -- -c embedded
+      - npm run build

   build-docsite:
     dir: ./bitmagnet.io
@@ -1,10 +1,10 @@
 source 'https://rubygems.org'

-gem "just-the-docs", "~> 0.6"
+gem "just-the-docs", "~> 0.10"
 gem "jekyll", "~> 4.3"
 gem "jekyll-redirect-from", "~> 0.16"
 gem "jekyll-seo-tag"
 gem "jekyll-target-blank", "~> 2.0"
-gem "kramdown", "~> 2.3"
+gem "kramdown", "~> 2.4"
 gem "kramdown-parser-gfm", "~> 1.1"
 gem "webrick", "~> 1.8"
@@ -1,22 +1,28 @@
 GEM
   remote: https://rubygems.org/
   specs:
-    addressable (2.8.5)
-      public_suffix (>= 2.0.2, < 6.0)
+    addressable (2.8.7)
+      public_suffix (>= 2.0.2, < 7.0)
+    bigdecimal (3.1.8)
     colorator (1.1.0)
-    concurrent-ruby (1.2.2)
+    concurrent-ruby (1.3.4)
     em-websocket (0.5.3)
       eventmachine (>= 0.12.9)
       http_parser.rb (~> 0)
     eventmachine (1.2.7)
-    ffi (1.16.2)
+    ffi (1.17.0-arm64-darwin)
+    ffi (1.17.0-x86_64-linux-gnu)
     forwardable-extended (2.6.0)
-    google-protobuf (3.24.3-arm64-darwin)
-    google-protobuf (3.24.3-x86_64-linux)
+    google-protobuf (4.28.2-arm64-darwin)
+      bigdecimal
+      rake (>= 13)
+    google-protobuf (4.28.2-x86_64-linux)
+      bigdecimal
+      rake (>= 13)
     http_parser.rb (0.8.0)
-    i18n (1.14.1)
+    i18n (1.14.6)
       concurrent-ruby (~> 1.0)
-    jekyll (4.3.2)
+    jekyll (4.3.4)
       addressable (~> 2.4)
       colorator (~> 1.0)
       em-websocket (~> 0.5)
@@ -45,7 +51,7 @@ GEM
       nokogiri (~> 1.10)
     jekyll-watch (2.2.1)
       listen (~> 3.0)
-    just-the-docs (0.6.2)
+    just-the-docs (0.10.0)
       jekyll (>= 3.8.5)
       jekyll-include-cache
       jekyll-seo-tag (>= 2.0)
@@ -55,35 +61,34 @@ GEM
     kramdown-parser-gfm (1.1.0)
       kramdown (~> 2.0)
     liquid (4.0.4)
-    listen (3.8.0)
+    listen (3.9.0)
       rb-fsevent (~> 0.10, >= 0.10.3)
       rb-inotify (~> 0.9, >= 0.9.10)
     mercenary (0.4.0)
-    nokogiri (1.16.5-arm64-darwin)
+    nokogiri (1.16.7-arm64-darwin)
       racc (~> 1.4)
-    nokogiri (1.16.5-x86_64-linux)
+    nokogiri (1.16.7-x86_64-linux)
       racc (~> 1.4)
     pathutil (0.16.2)
       forwardable-extended (~> 2.6)
-    public_suffix (5.0.3)
-    racc (1.7.3)
-    rake (13.0.6)
+    public_suffix (6.0.1)
+    racc (1.8.1)
+    rake (13.2.1)
     rb-fsevent (0.11.2)
-    rb-inotify (0.10.1)
+    rb-inotify (0.11.1)
       ffi (~> 1.0)
-    rexml (3.2.8)
-      strscan (>= 3.0.9)
-    rouge (4.1.3)
+    rexml (3.3.8)
+    rouge (4.4.0)
     safe_yaml (1.0.5)
-    sass-embedded (1.68.0-arm64-darwin)
-      google-protobuf (~> 3.23)
-    sass-embedded (1.68.0-x86_64-linux-gnu)
-      google-protobuf (~> 3.23)
-    strscan (3.1.0)
+    sass-embedded (1.79.5)
+      google-protobuf (~> 4.27)
+      rake (>= 13)
+    sass-embedded (1.79.5-arm64-darwin)
+      google-protobuf (~> 4.27)
     terminal-table (3.0.2)
       unicode-display_width (>= 1.1.1, < 3)
-    unicode-display_width (2.4.2)
-    webrick (1.8.1)
+    unicode-display_width (2.6.0)
+    webrick (1.8.2)

 PLATFORMS
   arm64-darwin-22
@@ -95,10 +100,10 @@ DEPENDENCIES
   jekyll-redirect-from (~> 0.16)
   jekyll-seo-tag
   jekyll-target-blank (~> 2.0)
-  just-the-docs (~> 0.6)
-  kramdown (~> 2.3)
+  just-the-docs (~> 0.10)
+  kramdown (~> 2.4)
   kramdown-parser-gfm (~> 1.1)
   webrick (~> 1.8)

 BUNDLED WITH
-   2.5.7
+   2.5.9
@@ -24,22 +24,17 @@ As a rough guide, you should allow around 300MB RAM for BitMagnet, and at least

 ## I've started **bitmagnet** for the first time and am not seeing torrents right away, is something wrong?

-If everything is working, **bitmagnet** should begin showing torrents in the web UI within a maximum of 10 minutes (which is its cache TTL). The round blue refresh button in the web UI is a cache buster - use it to see new torrent content in real time. Bear in mind that when a torrent is inserted into the database, a background queue job must run before it will become available in the UI. If you're importing thousands or millions of torrents, it might therefore take a while for everything to show. Check the next question if you're still not seeing torrents.
+If everything is working, **bitmagnet** should begin showing torrents in the web UI within a maximum of 10 minutes (which is its cache TTL). The refresh button at the top of the torrent listing is a cache buster - use it to see new torrent content in real time. Bear in mind that when a torrent is inserted into the database, a background queue job must run before it will become available in the UI. If you're importing thousands or millions of torrents, it might therefore take a while for everything to show. Check the next question if you're still not seeing torrents.

 ## **bitmagnet** isn't finding any new torrents, what's wrong?

 {: .highlight }
-If **bitmagnet** isn't finding new torrents, it probably isn't due to a problem with the software - many people are using it successfully. You may have a networking or firewall issue, or a VPN misconfiguration preventing you from connecting to the DHT. Additionally, the TMDB API is blocked in certain countries; if you are in an affected country you may need to either disable the TMDB integration with the `tmdb.enabled` configuration key, or use a VPN.
+If **bitmagnet** isn't finding new torrents, it probably isn't due to a problem with the software - many people are using it successfully. You may have a networking or firewall issue, or a VPN misconfiguration preventing you from connecting to the DHT. Additionally, the TMDB API is blocked in certain countries; if you are in an affected country you may need to either disable the TMDB integration with the `tmdb.enabled` configuration key, or use a VPN. Configuring a personal TMDB API key (or disabling TMDB) will make the queue run a **lot** faster.

-Here are some things to check if you're not seeing any new torrents:
+**bitmagnet** now shows its health status in the main toolbar: It will show a tick for healthy, a cross for unhealthy or sometimes 3 dots for pending. Click on it to open the health dialog and check that all workers are running and healthy. The dashboard can be used to monitor queue throughput. On the queues dashboard, the following would indicate a problem:

-- Press the round blue refresh button in the UI.
-- Visit the metrics endpoint at `/metrics` and check the following metrics:
-  - `bitmagnet_dht_crawler_persisted_total`: If you see a positive number for this, the DHT crawler is working and has found torrents.
-  - If torrents are being persisted but you still don't see them in the UI, then check: `bitmagnet_queue_jobs_total{queue="process_torrent",status="processed"}`: If you see a positive number here, then the queue worker is running and processing jobs. If you see `status="failed"` or `status="retry"`, but no `status="processed"`, then something is wrong.
-  - If no torrents are being persisted, check: `bitmagnet_dht_server_query_success_total` and `bitmagnet_dht_server_query_error_total`. Having some DHT query errors is completely normal, but if you see no successful queries then something is wrong.
-  - If any of the above metrics are missing, you can assume their value is zero.
-  - If the metrics confirm a problem, check the logs for errors.
+- A high number of pending jobs, and the number of processed jobs not increasing over time
+- A high number of failed jobs
+- No new jobs being created over time

 ## Why doesn't **bitmagnet** show me exactly how many torrents it has indexed?

@@ -51,7 +46,7 @@ This will depend on a number of factors, including your hardware and network con

 ## How can I see exactly how many torrents **bitmagnet** has crawled in the current session?

-Visit the metrics endpoint at `/metrics` and check the metric `bitmagnet_dht_crawler_persisted_total`. `{entity="Torrent"}` corresponds to newly crawled torrents, and `{entity="TorrentsTorrentSource"}` corresponds to torrents that were rediscovered and had their seeders/leechers count, and last-seen-on date updated.
+The new dashboard shows throughput of the crawler and job queue. Alternatively, visit the metrics endpoint at `/metrics` and check the metric `bitmagnet_dht_crawler_persisted_total`. `{entity="Torrent"}` corresponds to newly crawled torrents, and `{entity="TorrentsTorrentSource"}` corresponds to torrents that were rediscovered and had their seeders/leechers count, and last-seen-on date updated.
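To script the check described in this answer, here is a minimal sketch in Go that fetches the metrics endpoint and prints only the crawler's persistence counters. It assumes a local instance with the web server on its default port 3333; adjust the address for your deployment:

```go
package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Assumed address: a locally running bitmagnet instance.
	resp, err := http.Get("http://localhost:3333/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Print only the bitmagnet_dht_crawler_persisted_total samples;
	// {entity="Torrent"} counts newly crawled torrents and
	// {entity="TorrentsTorrentSource"} counts rediscovered ones, as described above.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		if strings.HasPrefix(scanner.Text(), "bitmagnet_dht_crawler_persisted_total") {
			fmt.Println(scanner.Text())
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
```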
 ## How are the seeders/leechers numbers determined for torrents crawled from the DHT?

@@ -69,6 +64,10 @@ No. The DHT crawler works by sampling random info hashes from the network, and w

 **bitmagnet** is in early development, and improving the classifier will be an ongoing effort. When new versions are released, you can follow the [reclassify tutorial](/tutorials/reprocess-reclassify.html) to reclassify torrents. If you'd like to [improve or customize the classifier](/guides/classifier.html), this is also possible.

+## How can I make **bitmagnet** automatically delete torrents I'm not interested in?
+
+A better question would be: why bother? Disk space is inexpensive in the quantities required by **bitmagnet**, and searching is easier than deleting. Nevertheless this is one of the most commonly asked questions, and it is possible to do this by [customizing the classifier](/guides/classifier.html). Please consider the wastage of resources and load on the network created by deleting what you've crawled. Also remember that the classifier isn't perfect: for example, enabling deletion of XXX content will also delete anything that has been mis-identified as XXX by the classifier, preventing you from finding it in future - for example because it contains a rude word. If you are deleting a large proportion of what you're crawling, you are almost certainly deleting over-zealously and you should consider just using one of the many indexer sites instead.
+
 ## Can I run multiple **bitmagnet** instances pointing to the same database?

 Yes you can, just point multiple instances to one database and it will work - _but_ it will put more load on the database and cause the app to run slower. An alternative is to run multiple instances with multiple databases, and periodically [merge the databases](/guides/backup-restore-merge.html).
@@ -36,13 +36,12 @@ This means that **bitmagnet** is not reliant on any external trackers or torrent

 - [x] A GraphQL API: currently this provides a single search query; there is also an embedded GraphQL playground at `/graphql`
 - [x] A web user interface implemented in Angular: currently this is a simple single-page application providing a user interface for search queries via the GraphQL API
 - [x] [A Torznab-compatible endpoint for integration with the Servarr stack](/guides/servarr-integration.html)
+- [x] A WebUI dashboard for monitoring and administration

 ### High priority features not yet implemented

-- [ ] A WebUI dashboard showing things like crawler throughput, task queue, database size etc.
 - [ ] Authentication, API keys, access levels etc.
 - [ ] An admin API, and in general a more complete GraphQL API
 - [ ] A more complete web UI
 - [ ] Saved searches for content of particular interest, enabling custom feeds in addition to the following feature
 - [ ] Bi-directional integration with the [Prowlarr indexer proxy](https://prowlarr.com/): Currently **bitmagnet** can be added as an indexer in Prowlarr; bi-directional integration would allow **bitmagnet** to crawl content from any indexer configured in Prowlarr, unlocking many new sources of content
 - [ ] More documentation and more tests!
@@ -85,3 +85,17 @@ When referring to CLI commands in the rest of the documentation, for simplicity

 ```sh
 bitmagnet --help
 ```
+
+## Starting **bitmagnet**
+
+**bitmagnet** runs as multiple worker processes that can be started either individually or all at once. To start all workers, run:
+
+```sh
+bitmagnet worker run --all
+```
+
+Alternatively, specify individual workers to start:
+
+```sh
+bitmagnet worker run --keys=http_server,queue_server,dht_crawler
+```
flake.lock (18 changes)

@@ -5,11 +5,11 @@
       "systems": "systems"
     },
     "locked": {
-      "lastModified": 1710146030,
-      "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
+      "lastModified": 1726560853,
+      "narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
       "owner": "numtide",
       "repo": "flake-utils",
-      "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
+      "rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
       "type": "github"
     },
     "original": {
@@ -20,16 +20,16 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1713805509,
-        "narHash": "sha256-YgSEan4CcrjivCNO5ZNzhg7/8ViLkZ4CB/GrGBVSudo=",
-        "owner": "NixOS",
+        "lastModified": 1728500571,
+        "narHash": "sha256-dOymOQ3AfNI4Z337yEwHGohrVQb4yPODCW9MDUyAc4w=",
+        "owner": "nixos",
         "repo": "nixpkgs",
-        "rev": "1e1dc66fe68972a76679644a5577828b6a7e8be4",
+        "rev": "d51c28603def282a24fa034bcb007e2bcb5b5dd0",
         "type": "github"
       },
       "original": {
-        "owner": "NixOS",
-        "ref": "nixpkgs-unstable",
+        "owner": "nixos",
+        "ref": "nixos-24.05",
        "repo": "nixpkgs",
        "type": "github"
      }
@@ -1,6 +1,6 @@
 {
   description = "A basic flake with a shell";
-  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
+  inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-24.05";
   inputs.flake-utils.url = "github:numtide/flake-utils";

   outputs = {
@@ -20,7 +20,7 @@
     go-task
     golangci-lint
     jekyll
-    nodejs_20
+    nodejs_22
     nodePackages.prettier
     protobuf
     protoc-gen-go
go.mod (37 changes)

@@ -1,9 +1,9 @@
 module github.com/bitmagnet-io/bitmagnet

-go 1.22
+go 1.23

 require (
-	github.com/99designs/gqlgen v0.17.45
+	github.com/99designs/gqlgen v0.17.55
 	github.com/DATA-DOG/go-sqlmock v1.5.2
 	github.com/abice/go-enum v0.6.0
 	github.com/adrg/xdg v0.4.0
@@ -18,10 +18,9 @@ require (
 	github.com/go-playground/validator/v10 v10.20.0
 	github.com/go-resty/resty/v2 v2.12.0
 	github.com/google/cel-go v0.20.1
-	github.com/grafana/pyroscope-go/godeltaprof v0.1.7
+	github.com/grafana/pyroscope-go/godeltaprof v0.1.8
 	github.com/hashicorp/golang-lru/v2 v2.0.7
 	github.com/hedhyw/rex v0.6.0
-	github.com/hellofresh/health-go/v5 v5.5.3
 	github.com/iancoleman/strcase v0.3.0
 	github.com/jackc/pgx/v5 v5.5.5
 	github.com/jedib0t/go-pretty/v6 v6.5.8
@@ -35,17 +34,17 @@ require (
 	github.com/rs/cors/wrapper/gin v0.0.0-20240429123903-3d336ea9b505
 	github.com/stretchr/testify v1.9.0
 	github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43
-	github.com/urfave/cli/v2 v2.27.2
-	github.com/vektah/gqlparser/v2 v2.5.14
-	github.com/vektra/mockery/v2 v2.43.0
+	github.com/urfave/cli/v2 v2.27.4
+	github.com/vektah/gqlparser/v2 v2.5.17
+	github.com/vektra/mockery/v2 v2.46.2
 	github.com/xeipuuv/gojsonschema v1.2.0
 	go.uber.org/fx v1.21.1
 	go.uber.org/zap v1.27.0
-	golang.org/x/sync v0.7.0
-	golang.org/x/sys v0.19.0
-	golang.org/x/text v0.14.0
+	golang.org/x/sync v0.8.0
+	golang.org/x/sys v0.25.0
+	golang.org/x/text v0.18.0
 	golang.org/x/time v0.5.0
-	google.golang.org/protobuf v1.34.0
+	google.golang.org/protobuf v1.35.1
 	gopkg.in/yaml.v3 v3.0.1
 	gorm.io/driver/mysql v1.5.6
 	gorm.io/driver/postgres v1.5.7
@@ -137,7 +136,7 @@ require (
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
 	github.com/sethvargo/go-retry v0.2.4 // indirect
 	github.com/shopspring/decimal v1.4.0 // indirect
-	github.com/sosodev/duration v1.3.0 // indirect
+	github.com/sosodev/duration v1.3.1 // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
 	github.com/spf13/afero v1.11.0 // indirect
 	github.com/spf13/cast v1.6.0 // indirect
@@ -151,18 +150,16 @@ require (
 	github.com/ugorji/go/codec v1.2.12 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
-	github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
-	go.opentelemetry.io/otel v1.26.0 // indirect
-	go.opentelemetry.io/otel/trace v1.26.0 // indirect
+	github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
 	go.uber.org/dig v1.17.1 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/arch v0.7.0 // indirect
-	golang.org/x/crypto v0.22.0 // indirect
+	golang.org/x/crypto v0.27.0 // indirect
 	golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect
-	golang.org/x/mod v0.17.0 // indirect
-	golang.org/x/net v0.24.0 // indirect
-	golang.org/x/term v0.19.0 // indirect
-	golang.org/x/tools v0.20.0 // indirect
+	golang.org/x/mod v0.20.0 // indirect
+	golang.org/x/net v0.29.0 // indirect
+	golang.org/x/term v0.24.0 // indirect
+	golang.org/x/tools v0.24.0 // indirect
 	golang.org/x/tools/cmd/cover v0.1.0-deprecated // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20240429193739-8cf5692501f6 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 // indirect
go.sum (74 changes)

@@ -4,8 +4,8 @@ crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797/go.mod h1:sXBiorCo8c46JlQV3oX
 crawshaw.io/sqlite v0.3.2/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4=
 filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
 filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
-github.com/99designs/gqlgen v0.17.45 h1:bH0AH67vIJo8JKNKPJP+pOPpQhZeuVRQLf53dKIpDik=
-github.com/99designs/gqlgen v0.17.45/go.mod h1:Bas0XQ+Jiu/Xm5E33jC8sES3G+iC2esHBMXcq0fUPs0=
+github.com/99designs/gqlgen v0.17.55 h1:3vzrNWYyzSZjGDFo68e5j9sSauLxfKvLp+6ioRokVtM=
+github.com/99designs/gqlgen v0.17.55/go.mod h1:3Bq768f8hgVPGZxL8aY9MaYmbxa6llPM/qu1IGH1EJo=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
 github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
@@ -16,8 +16,8 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0
 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
 github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
 github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
-github.com/PuerkitoBio/goquery v1.9.1 h1:mTL6XjbJTZdpfL+Gwl5U2h1l9yEkJjhmlTeV9VPW7UI=
-github.com/PuerkitoBio/goquery v1.9.1/go.mod h1:cW1n6TmIMDoORQU5IU/P1T3tGFunOeXEpGP2WHRwkbY=
+github.com/PuerkitoBio/goquery v1.9.3 h1:mpJr/ikUA9/GNJB/DBZcGeFDXUtosHRyRrwh7KGdTG0=
+github.com/PuerkitoBio/goquery v1.9.3/go.mod h1:1ndLHPdTz+DyQPICCWYlYQMPl0oXZj0G6D4LCYA6u4U=
 github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
 github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI=
 github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
@@ -228,8 +228,8 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
 github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
-github.com/grafana/pyroscope-go/godeltaprof v0.1.7 h1:C11j63y7gymiW8VugJ9ZW0pWfxTZugdSJyC48olk5KY=
-github.com/grafana/pyroscope-go/godeltaprof v0.1.7/go.mod h1:Tk376Nbldo4Cha9RgiU7ik8WKFkNpfds98aUzS8omLE=
+github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg=
+github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
 github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
@@ -237,8 +237,6 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hedhyw/rex v0.6.0 h1:VoCgjAn2st5qshzHM3Qcd4lEHq8y1PDGNp+MO5x0G64=
 github.com/hedhyw/rex v0.6.0/go.mod h1:n9CYz3ztkAp56mrMXw65Q3LeXCO2AZSUvO7VMHsVMF8=
-github.com/hellofresh/health-go/v5 v5.5.3 h1:i+mfJcA8te/QhBzrBZxOw344XgIvHrc9IQzrEyn3OUQ=
-github.com/hellofresh/health-go/v5 v5.5.3/go.mod h1:maWprKoK7N9zno7l2ubFEGVF2SDmTHq5D9sV+lCFmGs=
 github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
 github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -283,7 +281,6 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
 github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
 github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
 github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
@@ -429,8 +426,8 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
 github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
 github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
-github.com/sosodev/duration v1.3.0 h1:g3E6mto+hFdA2uZXeNDYff8LYeg7v5D4YKP/Ng/NUkE=
-github.com/sosodev/duration v1.3.0/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
+github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4=
+github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
 github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
 github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
 github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
@@ -477,12 +474,12 @@ github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43 h1:QEePdg0t
 github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43/go.mod h1:OYRfF6eb5wY9VRFkXJH8FFBi3plw2v+giaIu7P054pM=
 github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
 github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
-github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI=
-github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM=
-github.com/vektah/gqlparser/v2 v2.5.14 h1:dzLq75BJe03jjQm6n56PdH1oweB8ana42wj7E4jRy70=
-github.com/vektah/gqlparser/v2 v2.5.14/go.mod h1:WQQjFc+I1YIzoPvZBhUQX7waZgg3pMLi0r8KymvAE2w=
-github.com/vektra/mockery/v2 v2.43.0 h1:9jgLwYbFIKPwWJUeK6Y+0s9oLRIGXLfW4FWlmF9R8c0=
-github.com/vektra/mockery/v2 v2.43.0/go.mod h1:XNTE9RIu3deGAGQRVjP1VZxGpQNm0YedZx4oDs3prr8=
+github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8=
+github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ=
+github.com/vektah/gqlparser/v2 v2.5.17 h1:9At7WblLV7/36nulgekUgIaqHZWn5hxqluxrxGUhOmI=
+github.com/vektah/gqlparser/v2 v2.5.17/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww=
+github.com/vektra/mockery/v2 v2.46.2 h1:bpUncWvkiDzqn+aWwt4dY1aS0F8Ob4k8+WJrWU/Kh4s=
+github.com/vektra/mockery/v2 v2.46.2/go.mod h1:dDivqi0ShM8A29mLgZn13yZ14MdXlTM4V360u8JDWCQ=
 github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -492,17 +489,13 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
 github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw=
-github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk=
+github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
+github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
 go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs=
-go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4=
-go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA=
-go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0=
 go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
 go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
 go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0=
@@ -523,8 +516,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
 golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
 golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
-golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
-golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
+golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
+golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY=
 golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI=
@@ -536,8 +529,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
+golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -558,8 +551,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
-golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
-golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
+golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
+golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -571,8 +564,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -601,8 +594,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
-golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
@@ -611,8 +604,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
 golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
-golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
-golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
+golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
+golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -620,8 +613,9 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -635,8 +629,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
-golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
-golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
+golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
+golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
 golang.org/x/tools/cmd/cover v0.1.0-deprecated h1:Rwy+mWYz6loAF+LnG1jHG/JWMHRMMC2/1XX3Ejkx9lA=
 golang.org/x/tools/cmd/cover v0.1.0-deprecated/go.mod h1:hMDiIvlpN1NoVgmjLjUJE9tMHyxHjFX7RuQ+rW12mSA=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -661,8 +655,8 @@ google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
 google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
 google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4=
-google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
graphql/fragments/QueueJob.graphql (new file)

@@ -0,0 +1,13 @@
+fragment QueueJob on QueueJob {
+  id
+  queue
+  status
+  payload
+  priority
+  retries
+  maxRetries
+  runAfter
+  ranAt
+  error
+  createdAt
+}
graphql/fragments/QueueJobsQueryResult.graphql (new file)

@@ -0,0 +1,21 @@
+#import "../fragments/QueueJob"
+
+fragment QueueJobsQueryResult on QueueJobsQueryResult {
+  items {
+    ...QueueJob
+  }
+  totalCount
+  hasNextPage
+  aggregations {
+    queue {
+      value
+      label
+      count
+    }
+    status {
+      value
+      label
+      count
+    }
+  }
+}
@@ -9,9 +9,6 @@ fragment Torrent on Torrent {
   hasFilesInfo
   singleFile
   fileType
-  files {
-    ...TorrentFile
-  }
   sources {
     key
     name
graphql/fragments/TorrentFilesQueryResult.graphql (new file)

@@ -0,0 +1,9 @@
+#import "./TorrentFile"
+
+fragment TorrentFilesQueryResult on TorrentFilesQueryResult {
+  items {
+    ...TorrentFile
+  }
+  totalCount
+  hasNextPage
+}
@@ -0,0 +1,7 @@
+mutation QueueEnqueueReprocessTorrentsBatch(
+  $input: QueueEnqueueReprocessTorrentsBatchInput!
+) {
+  queue {
+    enqueueReprocessTorrentsBatch(input: $input)
+  }
+}
graphql/mutations/QueuePurgeJobs.graphql (new file)

@@ -0,0 +1,5 @@
+mutation QueuePurgeJobs($input: QueuePurgeJobsInput!) {
+  queue {
+    purgeJobs(input: $input)
+  }
+}
graphql/queries/HealthCheck.graphql (new file)

@@ -0,0 +1,21 @@
+#import "../fragments/TorrentContentSearchResult"
+
+query HealthCheck {
+  health {
+    status
+    checks {
+      key
+      status
+      timestamp
+      error
+    }
+  }
+  workers {
+    listAll {
+      workers {
+        key
+        started
+      }
+    }
+  }
+}
graphql/queries/QueueJobs.graphql (new file)

@@ -0,0 +1,9 @@
+#import "../fragments/QueueJobsQueryResult"
+
+query QueueJobs($input: QueueJobsQueryInput!) {
+  queue {
+    jobs(input: $input) {
+      ...QueueJobsQueryResult
+    }
+  }
+}
graphql/queries/QueueMetrics.graphql (new file)

@@ -0,0 +1,14 @@
+query QueueMetrics($input: QueueMetricsQueryInput!) {
+  queue {
+    metrics(input: $input) {
+      buckets {
+        queue
+        status
+        createdAtBucket
+        ranAtBucket
+        count
+        latency
+      }
+    }
+  }
+}
@@ -1,5 +0,0 @@
-query SystemQuery {
-  system {
-    version
-  }
-}
@@ -1,12 +1,8 @@
 #import "../fragments/TorrentContentSearchResult"

-query TorrentContentSearch(
-  $query: SearchQueryInput
-  $facets: TorrentContentFacetsInput
-  $orderBy: [TorrentContentOrderByInput!]
-) {
+query TorrentContentSearch($input: TorrentContentSearchQueryInput!) {
   torrentContent {
-    search(query: $query, facets: $facets, orderBy: $orderBy) {
+    search(input: $input) {
       ...TorrentContentSearchResult
     }
   }
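The search query now takes a single `$input` variable in place of the separate `$query`, `$facets` and `$orderBy` variables. A minimal sketch of posting the reshaped query to the `/graphql` endpoint from Go; the address, the example search string, and the `totalCount` field selection are illustrative assumptions rather than part of this commit:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// A trimmed-down version of the TorrentContentSearch document above.
const searchQuery = `
query TorrentContentSearch($input: TorrentContentSearchQueryInput!) {
  torrentContent {
    search(input: $input) {
      totalCount
    }
  }
}`

func main() {
	// All search parameters now travel inside one "input" object.
	body, err := json.Marshal(map[string]any{
		"query": searchQuery,
		"variables": map[string]any{
			"input": map[string]any{
				"queryString": "ubuntu",
				"limit":       10,
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Assumed address: a locally running bitmagnet instance.
	resp, err := http.Post("http://localhost:3333/graphql", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	var out map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
```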
graphql/queries/TorrentFiles.graphql (new file)

@@ -0,0 +1,9 @@
+#import "../fragments/TorrentFilesQueryResult"
+
+query TorrentFiles($input: TorrentFilesQueryInput!) {
+  torrent {
+    files(input: $input) {
+      ...TorrentFilesQueryResult
+    }
+  }
+}
graphql/queries/TorrentMetrics.graphql (new file)

@@ -0,0 +1,18 @@
+query TorrentMetrics($input: TorrentMetricsQueryInput!) {
+  torrent {
+    metrics(input: $input) {
+      buckets {
+        source
+        updated
+        bucket
+        count
+      }
+    }
+    listSources {
+      sources {
+        key
+        name
+      }
+    }
+  }
+}
@@ -1,6 +1,6 @@
-query TorrentSuggestTags($query: SuggestTagsQueryInput!) {
+query TorrentSuggestTags($input: SuggestTagsQueryInput!) {
   torrent {
-    suggestTags(query: $query) {
+    suggestTags(input: $input) {
       suggestions {
         name
         count
graphql/queries/Version.graphql (new file)

@@ -0,0 +1,3 @@
+query Version {
+  version
+}
@@ -34,42 +34,58 @@ enum FilesStatus {
 }

 enum Language {
+  af
   ar
-  bs
+  az
+  be
   bg
+  bs
   ca
-  zh
+  ce
+  co
   cs
+  cy
   da
-  nl
-  en
-  et
-  fi
-  fr
   de
   el
+  en
+  es
+  et
+  eu
+  fa
+  fi
+  fr
   he
   hi
   hr
   hu
-  is
+  hy
   id
+  is
   it
   ja
+  ka
   ko
-  lv
+  ku
   lt
+  lv
+  mi
   mk
   ml
+  mn
+  ms
+  mt
+  nl
   no
-  fa
   pl
   pt
   ro
   ru
+  sa
   sk
   sl
-  es
+  sm
+  so
   sr
   sv
   ta
@@ -77,24 +93,8 @@ enum Language {
   tr
   uk
   vi
-  af
-  hy
-  az
-  eu
-  be
-  ce
-  co
-  ka
-  ku
-  mi
-  ms
-  mt
-  mn
-  sa
-  sm
-  so
-  cy
   yi
+  zh
   zu
 }
@@ -146,14 +146,27 @@ enum VideoSource {
   BluRay
 }

-enum TorrentContentOrderBy {
-  Relevance
-  PublishedAt
-  UpdatedAt
-  Size
-  Files
-  Seeders
-  Leechers
-  Name
-  InfoHash
+enum TorrentContentOrderByField {
+  relevance
+  published_at
+  updated_at
+  size
+  files_count
+  seeders
+  leechers
+  name
+  info_hash
 }
+
+enum TorrentFilesOrderByField {
+  index
+  path
+  extension
+  size
+}
+
+enum QueueJobsOrderByField {
+  created_at
+  ran_at
+  priority
+}
graphql/schema/metrics.graphqls (new file)

@@ -0,0 +1,44 @@
+enum MetricsBucketDuration {
+  minute
+  hour
+  day
+}
+
+type QueueMetricsBucket {
+  queue: String!
+  status: QueueJobStatus!
+  createdAtBucket: DateTime!
+  ranAtBucket: DateTime
+  count: Int!
+  latency: Duration
+}
+
+type QueueMetricsQueryResult {
+  buckets: [QueueMetricsBucket!]!
+}
+
+input QueueMetricsQueryInput {
+  bucketDuration: MetricsBucketDuration!
+  statuses: [QueueJobStatus!]
+  queues: [String!]
+  startTime: DateTime
+  endTime: DateTime
+}
+
+type TorrentMetricsBucket {
+  source: String!
+  bucket: DateTime!
+  updated: Boolean!
+  count: Int!
+}
+
+type TorrentMetricsQueryResult {
+  buckets: [TorrentMetricsBucket!]!
+}
+
+input TorrentMetricsQueryInput {
+  bucketDuration: MetricsBucketDuration!
+  sources: [String!]
+  startTime: DateTime
+  endTime: DateTime
+}
@@ -10,7 +10,7 @@ type Torrent {
   fileType: FileType
   fileTypes: [FileType!]
   files: [TorrentFile!]
-  sources: [TorrentSource!]!
+  sources: [TorrentSourceInfo!]!
   seeders: Int
   leechers: Int
   tagNames: [String!]!
@@ -33,6 +33,11 @@ type TorrentFile {
 type TorrentSource {
   key: String!
   name: String!
 }
+
+type TorrentSourceInfo {
+  key: String!
+  name: String!
+  importId: String
+  seeders: Int
+  leechers: Int
@@ -1,5 +1,6 @@
 type Mutation {
   torrent: TorrentMutation!
+  queue: QueueMutation!
 }

 type TorrentMutation {
@@ -1,11 +1,17 @@
 type Query {
+  version: String!
+  workers: WorkersQuery!
+  health: HealthQuery!
+  queue: QueueQuery!
   torrent: TorrentQuery!
   torrentContent: TorrentContentQuery!
-  system: SystemQuery!
 }

 type TorrentQuery {
-  suggestTags(query: SuggestTagsQueryInput): TorrentSuggestTagsResult!
+  files(input: TorrentFilesQueryInput!): TorrentFilesQueryResult!
+  listSources: TorrentListSourcesResult!
+  suggestTags(input: SuggestTagsQueryInput): TorrentSuggestTagsResult!
+  metrics(input: TorrentMetricsQueryInput!): TorrentMetricsQueryResult!
 }

 input SuggestTagsQueryInput {
@@ -17,19 +23,47 @@ type TorrentSuggestTagsResult {
   suggestions: [SuggestedTag!]!
 }

+type TorrentListSourcesResult {
+  sources: [TorrentSource!]!
+}
+
 type SuggestedTag {
   name: String!
   count: Int!
 }

 type TorrentContentQuery {
-  search(
-    query: SearchQueryInput
-    facets: TorrentContentFacetsInput
-    orderBy: [TorrentContentOrderByInput!]
-  ): TorrentContentSearchResult!
+  search(input: TorrentContentSearchQueryInput!): TorrentContentSearchResult!
 }

-type SystemQuery {
-  version: String!
+type Worker {
+  key: String!
+  started: Boolean!
 }
+
+type WorkersListAllQueryResult {
+  workers: [Worker!]!
+}
+
+type WorkersQuery {
+  listAll: WorkersListAllQueryResult!
+}
+
+enum HealthStatus {
+  unknown
+  inactive
+  up
+  down
+}
+
+type HealthCheck {
+  key: String!
+  status: HealthStatus!
+  timestamp: DateTime!
+  error: String
+}
+
+type HealthQuery {
+  status: HealthStatus!
+  checks: [HealthCheck!]!
+}
graphql/schema/queue.graphqls (new file)

@@ -0,0 +1,104 @@
+type QueueQuery {
+  jobs(input: QueueJobsQueryInput!): QueueJobsQueryResult!
+  metrics(input: QueueMetricsQueryInput!): QueueMetricsQueryResult!
+}
+
+input QueueJobsQueryInput {
+  queues: [String!]
+  statuses: [QueueJobStatus!]
+  limit: Int
+  page: Int
+  offset: Int
+  totalCount: Boolean
+  hasNextPage: Boolean
+  facets: QueueJobsFacetsInput
+  orderBy: [QueueJobsOrderByInput!]
+}
+
+input QueueJobQueueFacetInput {
+  aggregate: Boolean
+  filter: [String!]
+}
+
+input QueueJobStatusFacetInput {
+  aggregate: Boolean
+  filter: [QueueJobStatus!]
+}
+
+input QueueJobsFacetsInput {
+  status: QueueJobStatusFacetInput
+  queue: QueueJobQueueFacetInput
+}
+
+input QueueJobsOrderByInput {
+  field: QueueJobsOrderByField!
+  descending: Boolean
+}
+
+type QueueJob {
+  id: ID!
+  queue: String!
+  status: QueueJobStatus!
+  payload: String!
+  priority: Int!
+  retries: Int!
+  maxRetries: Int!
+  runAfter: DateTime!
+  ranAt: DateTime
+  error: String
+  createdAt: DateTime!
+}
+
+type QueueJobsQueryResult {
+  totalCount: Int!
+  hasNextPage: Boolean
+  items: [QueueJob!]!
+  aggregations: QueueJobsAggregations!
+}
+
+type QueueJobQueueAgg {
+  value: String!
+  label: String!
+  count: Int!
+}
+
+type QueueJobStatusAgg {
+  value: QueueJobStatus!
+  label: String!
+  count: Int!
+}
+
+type QueueJobsAggregations {
+  queue: [QueueJobQueueAgg!]
+  status: [QueueJobStatusAgg!]
+}
+
+enum QueueJobStatus {
+  pending
+  retry
+  failed
+  processed
+}
+
+input QueueEnqueueReprocessTorrentsBatchInput {
+  batchSize: Int
+  chunkSize: Int
+  contentTypes: [ContentType]
+  orphans: Boolean
+  classifierRematch: Boolean
+  classifierWorkflow: String
+  apisDisabled: Boolean
+  localSearchDisabled: Boolean
+}
+
+type QueueMutation {
+  purgeJobs(input: QueuePurgeJobsInput!): Void
+  enqueueReprocessTorrentsBatch(
+    input: QueueEnqueueReprocessTorrentsBatchInput
+  ): Void
+}
+
+input QueuePurgeJobsInput {
+  queues: [String!]
+  statuses: [QueueJobStatus!]
+}
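The new `QueueMutation` exposes `purgeJobs` and `enqueueReprocessTorrentsBatch`. Here is a sketch of invoking `purgeJobs` over HTTP from Go, reusing the `QueuePurgeJobs` document added earlier in this commit; the address and the queue/status filter values are assumptions for illustration (the `process_torrent` queue name appears in the FAQ changes above):

```go
package main

import (
	"bytes"
	"encoding/json"
	"io"
	"log"
	"net/http"
)

// Same document as graphql/mutations/QueuePurgeJobs.graphql.
const purgeMutation = `
mutation QueuePurgeJobs($input: QueuePurgeJobsInput!) {
  queue {
    purgeJobs(input: $input)
  }
}`

func main() {
	// Illustrative filter: drop failed jobs from the process_torrent queue.
	payload, err := json.Marshal(map[string]any{
		"query": purgeMutation,
		"variables": map[string]any{
			"input": map[string]any{
				"queues":   []string{"process_torrent"},
				"statuses": []string{"failed"},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	resp, err := http.Post("http://localhost:3333/graphql", "application/json", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	log.Printf("%s: %s", resp.Status, out)
}
```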
@@ -1,5 +1,6 @@
 scalar Hash20
 scalar Date
 scalar DateTime
+scalar Duration
 scalar Void
 scalar Year
@@ -1,12 +1,16 @@
-input SearchQueryInput {
+input TorrentContentSearchQueryInput {
   queryString: String
   limit: Int
+  page: Int
   offset: Int
   totalCount: Boolean
   """
   hasNextPage if true, the search result will include the hasNextPage field, indicating if there are more results to fetch
   """
   hasNextPage: Boolean
+  infoHashes: [Hash20!]
+  facets: TorrentContentFacetsInput
+  orderBy: [TorrentContentOrderByInput!]
   cached: Boolean
   aggregationBudget: Float
 }
@@ -159,6 +163,6 @@ type TorrentContentSearchResult {
 }

 input TorrentContentOrderByInput {
-  field: TorrentContentOrderBy!
+  field: TorrentContentOrderByField!
   descending: Boolean
 }
graphql/schema/torrent_files.graphqls (new file)

@@ -0,0 +1,21 @@
+input TorrentFilesQueryInput {
+  limit: Int
+  page: Int
+  offset: Int
+  totalCount: Boolean
+  hasNextPage: Boolean
+  infoHashes: [Hash20!]
+  orderBy: [TorrentFilesOrderByInput!]
+  cached: Boolean
+}
+
+input TorrentFilesOrderByInput {
+  field: TorrentFilesOrderByField!
+  descending: Boolean
+}
+
+type TorrentFilesQueryResult {
+  totalCount: Int!
+  hasNextPage: Boolean
+  items: [TorrentFile!]!
+}
@@ -12,7 +12,9 @@ import (
 	"github.com/bitmagnet-io/bitmagnet/internal/database/migrations"
 	"github.com/bitmagnet-io/bitmagnet/internal/dhtcrawler/dhtcrawlerfx"
 	"github.com/bitmagnet-io/bitmagnet/internal/gql/gqlfx"
+	"github.com/bitmagnet-io/bitmagnet/internal/health/healthfx"
 	"github.com/bitmagnet-io/bitmagnet/internal/importer/importerfx"
+	"github.com/bitmagnet-io/bitmagnet/internal/metrics/metricsfx"
 	"github.com/bitmagnet-io/bitmagnet/internal/processor/processorfx"
 	"github.com/bitmagnet-io/bitmagnet/internal/protocol/dht/dhtfx"
 	"github.com/bitmagnet-io/bitmagnet/internal/protocol/metainfo/metainfofx"
@@ -34,9 +36,11 @@ func New() fx.Option {
 		dhtfx.New(),
 		databasefx.New(),
 		gqlfx.New(),
+		healthfx.New(),
 		httpserverfx.New(),
 		importerfx.New(),
 		metainfofx.New(),
+		metricsfx.New(),
 		processorfx.New(),
 		queuefx.New(),
 		telemetryfx.New(),
@@ -3,7 +3,6 @@ package boilerplatefx
 import (
 	"github.com/bitmagnet-io/bitmagnet/internal/boilerplate/cli/clifx"
 	"github.com/bitmagnet-io/bitmagnet/internal/boilerplate/config/configfx"
-	"github.com/bitmagnet-io/bitmagnet/internal/boilerplate/healthcheck/healthcheckfx"
 	"github.com/bitmagnet-io/bitmagnet/internal/boilerplate/logging/loggingfx"
 	"github.com/bitmagnet-io/bitmagnet/internal/boilerplate/validation/validationfx"
 	"go.uber.org/fx"
@@ -14,7 +13,6 @@ func New() fx.Option {
 		"boilerplate",
 		clifx.New(),
 		configfx.New(),
-		healthcheckfx.New(),
 		loggingfx.New(),
 		validationfx.New(),
 	)
@ -1,24 +0,0 @@
|
||||
package healthcheck
|
||||
|
||||
import (
|
||||
"github.com/hellofresh/health-go/v5"
|
||||
"go.uber.org/fx"
|
||||
)
|
||||
|
||||
type Params struct {
|
||||
fx.In
|
||||
Options []health.Option `group:"healthcheck_options"`
|
||||
}
|
||||
|
||||
type Result struct {
|
||||
fx.Out
|
||||
Health *health.Health
|
||||
}
|
||||
|
||||
func New(p Params) (Result, error) {
|
||||
h, err := health.New(append(p.Options, health.WithSystemInfo())...)
|
||||
if err != nil {
|
||||
return Result{}, err
|
||||
}
|
||||
return Result{Health: h}, nil
|
||||
}
|
||||
@ -1,15 +0,0 @@
|
||||
package healthcheckfx
|
||||
|
||||
import (
|
||||
"github.com/bitmagnet-io/bitmagnet/internal/boilerplate/healthcheck"
|
||||
"github.com/bitmagnet-io/bitmagnet/internal/boilerplate/healthcheck/httpserver"
|
||||
"go.uber.org/fx"
|
||||
)
|
||||
|
||||
func New() fx.Option {
|
||||
return fx.Module(
|
||||
"healthcheck",
|
||||
fx.Provide(healthcheck.New),
|
||||
fx.Provide(httpserver.New),
|
||||
)
|
||||
}
|
||||
@ -1,41 +0,0 @@
|
||||
package httpserver
|
||||
|
||||
import (
|
||||
"github.com/bitmagnet-io/bitmagnet/internal/boilerplate/httpserver"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/hellofresh/health-go/v5"
|
||||
"go.uber.org/fx"
|
||||
)
|
||||
|
||||
type Params struct {
|
||||
fx.In
|
||||
Health *health.Health
|
||||
}
|
||||
|
||||
type Result struct {
|
||||
fx.Out
|
||||
Option httpserver.Option `group:"http_server_options"`
|
||||
}
|
||||
|
||||
func New(p Params) (r Result, err error) {
|
||||
handler := p.Health.Handler()
|
||||
r.Option = &builder{
|
||||
handler: func(c *gin.Context) {
|
||||
handler.ServeHTTP(c.Writer, c.Request)
|
||||
},
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type builder struct {
|
||||
handler gin.HandlerFunc
|
||||
}
|
||||
|
||||
func (builder) Key() string {
|
||||
return "status"
|
||||
}
|
||||
|
||||
func (b *builder) Apply(e *gin.Engine) error {
|
||||
e.GET("/status", b.handler)
|
||||
return nil
|
||||
}
|
||||
@ -135,7 +135,7 @@ func CustomRecoveryWithZap(logger ZapLogger, stack bool, recovery gin.RecoveryFu
|
||||
zap.String("request", string(httpRequest)),
|
||||
)
|
||||
// If the connection is dead, we can't write a status to it.
|
||||
c.Error(err.(error)) // nolint: errcheck
|
||||
_ = c.Error(err.(error))
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
|
||||
@ -4,6 +4,7 @@ import "sync"
|
||||
|
||||
type Lazy[T any] interface {
|
||||
Get() (T, error)
|
||||
Decorate(func(T) (T, error))
|
||||
// IfInitialized calls the given function if the value has been initialized (useful for shutdown logic)
|
||||
IfInitialized(func(T) error) error
|
||||
}
|
||||
@ -30,6 +31,20 @@ func (l *lazy[T]) Get() (T, error) {
|
||||
return l.v, l.err
|
||||
}
|
||||
|
||||
func (l *lazy[T]) Decorate(fn func(T) (T, error)) {
|
||||
l.mtx.Lock()
|
||||
defer l.mtx.Unlock()
|
||||
baseFn := l.fn
|
||||
l.fn = func() (T, error) {
|
||||
v, err := baseFn()
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
v, err = fn(v)
|
||||
return v, err
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lazy[T]) IfInitialized(fn func(T) error) error {
|
||||
l.mtx.Lock()
|
||||
defer l.mtx.Unlock()
|
||||
|
||||
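The Decorate method added here layers an extra transformation onto the lazy value by capturing the current initializer and swapping in a wrapper, so every decoration runs exactly once, on first Get. A self-contained sketch of that wrapping idiom, with illustrative names rather than the package's actual types:

package main

import "fmt"

// initFn stands in for the lazy initializer captured in Decorate.
type initFn func() (int, error)

// decorate mirrors the trick above: the old function is closed over,
// and the replacement runs it first, then applies the decoration.
func decorate(base initFn, fn func(int) (int, error)) initFn {
	return func() (int, error) {
		v, err := base()
		if err != nil {
			return v, err
		}
		return fn(v)
	}
}

func main() {
	base := func() (int, error) { return 2, nil }
	decorated := decorate(base, func(v int) (int, error) { return v * 10, nil })
	v, _ := decorated()
	fmt.Println(v) // 20
}
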
@ -6,6 +6,7 @@ import (
	"fmt"
	"go.uber.org/fx"
	"go.uber.org/zap"
	"sort"
	"sync"
)

@ -125,8 +126,13 @@ func (r *registry) Workers() []Worker {
	var workers []Worker
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	for _, w := range r.workers {
		workers = append(workers, w)
	keys := make([]string, 0, len(r.workers))
	for k := range r.workers {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		workers = append(workers, r.workers[k])
	}
	return workers
}

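The Workers change replaces direct map iteration with a sorted pass over the keys: Go randomizes map iteration order, so collecting and sorting the keys first makes the returned slice deterministic. A minimal sketch of the idiom:

package main

import (
	"fmt"
	"sort"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}
	// Collect keys, sort them, then iterate in that fixed order.
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Println(k, m[k]) // always a, b, c
	}
}
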
@ -1,4 +1,4 @@
// Code generated by mockery v2.43.0. DO NOT EDIT.
// Code generated by mockery v2.46.2. DO NOT EDIT.

package classifier_mocks


@ -1,24 +1,31 @@
package dao

import (
	"gorm.io/gorm"
	"database/sql"
	"gorm.io/gorm"
)

func ToSQL(db *gorm.DB) string {
	return db.ToSQL(func(tx *gorm.DB) *gorm.DB {
		return tx.Find(&[]interface{}{})
	})
	return db.ToSQL(func(tx *gorm.DB) *gorm.DB {
		return tx.Find(&[]interface{}{})
	})
}

type BudgetedCountResult struct {
	Count          int64
	Cost           float64
	BudgetExceeded bool
	Count          int64
	Cost           float64
	BudgetExceeded bool
}

func BudgetedCount(db *gorm.DB, budget float64) (BudgetedCountResult, error) {
	row := db.Raw("SELECT count, cost, budget_exceeded from budgeted_count(?, ?)", ToSQL(db), budget).Row()
	result := BudgetedCountResult{}
	err := row.Scan(&result.Count, &result.Cost, &result.BudgetExceeded)
	return result, err
	var row *sql.Row
	q := ToSQL(db)
	if budget > 0 {
		row = db.Raw("SELECT count, cost, budget_exceeded from budgeted_count(?, ?)", q, budget).Row()
	} else {
		row = db.Raw("SELECT count(*) as count, 0 as cost, false as budget_exceeded from (" + q + ")").Row()
	}
	result := BudgetedCountResult{}
	err := row.Scan(&result.Count, &result.Cost, &result.BudgetExceeded)
	return result, err
}

@ -1,15 +1,11 @@
package dao

import (
	"fmt"
	"gorm.io/gorm/callbacks"
)

func (t torrentContent) CountEstimate() (int64, error) {
	db := t.UnderlyingDB()
	callbacks.BuildQuerySQL(db)
	query := db.Statement.SQL.String()
	args := db.Statement.Vars
	fmt.Printf("query: %s, args: %v", query, args)
	return 0, nil
}

@ -94,7 +94,7 @@ func TestExcept_Query(t *testing.T) {
			db = tt.operation(db)
		}
		if db.Error != nil {
			t.Errorf(db.Error.Error())
			t.Error(db.Error.Error())
		}
	})
}

@ -94,7 +94,7 @@ func TestIntersect_Query(t *testing.T) {
			db = tt.operation(db)
		}
		if db.Error != nil {
			t.Errorf(db.Error.Error())
			t.Error(db.Error.Error())
		}
	})
}

@ -92,7 +92,7 @@ func TestUnion_Query(t *testing.T) {
			db = tt.operation(db)
		}
		if db.Error != nil {
			t.Errorf(db.Error.Error())
			t.Error(db.Error.Error())
		}
	})
}

@ -82,7 +82,7 @@ func TestWith_Query(t *testing.T) {
			db = tt.operation(db)
		}
		if db.Error != nil {
			t.Errorf(db.Error.Error())
			t.Error(db.Error.Error())
		}
	})
}

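The four test hunks above all make the same substitution: t.Errorf interprets its first argument as a printf format string, so an error message that happens to contain a % verb would be mangled (and trips the vet printf check), while t.Error logs it verbatim. A small illustration of the hazard, using fmt.Sprintf to stand in for t.Errorf:

package main

import "fmt"

func main() {
	msg := "parse error near '%s'" // an error message containing a stray verb
	fmt.Println(fmt.Sprintf(msg))  // parse error near '%!s(MISSING)' - what t.Errorf(msg) would log
	fmt.Println(msg)               // parse error near '%s' - what t.Error(msg) logs
}
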
@ -42,6 +42,7 @@ func New(p Params) Result {
			SlowThreshold: time.Second * 30,
		},
	}).GormLogger,
	DisableAutomaticPing: true,
})
if dbErr != nil {
	return nil, dbErr

@ -5,7 +5,7 @@ import (
	"database/sql"
	"fmt"
	"github.com/bitmagnet-io/bitmagnet/internal/boilerplate/lazy"
	"github.com/hellofresh/health-go/v5"
	"github.com/bitmagnet-io/bitmagnet/internal/health"
	"go.uber.org/fx"
	"time"
)
@ -17,25 +17,28 @@ type Params struct {

type Result struct {
	fx.Out
	Config health.Option `group:"healthcheck_options"`
	Option health.CheckerOption `group:"health_check_options"`
}

func New(p Params) Result {
	return Result{
		Config: health.WithChecks(health.Config{
			Name:    "postgres",
			Timeout: time.Second * 5,
			Check: func(ctx context.Context) error {
				db, dbErr := p.DB.Get()
				if dbErr != nil {
					return fmt.Errorf("failed to get database connection: %w", dbErr)
				}
				pingErr := db.PingContext(ctx)
				if pingErr != nil {
					return fmt.Errorf("failed to ping database: %w", pingErr)
				}
				return nil
			},
		}),
		Option: health.WithPeriodicCheck(
			time.Second*30,
			time.Second*1,
			health.Check{
				Name:    "postgres",
				Timeout: time.Second * 5,
				Check: func(ctx context.Context) error {
					db, dbErr := p.DB.Get()
					if dbErr != nil {
						return fmt.Errorf("failed to get database connection: %w", dbErr)
					}
					pingErr := db.PingContext(ctx)
					if pingErr != nil {
						return fmt.Errorf("failed to ping database: %w", pingErr)
					}
					return nil
				},
			}),
	}
}

@ -7,6 +7,7 @@ import (
type SearchParams struct {
	QueryString model.NullString
	Limit       model.NullUint
	Page        model.NullUint
	Offset      model.NullUint
	TotalCount  model.NullBool
	HasNextPage model.NullBool
@ -19,11 +20,18 @@ func (s SearchParams) Option() Option {
	if s.QueryString.Valid {
		options = append(options, QueryString(s.QueryString.String), OrderByQueryStringRank())
	}
	offset := uint(0)
	if s.Limit.Valid {
		options = append(options, Limit(s.Limit.Uint))
		if s.Page.Valid && s.Page.Uint > 0 {
			offset += (s.Page.Uint - 1) * s.Limit.Uint
		}
	}
	if s.Offset.Valid {
		options = append(options, Offset(s.Offset.Uint))
		offset += s.Offset.Uint
	}
	if offset > 0 {
		options = append(options, Offset(offset))
	}
	if s.TotalCount.Valid {
		options = append(options, WithTotalCount(s.TotalCount.Bool))

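The Option hunk above folds an optional 1-based page number into a single row offset, stacking it on top of any explicit offset before emitting one Offset option. A self-contained sketch of the arithmetic (function name is illustrative):

package main

import "fmt"

// effectiveOffset mirrors the computation: page is 1-based and only
// meaningful when a limit is set; an explicit offset is added on top.
func effectiveOffset(page, limit, explicit uint) uint {
	offset := explicit
	if page > 0 && limit > 0 {
		offset += (page - 1) * limit
	}
	return offset
}

func main() {
	fmt.Println(effectiveOffset(3, 20, 0)) // 40: skip two pages of 20
	fmt.Println(effectiveOffset(2, 10, 5)) // 15: one page of 10 plus 5
}
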
17
internal/database/search/criteria_queue_job_queue.go
Normal file
@ -0,0 +1,17 @@
package search

import (
	"github.com/bitmagnet-io/bitmagnet/internal/database/query"
	"gorm.io/gen/field"
)

func QueueJobQueueCriteria(queues ...string) query.Criteria {
	return query.DaoCriteria{
		Conditions: func(ctx query.DbContext) ([]field.Expr, error) {
			q := ctx.Query()
			return []field.Expr{
				q.QueueJob.Queue.In(queues...),
			}, nil
		},
	}
}
22
internal/database/search/criteria_queue_job_status.go
Normal file
@ -0,0 +1,22 @@
package search

import (
	"github.com/bitmagnet-io/bitmagnet/internal/database/query"
	"github.com/bitmagnet-io/bitmagnet/internal/model"
	"gorm.io/gen/field"
)

func QueueJobStatusCriteria(statuses ...model.QueueJobStatus) query.Criteria {
	strStatuses := make([]string, 0, len(statuses))
	for _, s := range statuses {
		strStatuses = append(strStatuses, s.String())
	}
	return query.DaoCriteria{
		Conditions: func(ctx query.DbContext) ([]field.Expr, error) {
			q := ctx.Query()
			return []field.Expr{
				q.QueueJob.Status.In(strStatuses...),
			}, nil
		},
	}
}
@ -1,20 +1,11 @@
package search

import (
	"database/sql/driver"
	"github.com/bitmagnet-io/bitmagnet/internal/database/query"
	"github.com/bitmagnet-io/bitmagnet/internal/model"
	"github.com/bitmagnet-io/bitmagnet/internal/protocol"
	"gorm.io/gen/field"
)

func TorrentContentInfoHashCriteria(infoHashes ...protocol.ID) query.Criteria {
	valuers := make([]driver.Valuer, 0, len(infoHashes))
	for _, infoHash := range infoHashes {
		valuers = append(valuers, infoHash)
	}
	return query.DaoCriteria{
		Conditions: func(ctx query.DbContext) ([]field.Expr, error) {
			return []field.Expr{ctx.Query().TorrentContent.InfoHash.In(valuers...)}, nil
		},
	}
	return infoHashCriteria(model.TableNameTorrentContent, infoHashes...)
}

11
internal/database/search/criteria_torrent_file_info_hash.go
Normal file
@ -0,0 +1,11 @@
package search

import (
	"github.com/bitmagnet-io/bitmagnet/internal/database/query"
	"github.com/bitmagnet-io/bitmagnet/internal/model"
	"github.com/bitmagnet-io/bitmagnet/internal/protocol"
)

func TorrentFileInfoHashCriteria(infoHashes ...protocol.ID) query.Criteria {
	return infoHashCriteria(model.TableNameTorrentFile, infoHashes...)
}
@ -1,20 +1,28 @@
package search

import (
	"database/sql/driver"
	"fmt"
	"github.com/bitmagnet-io/bitmagnet/internal/database/query"
	"github.com/bitmagnet-io/bitmagnet/internal/model"
	"github.com/bitmagnet-io/bitmagnet/internal/protocol"
	"gorm.io/gen/field"
	"strings"
)

func TorrentInfoHashCriteria(infoHashes ...protocol.ID) query.Criteria {
	valuers := make([]driver.Valuer, 0, len(infoHashes))
	for _, infoHash := range infoHashes {
		valuers = append(valuers, infoHash)
	return infoHashCriteria(model.TableNameTorrent, infoHashes...)
}

func infoHashCriteria(table string, infoHashes ...protocol.ID) query.Criteria {
	if len(infoHashes) == 0 {
		return query.DbCriteria{
			Sql: "FALSE",
		}
	}
	return query.DaoCriteria{
		Conditions: func(ctx query.DbContext) ([]field.Expr, error) {
			return []field.Expr{ctx.Query().Torrent.InfoHash.In(valuers...)}, nil
		},
	decodes := make([]string, len(infoHashes))
	for i, infoHash := range infoHashes {
		decodes[i] = fmt.Sprintf("DECODE('%s', 'hex')", infoHash.String())
	}
	return query.DbCriteria{
		Sql: fmt.Sprintf("%s.info_hash IN (%s)", table, strings.Join(decodes, ", ")),
	}
}

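The new infoHashCriteria builds a raw SQL fragment, encoding each hash as a DECODE(..., 'hex') literal, and short-circuits to FALSE for an empty list so the IN clause is never empty. A standalone sketch that reproduces the string it generates (the helper name and sample hash are illustrative, not the package API):

package main

import (
	"fmt"
	"strings"
)

func infoHashSQL(table string, hashes ...string) string {
	if len(hashes) == 0 {
		return "FALSE" // an empty IN () would be invalid SQL
	}
	decodes := make([]string, len(hashes))
	for i, h := range hashes {
		decodes[i] = fmt.Sprintf("DECODE('%s', 'hex')", h)
	}
	return fmt.Sprintf("%s.info_hash IN (%s)", table, strings.Join(decodes, ", "))
}

func main() {
	fmt.Println(infoHashSQL("torrents", "c12fe1c06bba254a9dc9f519b335aa7c1367a88a"))
	// torrents.info_hash IN (DECODE('c12fe1c06bba254a9dc9f519b335aa7c1367a88a', 'hex'))
}
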
43
internal/database/search/facet_queue_job_queue.go
Normal file
@ -0,0 +1,43 @@
package search

import (
	"github.com/bitmagnet-io/bitmagnet/internal/database/query"
)

const QueueJobQueueFacetKey = "queue"

func QueueJobQueueFacet(options ...query.FacetOption) query.Facet {
	return queueJobQueueFacet{
		FacetConfig: query.NewFacetConfig(
			append([]query.FacetOption{
				query.FacetHasKey(QueueJobQueueFacetKey),
				query.FacetHasLabel("Queue"),
				query.FacetUsesOrLogic(),
			}, options...)...,
		),
	}
}

type queueJobQueueFacet struct {
	query.FacetConfig
}

var queueNames = []string{"process_torrent", "process_torrent_batch"}

func (f queueJobQueueFacet) Values(query.FacetContext) (map[string]string, error) {
	values := make(map[string]string)
	for _, n := range queueNames {
		values[n] = n
	}
	return values, nil
}

func (f queueJobQueueFacet) Criteria(filter query.FacetFilter) []query.Criteria {
	values := filter.Values()
	if len(values) == 0 {
		return nil
	}
	return []query.Criteria{
		QueueJobQueueCriteria(filter.Values()...),
	}
}
50
internal/database/search/facet_queue_job_status.go
Normal file
@ -0,0 +1,50 @@
package search

import (
	"github.com/bitmagnet-io/bitmagnet/internal/database/query"
	"github.com/bitmagnet-io/bitmagnet/internal/model"
	"gorm.io/gen/field"
)

const QueueJobStatusFacetKey = "status"

func QueueJobStatusFacet(options ...query.FacetOption) query.Facet {
	return queueJobStatusFacet{
		FacetConfig: query.NewFacetConfig(
			append([]query.FacetOption{
				query.FacetHasKey(QueueJobStatusFacetKey),
				query.FacetHasLabel("Status"),
				query.FacetUsesOrLogic(),
			}, options...)...,
		),
	}
}

type queueJobStatusFacet struct {
	query.FacetConfig
}

func (f queueJobStatusFacet) Values(query.FacetContext) (map[string]string, error) {
	values := make(map[string]string)
	for _, n := range model.QueueJobStatusNames() {
		values[n] = n
	}
	return values, nil
}

func (f queueJobStatusFacet) Criteria(filter query.FacetFilter) []query.Criteria {
	values := filter.Values()
	if len(values) == 0 {
		return nil
	}
	return []query.Criteria{
		query.DaoCriteria{
			Conditions: func(ctx query.DbContext) ([]field.Expr, error) {
				q := ctx.Query()
				return []field.Expr{
					q.QueueJob.Status.In(values...),
				}, nil
			},
		},
	}
}
@ -2,25 +2,56 @@ package search

import (
	"context"
	"github.com/bitmagnet-io/bitmagnet/internal/database/dao"
	"github.com/bitmagnet-io/bitmagnet/internal/database/query"
	"github.com/bitmagnet-io/bitmagnet/internal/model"
	"github.com/bitmagnet-io/bitmagnet/internal/protocol"
	"gorm.io/gen/field"
)

func HydrateTorrentContentTorrent() query.Option {
type torrentContentTorrentHydratorConfig struct {
	files bool
}

type HydrateTorrentContentTorrentOption func(config *torrentContentTorrentHydratorConfig)

func HydrateTorrentContentTorrentWithFiles() HydrateTorrentContentTorrentOption {
	return func(config *torrentContentTorrentHydratorConfig) {
		config.files = true
	}
}

func HydrateTorrentContentTorrent(options ...HydrateTorrentContentTorrentOption) query.Option {
	var config torrentContentTorrentHydratorConfig
	for _, option := range options {
		option(&config)
	}
	return query.HydrateHasOne[TorrentContentResultItem, model.Torrent, protocol.ID](
		torrentContentTorrentHydrator{},
		torrentContentTorrentHydrator{config},
	)
}

type torrentContentTorrentHydrator struct{}
type torrentContentTorrentHydrator struct {
	torrentContentTorrentHydratorConfig
}

func (h torrentContentTorrentHydrator) RootToSubID(root TorrentContentResultItem) (protocol.ID, bool) {
	return root.InfoHash, true
}

func (h torrentContentTorrentHydrator) GetSubs(ctx context.Context, dbCtx query.DbContext, ids []protocol.ID) ([]model.Torrent, error) {
	result, err := search{dbCtx.Query()}.Torrents(ctx, query.Where(TorrentInfoHashCriteria(ids...)), TorrentDefaultPreload())
	result, err := search{dbCtx.Query()}.Torrents(ctx, query.Where(TorrentInfoHashCriteria(ids...)), query.Preload(func(q *dao.Query) []field.RelationField {
		preload := []field.RelationField{
			q.Torrent.Sources.RelationField,
			q.Torrent.Sources.TorrentSource.RelationField,
			q.Torrent.Hint.RelationField,
			q.Torrent.Tags.RelationField,
		}
		if h.files {
			preload = append(preload, q.Torrent.Files.RelationField)
		}
		return preload
	}))
	if err != nil {
		return nil, err
	}

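The hydrator rewrite above adopts the functional-options pattern: callers pass zero or more option functions that mutate a private config struct before it is embedded into the hydrator. A self-contained sketch of the same idiom, with illustrative names:

package main

import "fmt"

type config struct{ files bool }

// Option mutates the config; WithFiles is the lone option here,
// mirroring HydrateTorrentContentTorrentWithFiles above.
type Option func(*config)

func WithFiles() Option { return func(c *config) { c.files = true } }

func build(options ...Option) config {
	var c config
	for _, o := range options {
		o(&c)
	}
	return c
}

func main() {
	fmt.Printf("%+v\n", build())            // {files:false}
	fmt.Printf("%+v\n", build(WithFiles())) // {files:true}
}
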
7
internal/database/search/order.go
Normal file
@ -0,0 +1,7 @@
package search

//go:generate go run github.com/abice/go-enum --marshal --names --nocase --nocomments --sql --sqlnullstr --values -f order.go -f order_torrent_content.go -f order_torrent_files.go -f order_queue_jobs.go

// OrderDirection represents sort order directions
// ENUM(Ascending, Descending)
type OrderDirection string
181
internal/database/search/order_enum.go
Normal file
@ -0,0 +1,181 @@
// Code generated by go-enum DO NOT EDIT.
// Version:
// Revision:
// Build Date:
// Built By:

package search

import (
	"database/sql/driver"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

const (
	OrderDirectionAscending  OrderDirection = "Ascending"
	OrderDirectionDescending OrderDirection = "Descending"
)

var ErrInvalidOrderDirection = fmt.Errorf("not a valid OrderDirection, try [%s]", strings.Join(_OrderDirectionNames, ", "))

var _OrderDirectionNames = []string{
	string(OrderDirectionAscending),
	string(OrderDirectionDescending),
}

// OrderDirectionNames returns a list of possible string values of OrderDirection.
func OrderDirectionNames() []string {
	tmp := make([]string, len(_OrderDirectionNames))
	copy(tmp, _OrderDirectionNames)
	return tmp
}

// OrderDirectionValues returns a list of the values for OrderDirection
func OrderDirectionValues() []OrderDirection {
	return []OrderDirection{
		OrderDirectionAscending,
		OrderDirectionDescending,
	}
}

// String implements the Stringer interface.
func (x OrderDirection) String() string {
	return string(x)
}

// IsValid provides a quick way to determine if the typed value is
// part of the allowed enumerated values
func (x OrderDirection) IsValid() bool {
	_, err := ParseOrderDirection(string(x))
	return err == nil
}

var _OrderDirectionValue = map[string]OrderDirection{
	"Ascending":  OrderDirectionAscending,
	"ascending":  OrderDirectionAscending,
	"Descending": OrderDirectionDescending,
	"descending": OrderDirectionDescending,
}

// ParseOrderDirection attempts to convert a string to a OrderDirection.
func ParseOrderDirection(name string) (OrderDirection, error) {
	if x, ok := _OrderDirectionValue[name]; ok {
		return x, nil
	}
	// Case insensitive parse, do a separate lookup to prevent unnecessary cost of lowercasing a string if we don't need to.
	if x, ok := _OrderDirectionValue[strings.ToLower(name)]; ok {
		return x, nil
	}
	return OrderDirection(""), fmt.Errorf("%s is %w", name, ErrInvalidOrderDirection)
}

// MarshalText implements the text marshaller method.
func (x OrderDirection) MarshalText() ([]byte, error) {
	return []byte(string(x)), nil
}

// UnmarshalText implements the text unmarshaller method.
func (x *OrderDirection) UnmarshalText(text []byte) error {
	tmp, err := ParseOrderDirection(string(text))
	if err != nil {
		return err
	}
	*x = tmp
	return nil
}

var errOrderDirectionNilPtr = errors.New("value pointer is nil") // one per type for package clashes

// Scan implements the Scanner interface.
func (x *OrderDirection) Scan(value interface{}) (err error) {
	if value == nil {
		*x = OrderDirection("")
		return
	}

	// A wider range of scannable types.
	// driver.Value values at the top of the list for expediency
	switch v := value.(type) {
	case string:
		*x, err = ParseOrderDirection(v)
	case []byte:
		*x, err = ParseOrderDirection(string(v))
	case OrderDirection:
		*x = v
	case *OrderDirection:
		if v == nil {
			return errOrderDirectionNilPtr
		}
		*x = *v
	case *string:
		if v == nil {
			return errOrderDirectionNilPtr
		}
		*x, err = ParseOrderDirection(*v)
	default:
		return errors.New("invalid type for OrderDirection")
	}

	return
}

// Value implements the driver Valuer interface.
func (x OrderDirection) Value() (driver.Value, error) {
	return x.String(), nil
}

type NullOrderDirection struct {
	OrderDirection OrderDirection
	Valid          bool
	Set            bool
}

func NewNullOrderDirection(val interface{}) (x NullOrderDirection) {
	err := x.Scan(val) // yes, we ignore this error, it will just be an invalid value.
	_ = err // make any errcheck linters happy
	return
}

// Scan implements the Scanner interface.
func (x *NullOrderDirection) Scan(value interface{}) (err error) {
	if value == nil {
		x.OrderDirection, x.Valid = OrderDirection(""), false
		return
	}

	err = x.OrderDirection.Scan(value)
	x.Valid = (err == nil)
	return
}

// Value implements the driver Valuer interface.
func (x NullOrderDirection) Value() (driver.Value, error) {
	if !x.Valid {
		return nil, nil
	}
	return x.OrderDirection.String(), nil
}

// MarshalJSON correctly serializes a NullOrderDirection to JSON.
func (n NullOrderDirection) MarshalJSON() ([]byte, error) {
	const nullStr = "null"
	if n.Valid {
		return json.Marshal(n.OrderDirection)
	}
	return []byte(nullStr), nil
}

// UnmarshalJSON correctly deserializes a NullOrderDirection from JSON.
func (n *NullOrderDirection) UnmarshalJSON(b []byte) error {
	n.Set = true
	var x interface{}
	err := json.Unmarshal(b, &x)
	if err != nil {
		return err
	}
	err = n.Scan(x)
	return err
}
65
internal/database/search/order_queue_jobs.go
Normal file
@ -0,0 +1,65 @@
package search

import (
	"github.com/bitmagnet-io/bitmagnet/internal/database/query"
	"github.com/bitmagnet-io/bitmagnet/internal/maps"
	"github.com/bitmagnet-io/bitmagnet/internal/model"
	"gorm.io/gorm/clause"
)

// QueueJobsOrderBy represents sort orders for queue jobs search results
// ENUM(created_at, ran_at, priority)
type QueueJobsOrderBy string

func (ob QueueJobsOrderBy) Clauses(direction OrderDirection) []query.OrderByColumn {
	desc := direction == OrderDirectionDescending
	switch ob {
	case QueueJobsOrderByCreatedAt:
		return []query.OrderByColumn{{
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
					Table: model.TableNameQueueJob,
					Name:  "created_at",
				},
				Desc: desc,
			},
		}}
	case QueueJobsOrderByRanAt:
		return []query.OrderByColumn{{
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
					Table: model.TableNameQueueJob,
					Name:  "ran_at",
				},
				Desc: desc,
			},
		}}
	case QueueJobsOrderByPriority:
		return []query.OrderByColumn{{
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
					Table: model.TableNameQueueJob,
					Name:  "priority",
				},
				Desc: desc,
			},
		}}
	default:
		return []query.OrderByColumn{}
	}
}

type QueueJobsFullOrderBy maps.InsertMap[QueueJobsOrderBy, OrderDirection]

func (fob QueueJobsFullOrderBy) Clauses() []query.OrderByColumn {
	im := maps.InsertMap[QueueJobsOrderBy, OrderDirection](fob)
	clauses := make([]query.OrderByColumn, 0, im.Len())
	for _, ob := range im.Entries() {
		clauses = append(clauses, ob.Key.Clauses(ob.Value)...)
	}
	return clauses
}

func (fob QueueJobsFullOrderBy) Option() query.Option {
	return query.OrderBy(fob.Clauses()...)
}
183
internal/database/search/order_queue_jobs_enum.go
Normal file
@ -0,0 +1,183 @@
// Code generated by go-enum DO NOT EDIT.
// Version:
// Revision:
// Build Date:
// Built By:

package search

import (
	"database/sql/driver"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

const (
	QueueJobsOrderByCreatedAt QueueJobsOrderBy = "created_at"
	QueueJobsOrderByRanAt     QueueJobsOrderBy = "ran_at"
	QueueJobsOrderByPriority  QueueJobsOrderBy = "priority"
)

var ErrInvalidQueueJobsOrderBy = fmt.Errorf("not a valid QueueJobsOrderBy, try [%s]", strings.Join(_QueueJobsOrderByNames, ", "))

var _QueueJobsOrderByNames = []string{
	string(QueueJobsOrderByCreatedAt),
	string(QueueJobsOrderByRanAt),
	string(QueueJobsOrderByPriority),
}

// QueueJobsOrderByNames returns a list of possible string values of QueueJobsOrderBy.
func QueueJobsOrderByNames() []string {
	tmp := make([]string, len(_QueueJobsOrderByNames))
	copy(tmp, _QueueJobsOrderByNames)
	return tmp
}

// QueueJobsOrderByValues returns a list of the values for QueueJobsOrderBy
func QueueJobsOrderByValues() []QueueJobsOrderBy {
	return []QueueJobsOrderBy{
		QueueJobsOrderByCreatedAt,
		QueueJobsOrderByRanAt,
		QueueJobsOrderByPriority,
	}
}

// String implements the Stringer interface.
func (x QueueJobsOrderBy) String() string {
	return string(x)
}

// IsValid provides a quick way to determine if the typed value is
// part of the allowed enumerated values
func (x QueueJobsOrderBy) IsValid() bool {
	_, err := ParseQueueJobsOrderBy(string(x))
	return err == nil
}

var _QueueJobsOrderByValue = map[string]QueueJobsOrderBy{
	"created_at": QueueJobsOrderByCreatedAt,
	"ran_at":     QueueJobsOrderByRanAt,
	"priority":   QueueJobsOrderByPriority,
}

// ParseQueueJobsOrderBy attempts to convert a string to a QueueJobsOrderBy.
func ParseQueueJobsOrderBy(name string) (QueueJobsOrderBy, error) {
	if x, ok := _QueueJobsOrderByValue[name]; ok {
		return x, nil
	}
	// Case insensitive parse, do a separate lookup to prevent unnecessary cost of lowercasing a string if we don't need to.
	if x, ok := _QueueJobsOrderByValue[strings.ToLower(name)]; ok {
		return x, nil
	}
	return QueueJobsOrderBy(""), fmt.Errorf("%s is %w", name, ErrInvalidQueueJobsOrderBy)
}

// MarshalText implements the text marshaller method.
func (x QueueJobsOrderBy) MarshalText() ([]byte, error) {
	return []byte(string(x)), nil
}

// UnmarshalText implements the text unmarshaller method.
func (x *QueueJobsOrderBy) UnmarshalText(text []byte) error {
	tmp, err := ParseQueueJobsOrderBy(string(text))
	if err != nil {
		return err
	}
	*x = tmp
	return nil
}

var errQueueJobsOrderByNilPtr = errors.New("value pointer is nil") // one per type for package clashes

// Scan implements the Scanner interface.
func (x *QueueJobsOrderBy) Scan(value interface{}) (err error) {
	if value == nil {
		*x = QueueJobsOrderBy("")
		return
	}

	// A wider range of scannable types.
	// driver.Value values at the top of the list for expediency
	switch v := value.(type) {
	case string:
		*x, err = ParseQueueJobsOrderBy(v)
	case []byte:
		*x, err = ParseQueueJobsOrderBy(string(v))
	case QueueJobsOrderBy:
		*x = v
	case *QueueJobsOrderBy:
		if v == nil {
			return errQueueJobsOrderByNilPtr
		}
		*x = *v
	case *string:
		if v == nil {
			return errQueueJobsOrderByNilPtr
		}
		*x, err = ParseQueueJobsOrderBy(*v)
	default:
		return errors.New("invalid type for QueueJobsOrderBy")
	}

	return
}

// Value implements the driver Valuer interface.
func (x QueueJobsOrderBy) Value() (driver.Value, error) {
	return x.String(), nil
}

type NullQueueJobsOrderBy struct {
	QueueJobsOrderBy QueueJobsOrderBy
	Valid            bool
	Set              bool
}

func NewNullQueueJobsOrderBy(val interface{}) (x NullQueueJobsOrderBy) {
	err := x.Scan(val) // yes, we ignore this error, it will just be an invalid value.
	_ = err // make any errcheck linters happy
	return
}

// Scan implements the Scanner interface.
func (x *NullQueueJobsOrderBy) Scan(value interface{}) (err error) {
	if value == nil {
		x.QueueJobsOrderBy, x.Valid = QueueJobsOrderBy(""), false
		return
	}

	err = x.QueueJobsOrderBy.Scan(value)
	x.Valid = (err == nil)
	return
}

// Value implements the driver Valuer interface.
func (x NullQueueJobsOrderBy) Value() (driver.Value, error) {
	if !x.Valid {
		return nil, nil
	}
	return x.QueueJobsOrderBy.String(), nil
}

// MarshalJSON correctly serializes a NullQueueJobsOrderBy to JSON.
func (n NullQueueJobsOrderBy) MarshalJSON() ([]byte, error) {
	const nullStr = "null"
	if n.Valid {
		return json.Marshal(n.QueueJobsOrderBy)
	}
	return []byte(nullStr), nil
}

// UnmarshalJSON correctly deserializes a NullQueueJobsOrderBy from JSON.
func (n *NullQueueJobsOrderBy) UnmarshalJSON(b []byte) error {
	n.Set = true
	var x interface{}
	err := json.Unmarshal(b, &x)
	if err != nil {
		return err
	}
	err = n.Scan(x)
	return err
}
@ -7,16 +7,10 @@ import (
	"gorm.io/gorm/clause"
)

//go:generate go run github.com/abice/go-enum --marshal --names --nocase --nocomments --sql --sqlnullstr --values -f order_torrent_content.go

// TorrentContentOrderBy represents sort orders for torrent content search results
// ENUM(Relevance, PublishedAt, UpdatedAt, Size, Files, Seeders, Leechers, Name, InfoHash)
// ENUM(relevance, published_at, updated_at, size, files_count, seeders, leechers, name, info_hash)
type TorrentContentOrderBy string

// OrderDirection represents sort order directions
// ENUM(Ascending, Descending)
type OrderDirection string

func (ob TorrentContentOrderBy) Clauses(direction OrderDirection) []query.OrderByColumn {
	desc := direction == OrderDirectionDescending
	switch ob {
@ -37,7 +31,16 @@ func (ob TorrentContentOrderBy) Clauses(direction OrderDirection) []query.OrderB
					Name: "published_at",
				},
				Desc: desc,
			}}}
			},
		}, {
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
					Table: model.TableNameTorrentContent,
					Name:  "info_hash",
				},
				Desc: desc,
			},
		}}
	case TorrentContentOrderByUpdatedAt:
		return []query.OrderByColumn{{
			OrderByColumn: clause.OrderByColumn{
@ -46,7 +49,15 @@ func (ob TorrentContentOrderBy) Clauses(direction OrderDirection) []query.OrderB
					Name: "updated_at",
				},
				Desc: desc,
			}}}
		}}, {
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
					Table: model.TableNameTorrentContent,
					Name:  "info_hash",
				},
				Desc: desc,
			},
		}}
	case TorrentContentOrderBySize:
		return []query.OrderByColumn{{
			OrderByColumn: clause.OrderByColumn{
@ -55,8 +66,16 @@ func (ob TorrentContentOrderBy) Clauses(direction OrderDirection) []query.OrderB
					Name: "size",
				},
				Desc: desc,
			}}}
	case TorrentContentOrderByFiles:
		}}, {
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
					Table: model.TableNameTorrentContent,
					Name:  "info_hash",
				},
				Desc: desc,
			},
		}}
	case TorrentContentOrderByFilesCount:
		return []query.OrderByColumn{{
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
@ -64,7 +83,15 @@ func (ob TorrentContentOrderBy) Clauses(direction OrderDirection) []query.OrderB
					Raw: true,
				},
				Desc: desc,
			}}}
		}}, {
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
					Table: model.TableNameTorrentContent,
					Name:  "info_hash",
				},
				Desc: desc,
			},
		}}
	case TorrentContentOrderBySeeders:
		return []query.OrderByColumn{{
			OrderByColumn: clause.OrderByColumn{
@ -74,6 +101,14 @@ func (ob TorrentContentOrderBy) Clauses(direction OrderDirection) []query.OrderB
				},
				Desc: desc,
			},
		}, {
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
					Table: model.TableNameTorrentContent,
					Name:  "info_hash",
				},
				Desc: desc,
			},
		}}
	case TorrentContentOrderByLeechers:
		return []query.OrderByColumn{{
@ -84,6 +119,14 @@ func (ob TorrentContentOrderBy) Clauses(direction OrderDirection) []query.OrderB
				},
				Desc: desc,
			},
		}, {
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
					Table: model.TableNameTorrentContent,
					Name:  "info_hash",
				},
				Desc: desc,
			},
		}}
	case TorrentContentOrderByName:
		return []query.OrderByColumn{{
@ -103,7 +146,8 @@ func (ob TorrentContentOrderBy) Clauses(direction OrderDirection) []query.OrderB
				Name: "info_hash",
			},
			Desc: desc,
		}}}
		},
	}}
	default:
		return []query.OrderByColumn{}
	}

@ -15,181 +15,15 @@ import (
)

const (
	OrderDirectionAscending  OrderDirection = "Ascending"
	OrderDirectionDescending OrderDirection = "Descending"
)

var ErrInvalidOrderDirection = fmt.Errorf("not a valid OrderDirection, try [%s]", strings.Join(_OrderDirectionNames, ", "))

var _OrderDirectionNames = []string{
	string(OrderDirectionAscending),
	string(OrderDirectionDescending),
}

// OrderDirectionNames returns a list of possible string values of OrderDirection.
func OrderDirectionNames() []string {
	tmp := make([]string, len(_OrderDirectionNames))
	copy(tmp, _OrderDirectionNames)
	return tmp
}

// OrderDirectionValues returns a list of the values for OrderDirection
func OrderDirectionValues() []OrderDirection {
	return []OrderDirection{
		OrderDirectionAscending,
		OrderDirectionDescending,
	}
}

// String implements the Stringer interface.
func (x OrderDirection) String() string {
	return string(x)
}

// IsValid provides a quick way to determine if the typed value is
// part of the allowed enumerated values
func (x OrderDirection) IsValid() bool {
	_, err := ParseOrderDirection(string(x))
	return err == nil
}

var _OrderDirectionValue = map[string]OrderDirection{
	"Ascending":  OrderDirectionAscending,
	"ascending":  OrderDirectionAscending,
	"Descending": OrderDirectionDescending,
	"descending": OrderDirectionDescending,
}

// ParseOrderDirection attempts to convert a string to a OrderDirection.
func ParseOrderDirection(name string) (OrderDirection, error) {
	if x, ok := _OrderDirectionValue[name]; ok {
		return x, nil
	}
	// Case insensitive parse, do a separate lookup to prevent unnecessary cost of lowercasing a string if we don't need to.
	if x, ok := _OrderDirectionValue[strings.ToLower(name)]; ok {
		return x, nil
	}
	return OrderDirection(""), fmt.Errorf("%s is %w", name, ErrInvalidOrderDirection)
}

// MarshalText implements the text marshaller method.
func (x OrderDirection) MarshalText() ([]byte, error) {
	return []byte(string(x)), nil
}

// UnmarshalText implements the text unmarshaller method.
func (x *OrderDirection) UnmarshalText(text []byte) error {
	tmp, err := ParseOrderDirection(string(text))
	if err != nil {
		return err
	}
	*x = tmp
	return nil
}

var errOrderDirectionNilPtr = errors.New("value pointer is nil") // one per type for package clashes

// Scan implements the Scanner interface.
func (x *OrderDirection) Scan(value interface{}) (err error) {
	if value == nil {
		*x = OrderDirection("")
		return
	}

	// A wider range of scannable types.
	// driver.Value values at the top of the list for expediency
	switch v := value.(type) {
	case string:
		*x, err = ParseOrderDirection(v)
	case []byte:
		*x, err = ParseOrderDirection(string(v))
	case OrderDirection:
		*x = v
	case *OrderDirection:
		if v == nil {
			return errOrderDirectionNilPtr
		}
		*x = *v
	case *string:
		if v == nil {
			return errOrderDirectionNilPtr
		}
		*x, err = ParseOrderDirection(*v)
	default:
		return errors.New("invalid type for OrderDirection")
	}

	return
}

// Value implements the driver Valuer interface.
func (x OrderDirection) Value() (driver.Value, error) {
	return x.String(), nil
}

type NullOrderDirection struct {
	OrderDirection OrderDirection
	Valid          bool
	Set            bool
}

func NewNullOrderDirection(val interface{}) (x NullOrderDirection) {
	err := x.Scan(val) // yes, we ignore this error, it will just be an invalid value.
	_ = err // make any errcheck linters happy
	return
}

// Scan implements the Scanner interface.
func (x *NullOrderDirection) Scan(value interface{}) (err error) {
	if value == nil {
		x.OrderDirection, x.Valid = OrderDirection(""), false
		return
	}

	err = x.OrderDirection.Scan(value)
	x.Valid = (err == nil)
	return
}

// Value implements the driver Valuer interface.
func (x NullOrderDirection) Value() (driver.Value, error) {
	if !x.Valid {
		return nil, nil
	}
	return x.OrderDirection.String(), nil
}

// MarshalJSON correctly serializes a NullOrderDirection to JSON.
func (n NullOrderDirection) MarshalJSON() ([]byte, error) {
	const nullStr = "null"
	if n.Valid {
		return json.Marshal(n.OrderDirection)
	}
	return []byte(nullStr), nil
}

// UnmarshalJSON correctly deserializes a NullOrderDirection from JSON.
func (n *NullOrderDirection) UnmarshalJSON(b []byte) error {
	n.Set = true
	var x interface{}
	err := json.Unmarshal(b, &x)
	if err != nil {
		return err
	}
	err = n.Scan(x)
	return err
}

const (
	TorrentContentOrderByRelevance   TorrentContentOrderBy = "Relevance"
	TorrentContentOrderByPublishedAt TorrentContentOrderBy = "PublishedAt"
	TorrentContentOrderByUpdatedAt   TorrentContentOrderBy = "UpdatedAt"
	TorrentContentOrderBySize        TorrentContentOrderBy = "Size"
	TorrentContentOrderByFiles       TorrentContentOrderBy = "Files"
	TorrentContentOrderBySeeders     TorrentContentOrderBy = "Seeders"
	TorrentContentOrderByLeechers    TorrentContentOrderBy = "Leechers"
	TorrentContentOrderByName        TorrentContentOrderBy = "Name"
	TorrentContentOrderByInfoHash    TorrentContentOrderBy = "InfoHash"
	TorrentContentOrderByRelevance   TorrentContentOrderBy = "relevance"
	TorrentContentOrderByPublishedAt TorrentContentOrderBy = "published_at"
	TorrentContentOrderByUpdatedAt   TorrentContentOrderBy = "updated_at"
	TorrentContentOrderBySize        TorrentContentOrderBy = "size"
	TorrentContentOrderByFilesCount  TorrentContentOrderBy = "files_count"
	TorrentContentOrderBySeeders     TorrentContentOrderBy = "seeders"
	TorrentContentOrderByLeechers    TorrentContentOrderBy = "leechers"
	TorrentContentOrderByName        TorrentContentOrderBy = "name"
	TorrentContentOrderByInfoHash    TorrentContentOrderBy = "info_hash"
)

var ErrInvalidTorrentContentOrderBy = fmt.Errorf("not a valid TorrentContentOrderBy, try [%s]", strings.Join(_TorrentContentOrderByNames, ", "))
@ -199,7 +33,7 @@ var _TorrentContentOrderByNames = []string{
	string(TorrentContentOrderByPublishedAt),
	string(TorrentContentOrderByUpdatedAt),
	string(TorrentContentOrderBySize),
	string(TorrentContentOrderByFiles),
	string(TorrentContentOrderByFilesCount),
	string(TorrentContentOrderBySeeders),
	string(TorrentContentOrderByLeechers),
	string(TorrentContentOrderByName),
@ -220,7 +54,7 @@ func TorrentContentOrderByValues() []TorrentContentOrderBy {
	TorrentContentOrderByPublishedAt,
	TorrentContentOrderByUpdatedAt,
	TorrentContentOrderBySize,
	TorrentContentOrderByFiles,
	TorrentContentOrderByFilesCount,
	TorrentContentOrderBySeeders,
	TorrentContentOrderByLeechers,
	TorrentContentOrderByName,
@ -241,24 +75,15 @@ func (x TorrentContentOrderBy) IsValid() bool {
}

var _TorrentContentOrderByValue = map[string]TorrentContentOrderBy{
	"Relevance":   TorrentContentOrderByRelevance,
	"relevance":   TorrentContentOrderByRelevance,
	"PublishedAt": TorrentContentOrderByPublishedAt,
	"publishedat": TorrentContentOrderByPublishedAt,
	"UpdatedAt":   TorrentContentOrderByUpdatedAt,
	"updatedat":   TorrentContentOrderByUpdatedAt,
	"Size":        TorrentContentOrderBySize,
	"size":        TorrentContentOrderBySize,
	"Files":       TorrentContentOrderByFiles,
	"files":       TorrentContentOrderByFiles,
	"Seeders":     TorrentContentOrderBySeeders,
	"seeders":     TorrentContentOrderBySeeders,
	"Leechers":    TorrentContentOrderByLeechers,
	"leechers":    TorrentContentOrderByLeechers,
	"Name":        TorrentContentOrderByName,
	"name":        TorrentContentOrderByName,
	"InfoHash":    TorrentContentOrderByInfoHash,
	"infohash":    TorrentContentOrderByInfoHash,
	"relevance":    TorrentContentOrderByRelevance,
	"published_at": TorrentContentOrderByPublishedAt,
	"updated_at":   TorrentContentOrderByUpdatedAt,
	"size":         TorrentContentOrderBySize,
	"files_count":  TorrentContentOrderByFilesCount,
	"seeders":      TorrentContentOrderBySeeders,
	"leechers":     TorrentContentOrderByLeechers,
	"name":         TorrentContentOrderByName,
	"info_hash":    TorrentContentOrderByInfoHash,
}

// ParseTorrentContentOrderBy attempts to convert a string to a TorrentContentOrderBy.

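Each hunk in the Clauses method above appends the unique info_hash column as a secondary ORDER BY key after the requested sort column, so rows with equal primary values keep a stable, deterministic order across pages. A self-contained sketch of the tie-breaking idea on plain slices (illustrative data only):

package main

import (
	"fmt"
	"sort"
)

type row struct {
	size int
	hash string
}

func main() {
	rows := []row{{10, "bb"}, {10, "aa"}, {7, "cc"}}
	sort.Slice(rows, func(i, j int) bool {
		if rows[i].size != rows[j].size {
			return rows[i].size > rows[j].size // primary key: size, descending
		}
		return rows[i].hash > rows[j].hash // tie-break: hash, same direction
	})
	fmt.Println(rows) // [{10 bb} {10 aa} {7 cc}] - ties resolved consistently
}
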
75
internal/database/search/order_torrent_files.go
Normal file
@ -0,0 +1,75 @@
package search

import (
	"github.com/bitmagnet-io/bitmagnet/internal/database/query"
	"github.com/bitmagnet-io/bitmagnet/internal/maps"
	"github.com/bitmagnet-io/bitmagnet/internal/model"
	"gorm.io/gorm/clause"
)

// TorrentFilesOrderBy represents sort orders for torrent content search results
// ENUM(index, path, extension, size)
type TorrentFilesOrderBy string

func (ob TorrentFilesOrderBy) Clauses(direction OrderDirection) []query.OrderByColumn {
	desc := direction == OrderDirectionDescending
	switch ob {
	case TorrentFilesOrderByIndex:
		return []query.OrderByColumn{{
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
					Table: model.TableNameTorrentFile,
					Name:  "index",
				},
				Desc: desc,
			},
		}}
	case TorrentFilesOrderByPath:
		return []query.OrderByColumn{{
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
					Table: model.TableNameTorrentFile,
					Name:  "path",
				},
				Desc: desc,
			},
		}}
	case TorrentFilesOrderByExtension:
		return []query.OrderByColumn{{
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
					Table: model.TableNameTorrentFile,
					Name:  "extension",
				},
				Desc: desc,
			},
		}}
	case TorrentFilesOrderBySize:
		return []query.OrderByColumn{{
			OrderByColumn: clause.OrderByColumn{
				Column: clause.Column{
					Table: model.TableNameTorrentFile,
					Name:  "size",
				},
				Desc: desc,
			},
		}}
	default:
		return []query.OrderByColumn{}
	}
}

type TorrentFilesFullOrderBy maps.InsertMap[TorrentFilesOrderBy, OrderDirection]

func (fob TorrentFilesFullOrderBy) Clauses() []query.OrderByColumn {
	im := maps.InsertMap[TorrentFilesOrderBy, OrderDirection](fob)
	clauses := make([]query.OrderByColumn, 0, im.Len())
	for _, ob := range im.Entries() {
		clauses = append(clauses, ob.Key.Clauses(ob.Value)...)
	}
	return clauses
}

func (fob TorrentFilesFullOrderBy) Option() query.Option {
	return query.OrderBy(fob.Clauses()...)
}
187
internal/database/search/order_torrent_files_enum.go
Normal file
@ -0,0 +1,187 @@
// Code generated by go-enum DO NOT EDIT.
// Version:
// Revision:
// Build Date:
// Built By:

package search

import (
	"database/sql/driver"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

const (
	TorrentFilesOrderByIndex     TorrentFilesOrderBy = "index"
	TorrentFilesOrderByPath      TorrentFilesOrderBy = "path"
	TorrentFilesOrderByExtension TorrentFilesOrderBy = "extension"
	TorrentFilesOrderBySize      TorrentFilesOrderBy = "size"
)

var ErrInvalidTorrentFilesOrderBy = fmt.Errorf("not a valid TorrentFilesOrderBy, try [%s]", strings.Join(_TorrentFilesOrderByNames, ", "))

var _TorrentFilesOrderByNames = []string{
	string(TorrentFilesOrderByIndex),
	string(TorrentFilesOrderByPath),
	string(TorrentFilesOrderByExtension),
	string(TorrentFilesOrderBySize),
}

// TorrentFilesOrderByNames returns a list of possible string values of TorrentFilesOrderBy.
func TorrentFilesOrderByNames() []string {
	tmp := make([]string, len(_TorrentFilesOrderByNames))
	copy(tmp, _TorrentFilesOrderByNames)
	return tmp
}

// TorrentFilesOrderByValues returns a list of the values for TorrentFilesOrderBy
func TorrentFilesOrderByValues() []TorrentFilesOrderBy {
	return []TorrentFilesOrderBy{
		TorrentFilesOrderByIndex,
		TorrentFilesOrderByPath,
		TorrentFilesOrderByExtension,
		TorrentFilesOrderBySize,
	}
}

// String implements the Stringer interface.
func (x TorrentFilesOrderBy) String() string {
	return string(x)
}

// IsValid provides a quick way to determine if the typed value is
// part of the allowed enumerated values
func (x TorrentFilesOrderBy) IsValid() bool {
	_, err := ParseTorrentFilesOrderBy(string(x))
	return err == nil
}

var _TorrentFilesOrderByValue = map[string]TorrentFilesOrderBy{
	"index":     TorrentFilesOrderByIndex,
	"path":      TorrentFilesOrderByPath,
	"extension": TorrentFilesOrderByExtension,
	"size":      TorrentFilesOrderBySize,
}

// ParseTorrentFilesOrderBy attempts to convert a string to a TorrentFilesOrderBy.
func ParseTorrentFilesOrderBy(name string) (TorrentFilesOrderBy, error) {
	if x, ok := _TorrentFilesOrderByValue[name]; ok {
		return x, nil
	}
	// Case insensitive parse, do a separate lookup to prevent unnecessary cost of lowercasing a string if we don't need to.
	if x, ok := _TorrentFilesOrderByValue[strings.ToLower(name)]; ok {
		return x, nil
	}
	return TorrentFilesOrderBy(""), fmt.Errorf("%s is %w", name, ErrInvalidTorrentFilesOrderBy)
}

// MarshalText implements the text marshaller method.
func (x TorrentFilesOrderBy) MarshalText() ([]byte, error) {
	return []byte(string(x)), nil
}

// UnmarshalText implements the text unmarshaller method.
func (x *TorrentFilesOrderBy) UnmarshalText(text []byte) error {
	tmp, err := ParseTorrentFilesOrderBy(string(text))
	if err != nil {
		return err
	}
	*x = tmp
	return nil
}

var errTorrentFilesOrderByNilPtr = errors.New("value pointer is nil") // one per type for package clashes

// Scan implements the Scanner interface.
func (x *TorrentFilesOrderBy) Scan(value interface{}) (err error) {
	if value == nil {
		*x = TorrentFilesOrderBy("")
		return
	}

	// A wider range of scannable types.
	// driver.Value values at the top of the list for expediency
	switch v := value.(type) {
	case string:
		*x, err = ParseTorrentFilesOrderBy(v)
	case []byte:
		*x, err = ParseTorrentFilesOrderBy(string(v))
	case TorrentFilesOrderBy:
		*x = v
	case *TorrentFilesOrderBy:
		if v == nil {
			return errTorrentFilesOrderByNilPtr
		}
		*x = *v
	case *string:
		if v == nil {
			return errTorrentFilesOrderByNilPtr
		}
		*x, err = ParseTorrentFilesOrderBy(*v)
	default:
		return errors.New("invalid type for TorrentFilesOrderBy")
	}

	return
}

// Value implements the driver Valuer interface.
func (x TorrentFilesOrderBy) Value() (driver.Value, error) {
	return x.String(), nil
}

type NullTorrentFilesOrderBy struct {
	TorrentFilesOrderBy TorrentFilesOrderBy
	Valid               bool
	Set                 bool
}

func NewNullTorrentFilesOrderBy(val interface{}) (x NullTorrentFilesOrderBy) {
	err := x.Scan(val) // yes, we ignore this error, it will just be an invalid value.
	_ = err // make any errcheck linters happy
	return
}

// Scan implements the Scanner interface.
func (x *NullTorrentFilesOrderBy) Scan(value interface{}) (err error) {
	if value == nil {
		x.TorrentFilesOrderBy, x.Valid = TorrentFilesOrderBy(""), false
		return
	}

	err = x.TorrentFilesOrderBy.Scan(value)
	x.Valid = (err == nil)
	return
}

// Value implements the driver Valuer interface.
func (x NullTorrentFilesOrderBy) Value() (driver.Value, error) {
	if !x.Valid {
		return nil, nil
	}
	return x.TorrentFilesOrderBy.String(), nil
}

// MarshalJSON correctly serializes a NullTorrentFilesOrderBy to JSON.
func (n NullTorrentFilesOrderBy) MarshalJSON() ([]byte, error) {
	const nullStr = "null"
	if n.Valid {
		return json.Marshal(n.TorrentFilesOrderBy)
	}
	return []byte(nullStr), nil
}

// UnmarshalJSON correctly deserializes a NullTorrentFilesOrderBy from JSON.
func (n *NullTorrentFilesOrderBy) UnmarshalJSON(b []byte) error {
	n.Set = true
	var x interface{}
	err := json.Unmarshal(b, &x)
	if err != nil {
		return err
	}
	err = n.Scan(x)
	return err
}
@ -8,8 +8,10 @@ import (

type Search interface {
	ContentSearch
	QueueJobSearch
	TorrentSearch
	TorrentContentSearch
	TorrentFilesSearch
}

type search struct {

28
internal/database/search/search_queue_jobs.go
Normal file
@ -0,0 +1,28 @@
|
||||
package search
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/bitmagnet-io/bitmagnet/internal/database/dao"
|
||||
"github.com/bitmagnet-io/bitmagnet/internal/database/query"
|
||||
"github.com/bitmagnet-io/bitmagnet/internal/model"
|
||||
)
|
||||
|
||||
type QueueJobResult = query.GenericResult[model.QueueJob]
|
||||
|
||||
type QueueJobSearch interface {
|
||||
QueueJobs(ctx context.Context, options ...query.Option) (result QueueJobResult, err error)
|
||||
}
|
||||
|
||||
func (s search) QueueJobs(ctx context.Context, options ...query.Option) (result QueueJobResult, err error) {
|
||||
return query.GenericQuery[model.QueueJob](
|
||||
ctx,
|
||||
s.q,
|
||||
query.Options(append([]query.Option{query.SelectAll()}, options...)...),
|
||||
model.TableNameQueueJob,
|
||||
func(ctx context.Context, q *dao.Query) query.SubQuery {
|
||||
return query.GenericSubQuery[dao.IQueueJobDo]{
|
||||
SubQuery: q.QueueJob.WithContext(ctx).ReadDB(),
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
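A call-site sketch for the new QueueJobSearch (not in the diff; the status constant name is assumed, and any query.Option shown elsewhere in this commit can be passed):

package main

import (
    "context"

    "github.com/bitmagnet-io/bitmagnet/internal/database/query"
    "github.com/bitmagnet-io/bitmagnet/internal/database/search"
    "github.com/bitmagnet-io/bitmagnet/internal/model"
)

func failedJobs(ctx context.Context, s search.Search) (search.QueueJobResult, error) {
    // model.QueueJobStatusFailed is an assumed constant name; any
    // model.QueueJobStatus value works with the criteria helper used below.
    return s.QueueJobs(ctx, query.Where(
        search.QueueJobStatusCriteria(model.QueueJobStatusFailed),
    ))
}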
@ -49,23 +49,6 @@ func TorrentContentDefaultOption() query.Option {
                Desc: true,
            },
        },
        query.OrderByColumn{
            OrderByColumn: clause.OrderByColumn{
                Column: clause.Column{
                    Table: clause.CurrentTable,
                    Name:  "updated_at",
                },
                Desc: true,
            },
        },
        query.OrderByColumn{
            OrderByColumn: clause.OrderByColumn{
                Column: clause.Column{
                    Table: clause.CurrentTable,
                    Name:  "info_hash",
                },
            },
        },
    ),
)
}
28
internal/database/search/search_torrent_files.go
Normal file
@ -0,0 +1,28 @@
package search

import (
    "context"
    "github.com/bitmagnet-io/bitmagnet/internal/database/dao"
    "github.com/bitmagnet-io/bitmagnet/internal/database/query"
    "github.com/bitmagnet-io/bitmagnet/internal/model"
)

type TorrentFilesResult = query.GenericResult[model.TorrentFile]

type TorrentFilesSearch interface {
    TorrentFiles(ctx context.Context, options ...query.Option) (TorrentFilesResult, error)
}

func (s search) TorrentFiles(ctx context.Context, options ...query.Option) (TorrentFilesResult, error) {
    return query.GenericQuery[model.TorrentFile](
        ctx,
        s.q,
        query.Options(append([]query.Option{query.SelectAll()}, options...)...),
        model.TableNameTorrentFile,
        func(ctx context.Context, q *dao.Query) query.SubQuery {
            return query.GenericSubQuery[dao.ITorrentFileDo]{
                SubQuery: q.TorrentFile.WithContext(ctx).ReadDB(),
            }
        },
    )
}
@ -32,18 +32,6 @@ func (s search) Torrents(ctx context.Context, options ...query.Option) (Torrents
    )
}

func TorrentDefaultPreload() query.Option {
    return query.Preload(func(q *dao.Query) []field.RelationField {
        return []field.RelationField{
            q.Torrent.Sources.RelationField,
            q.Torrent.Sources.TorrentSource.RelationField,
            q.Torrent.Files.RelationField,
            q.Torrent.Hint.RelationField,
            q.Torrent.Tags.RelationField,
        }
    })
}

type TorrentsWithMissingInfoHashesResult struct {
    Torrents          []model.Torrent
    MissingInfoHashes []protocol.ID
40
internal/dhtcrawler/dhtcrawler_health_check/check.go
Normal file
@ -0,0 +1,40 @@
package dhtcrawler_health_check

import (
    "context"
    "errors"
    "github.com/bitmagnet-io/bitmagnet/internal/concurrency"
    "github.com/bitmagnet-io/bitmagnet/internal/health"
    "github.com/bitmagnet-io/bitmagnet/internal/protocol/dht/server"
    "time"
)

func NewCheck(
    dhtCrawlerActive *concurrency.AtomicValue[bool],
    lastResponses *concurrency.AtomicValue[server.LastResponses],
) health.Check {
    return health.Check{
        Name: "dht",
        IsActive: func() bool {
            return dhtCrawlerActive.Get()
        },
        Timeout: time.Second,
        Check: func(ctx context.Context) error {
            lr := lastResponses.Get()
            if lr.StartTime.IsZero() {
                return nil
            }
            now := time.Now()
            if lr.LastSuccess.IsZero() {
                if now.Sub(lr.StartTime) < 30*time.Second {
                    return nil
                }
                return errors.New("no response within 30 seconds")
            }
            if now.Sub(lr.LastSuccess) > time.Minute {
                return errors.New("no successful responses within last minute")
            }
            return nil
        },
    }
}
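The thresholds above (a 30-second grace period after startup, then a one-minute staleness window) lend themselves to a table-driven test; a sketch, assuming server.LastResponses is a plain struct carrying the StartTime/LastSuccess fields used above:

package dhtcrawler_health_check

import (
    "context"
    "testing"
    "time"

    "github.com/bitmagnet-io/bitmagnet/internal/concurrency"
    "github.com/bitmagnet-io/bitmagnet/internal/protocol/dht/server"
)

func TestCheckReportsStaleness(t *testing.T) {
    now := time.Now()
    active := &concurrency.AtomicValue[bool]{}
    active.Set(true)
    lr := &concurrency.AtomicValue[server.LastResponses]{}
    // started long ago, last success over a minute ago: the check should fail
    lr.Set(server.LastResponses{
        StartTime:   now.Add(-time.Hour),
        LastSuccess: now.Add(-2 * time.Minute),
    })
    check := NewCheck(active, lr)
    if err := check.Check(context.Background()); err == nil {
        t.Error("expected an error for a last success older than one minute")
    }
}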
30
internal/dhtcrawler/dhtcrawler_health_check/factory.go
Normal file
@ -0,0 +1,30 @@
package dhtcrawler_health_check

import (
    "github.com/bitmagnet-io/bitmagnet/internal/concurrency"
    "github.com/bitmagnet-io/bitmagnet/internal/health"
    "github.com/bitmagnet-io/bitmagnet/internal/protocol/dht/server"
    "go.uber.org/fx"
    "time"
)

type Params struct {
    fx.In
    DhtCrawlerActive       *concurrency.AtomicValue[bool]                 `name:"dht_crawler_active"`
    DhtServerLastResponses *concurrency.AtomicValue[server.LastResponses] `name:"dht_server_last_responses"`
}

type Result struct {
    fx.Out
    Option health.CheckerOption `group:"health_check_options"`
}

func New(params Params) Result {
    return Result{
        Option: health.WithPeriodicCheck(
            time.Second*10,
            time.Second*1,
            NewCheck(params.DhtCrawlerActive, params.DhtServerLastResponses),
        ),
    }
}
@ -4,6 +4,7 @@ import (
    adht "github.com/anacrolix/dht/v2"
    "github.com/bitmagnet-io/bitmagnet/internal/boilerplate/config/configfx"
    "github.com/bitmagnet-io/bitmagnet/internal/dhtcrawler"
    "github.com/bitmagnet-io/bitmagnet/internal/dhtcrawler/dhtcrawler_health_check"
    "go.uber.org/fx"
    "net"
    "net/netip"
@ -30,6 +31,7 @@ func New() fx.Option {
        },
        dhtcrawler.New,
        dhtcrawler.NewDiscoveredNodes,
        dhtcrawler_health_check.New,
    ),
)
}
@ -36,11 +36,13 @@ type Params struct {

type Result struct {
    fx.Out
    Worker         worker.Worker        `group:"workers"`
    PersistedTotal prometheus.Collector `group:"prometheus_collectors"`
    Worker           worker.Worker                  `group:"workers"`
    PersistedTotal   prometheus.Collector           `group:"prometheus_collectors"`
    DhtCrawlerActive *concurrency.AtomicValue[bool] `name:"dht_crawler_active"`
}

func New(params Params) Result {
    active := &concurrency.AtomicValue[bool]{}
    var c crawler
    persistedTotal := prometheus.NewCounterVec(prometheus.CounterOpts{
        Namespace: "bitmagnet",
@ -53,6 +55,7 @@ func New(params Params) Result {
        "dht_crawler",
        fx.Hook{
            OnStart: func(context.Context) error {
                active.Set(true)
                scalingFactor := int(params.Config.ScalingFactor)
                cl, err := params.Client.Get()
                if err != nil {
@ -111,6 +114,7 @@ func New(params Params) Result {
                return nil
            },
            OnStop: func(context.Context) error {
                active.Set(false)
                if c.stopped != nil {
                    close(c.stopped)
                }
@ -118,6 +122,7 @@ func New(params Params) Result {
            },
        },
        ),
        PersistedTotal: persistedTotal,
        PersistedTotal:   persistedTotal,
        DhtCrawlerActive: active,
    }
}
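The `name:"dht_crawler_active"` tag on the Result above is the other half of the handshake with the health-check factory's Params; fx matches annotated values by name. A self-contained sketch of that pattern in isolation (illustrative only, not repository code):

package main

import (
    "fmt"

    "go.uber.org/fx"
)

type producerOut struct {
    fx.Out
    Active *bool `name:"dht_crawler_active"`
}

type consumerIn struct {
    fx.In
    Active *bool `name:"dht_crawler_active"`
}

func main() {
    fx.New(
        fx.Provide(func() producerOut {
            b := true
            return producerOut{Active: &b}
        }),
        // Invoke functions run while the app graph is being built
        fx.Invoke(func(in consumerIn) { fmt.Println(*in.Active) }),
    )
}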
1
internal/dhtcrawler/metrics/metrics.go
Normal file
@ -0,0 +1 @@
package metrics
@ -3,12 +3,13 @@ package config

import (
    "github.com/bitmagnet-io/bitmagnet/internal/boilerplate/lazy"
    "github.com/bitmagnet-io/bitmagnet/internal/gql"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/resolvers"
    "go.uber.org/fx"
)

type Params struct {
    fx.In
    ResolverRoot lazy.Lazy[gql.ResolverRoot]
    ResolverRoot lazy.Lazy[*resolvers.Resolver]
}

func New(p Params) lazy.Lazy[gql.Config] {
@ -28,5 +28,7 @@ var Enums = []enum{
    newEnum("VideoModifier", model.VideoModifierNames()),
    newEnum("VideoResolution", model.VideoResolutionNames()),
    newEnum("VideoSource", model.VideoSourceNames()),
    newEnum("TorrentContentOrderBy", search.TorrentContentOrderByNames()),
    newEnum("TorrentContentOrderByField", search.TorrentContentOrderByNames()),
    newEnum("TorrentFilesOrderByField", search.TorrentFilesOrderByNames()),
    newEnum("QueueJobsOrderByField", search.QueueJobsOrderByNames()),
}
File diff suppressed because it is too large
@ -3,12 +3,17 @@ package gqlfx

import (
    "github.com/99designs/gqlgen/graphql"
    "github.com/bitmagnet-io/bitmagnet/internal/boilerplate/lazy"
    "github.com/bitmagnet-io/bitmagnet/internal/boilerplate/worker"
    "github.com/bitmagnet-io/bitmagnet/internal/database/dao"
    "github.com/bitmagnet-io/bitmagnet/internal/database/search"
    "github.com/bitmagnet-io/bitmagnet/internal/gql"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/config"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/httpserver"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/resolvers"
    "github.com/bitmagnet-io/bitmagnet/internal/health"
    "github.com/bitmagnet-io/bitmagnet/internal/metrics/queuemetrics"
    "github.com/bitmagnet-io/bitmagnet/internal/metrics/torrentmetrics"
    "github.com/bitmagnet-io/bitmagnet/internal/queue/manager"
    "go.uber.org/fx"
)

@ -18,22 +23,6 @@ func New() fx.Option {
        fx.Provide(
            config.New,
            httpserver.New,
            func(
                ls lazy.Lazy[search.Search],
                ld lazy.Lazy[*dao.Query],
            ) lazy.Lazy[gql.ResolverRoot] {
                return lazy.New(func() (gql.ResolverRoot, error) {
                    s, err := ls.Get()
                    if err != nil {
                        return nil, err
                    }
                    d, err := ld.Get()
                    if err != nil {
                        return nil, err
                    }
                    return resolvers.New(d, s), nil
                })
            },
            func(
                lcfg lazy.Lazy[gql.Config],
            ) lazy.Lazy[graphql.ExecutableSchema] {
@ -46,5 +35,70 @@ func New() fx.Option {
                })
            },
        ),
        fx.Provide(
            func(p Params) Result {
                return Result{
                    Resolver: lazy.New(func() (*resolvers.Resolver, error) {
                        ch, err := p.Checker.Get()
                        if err != nil {
                            return nil, err
                        }
                        s, err := p.Search.Get()
                        if err != nil {
                            return nil, err
                        }
                        d, err := p.Dao.Get()
                        if err != nil {
                            return nil, err
                        }
                        qmc, err := p.QueueMetricsClient.Get()
                        if err != nil {
                            return nil, err
                        }
                        qm, err := p.QueueManager.Get()
                        if err != nil {
                            return nil, err
                        }
                        tm, err := p.TorrentMetricsClient.Get()
                        if err != nil {
                            return nil, err
                        }
                        return &resolvers.Resolver{
                            Dao:                  d,
                            Search:               s,
                            Checker:              ch,
                            QueueMetricsClient:   qmc,
                            QueueManager:         qm,
                            TorrentMetricsClient: tm,
                        }, nil
                    }),
                }
            },
        ),
        // inject resolver dependencies avoiding a circular dependency:
        fx.Invoke(func(
            resolver lazy.Lazy[*resolvers.Resolver],
            workers worker.Registry,
        ) {
            resolver.Decorate(func(r *resolvers.Resolver) (*resolvers.Resolver, error) {
                r.Workers = workers
                return r, nil
            })
        }),
    )
}

type Params struct {
    fx.In
    Search               lazy.Lazy[search.Search]
    Dao                  lazy.Lazy[*dao.Query]
    Checker              lazy.Lazy[health.Checker]
    QueueMetricsClient   lazy.Lazy[queuemetrics.Client]
    QueueManager         lazy.Lazy[manager.Manager]
    TorrentMetricsClient lazy.Lazy[torrentmetrics.Client]
}

type Result struct {
    fx.Out
    Resolver lazy.Lazy[*resolvers.Resolver]
}
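The fx.Invoke above leans on lazy.Lazy's Decorate to patch the resolver after the provider graph is wired, which is what breaks the resolvers -> workers -> resolvers cycle. A minimal sketch of the same pattern in isolation (assuming lazy.New, Decorate and Get behave as they are used above):

package main

import (
    "fmt"

    "github.com/bitmagnet-io/bitmagnet/internal/boilerplate/lazy"
)

type resolver struct {
    workers []string // stand-in for worker.Registry
}

func main() {
    l := lazy.New(func() (*resolver, error) {
        return &resolver{}, nil
    })
    // registered before the first Get; applied when the value is built
    l.Decorate(func(r *resolver) (*resolver, error) {
        r.workers = append(r.workers, "queue_server")
        return r, nil
    })
    r, err := l.Get()
    fmt.Println(r.workers, err)
}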
@ -110,6 +110,9 @@ models:
  DateTime:
    model:
      - github.com/99designs/gqlgen/graphql.Time
  Duration:
    model:
      - github.com/99designs/gqlgen/graphql.Duration
  Hash20:
    model:
      - github.com/bitmagnet-io/bitmagnet/internal/protocol.ID
@ -153,6 +156,9 @@ models:
  TorrentContentResult:
    model:
      - github.com/bitmagnet-io/bitmagnet/internal/database/search.TorrentContentResult
  TorrentFilesQueryResult:
    model:
      - github.com/bitmagnet-io/bitmagnet/internal/database/search.TorrentFilesResult
  TorrentSuggestTagsResult:
    model:
      - github.com/bitmagnet-io/bitmagnet/internal/database/search.TorrentSuggestTagsResult
@ -162,3 +168,15 @@ models:
  FacetAggregationInput:
    model:
      - github.com/bitmagnet-io/bitmagnet/internal/database/query.FacetAggregationConfig
  QueueMetricsBucket:
    model:
      - github.com/bitmagnet-io/bitmagnet/internal/metrics/queuemetrics.Bucket
  QueuePurgeJobsInput:
    model:
      - github.com/bitmagnet-io/bitmagnet/internal/queue/manager.PurgeJobsRequest
  QueueEnqueueReprocessTorrentsBatchInput:
    model:
      - github.com/bitmagnet-io/bitmagnet/internal/queue/manager.EnqueueReprocessTorrentsBatchRequest
  TorrentMetricsBucket:
    model:
      - github.com/bitmagnet-io/bitmagnet/internal/metrics/torrentmetrics.Bucket
@ -134,6 +134,32 @@ func videoSourceFacet(input gen.VideoSourceFacetInput) q.Facet {
    return facet(input.Aggregate, graphql.Omittable[*model.FacetLogic]{}, input.Filter, search.VideoSourceFacet)
}

func queueJobQueueFacet(input gen.QueueJobQueueFacetInput) q.Facet {
    var filter graphql.Omittable[[]*string]
    if f, ok := input.Filter.ValueOK(); ok {
        filterValues := make([]*string, 0, len(f))
        for _, v := range f {
            vv := v
            filterValues = append(filterValues, &vv)
        }
        filter = graphql.OmittableOf[[]*string](filterValues)
    }
    return facet(input.Aggregate, graphql.Omittable[*model.FacetLogic]{}, filter, search.QueueJobQueueFacet)
}

func queueJobStatusFacet(input gen.QueueJobStatusFacetInput) q.Facet {
    var filter graphql.Omittable[[]*model.QueueJobStatus]
    if f, ok := input.Filter.ValueOK(); ok {
        filterValues := make([]*model.QueueJobStatus, 0, len(f))
        for _, v := range f {
            vv := v
            filterValues = append(filterValues, &vv)
        }
        filter = graphql.OmittableOf[[]*model.QueueJobStatus](filterValues)
    }
    return facet(input.Aggregate, graphql.Omittable[*model.FacetLogic]{}, filter, search.QueueJobStatusFacet)
}

func aggs[T any, Agg comparable](
    items q.AggregationItems,
    parse func(string) (T, error),
@ -220,3 +246,15 @@ func videoSourceAggs(items q.AggregationItems) ([]gen.VideoSourceAgg, error) {
        return gen.VideoSourceAgg{Value: value, Label: label, Count: int(count), IsEstimate: isEstimate}
    })
}

func queueJobQueueAggs(items q.AggregationItems) ([]gen.QueueJobQueueAgg, error) {
    return aggs(items, func(s string) (string, error) { return s, nil }, func(value *string, label string, count uint, isEstimate bool) gen.QueueJobQueueAgg {
        return gen.QueueJobQueueAgg{Value: *value, Label: label, Count: int(count)}
    })
}

func queueJobStatusAggs(items q.AggregationItems) ([]gen.QueueJobStatusAgg, error) {
    return aggs(items, model.ParseQueueJobStatus, func(value *model.QueueJobStatus, label string, count uint, isEstimate bool) gen.QueueJobStatusAgg {
        return gen.QueueJobStatusAgg{Value: *value, Label: label, Count: int(count)}
    })
}
@ -6,8 +6,11 @@ import (
    "fmt"
    "io"
    "strconv"
    "time"

    "github.com/99designs/gqlgen/graphql"
    "github.com/bitmagnet-io/bitmagnet/internal/metrics/queuemetrics"
    "github.com/bitmagnet-io/bitmagnet/internal/metrics/torrentmetrics"
    "github.com/bitmagnet-io/bitmagnet/internal/model"
)

@ -36,6 +39,18 @@ type GenreFacetInput struct {
    Filter graphql.Omittable[[]string] `json:"filter,omitempty"`
}

type HealthCheck struct {
    Key       string       `json:"key"`
    Status    HealthStatus `json:"status"`
    Timestamp time.Time    `json:"timestamp"`
    Error     *string      `json:"error,omitempty"`
}

type HealthQuery struct {
    Status HealthStatus  `json:"status"`
    Checks []HealthCheck `json:"checks"`
}

type LanguageAgg struct {
    Value model.Language `json:"value"`
    Label string         `json:"label"`
@ -54,6 +69,55 @@ type Mutation struct {

type Query struct {
}

type QueueJobQueueAgg struct {
    Value string `json:"value"`
    Label string `json:"label"`
    Count int    `json:"count"`
}

type QueueJobQueueFacetInput struct {
    Aggregate graphql.Omittable[*bool]    `json:"aggregate,omitempty"`
    Filter    graphql.Omittable[[]string] `json:"filter,omitempty"`
}

type QueueJobStatusAgg struct {
    Value model.QueueJobStatus `json:"value"`
    Label string               `json:"label"`
    Count int                  `json:"count"`
}

type QueueJobStatusFacetInput struct {
    Aggregate graphql.Omittable[*bool]                  `json:"aggregate,omitempty"`
    Filter    graphql.Omittable[[]model.QueueJobStatus] `json:"filter,omitempty"`
}

type QueueJobsAggregations struct {
    Queue  []QueueJobQueueAgg  `json:"queue,omitempty"`
    Status []QueueJobStatusAgg `json:"status,omitempty"`
}

type QueueJobsFacetsInput struct {
    Status graphql.Omittable[*QueueJobStatusFacetInput] `json:"status,omitempty"`
    Queue  graphql.Omittable[*QueueJobQueueFacetInput]  `json:"queue,omitempty"`
}

type QueueJobsOrderByInput struct {
    Field      QueueJobsOrderByField    `json:"field"`
    Descending graphql.Omittable[*bool] `json:"descending,omitempty"`
}

type QueueMetricsQueryInput struct {
    BucketDuration MetricsBucketDuration                     `json:"bucketDuration"`
    Statuses       graphql.Omittable[[]model.QueueJobStatus] `json:"statuses,omitempty"`
    Queues         graphql.Omittable[[]string]               `json:"queues,omitempty"`
    StartTime      graphql.Omittable[*time.Time]             `json:"startTime,omitempty"`
    EndTime        graphql.Omittable[*time.Time]             `json:"endTime,omitempty"`
}

type QueueMetricsQueryResult struct {
    Buckets []queuemetrics.Bucket `json:"buckets"`
}

type ReleaseYearAgg struct {
    Value *model.Year `json:"value,omitempty"`
    Label string      `json:"label"`
@ -71,10 +135,6 @@ type SuggestTagsQueryInput struct {
    Exclusions graphql.Omittable[[]string] `json:"exclusions,omitempty"`
}

type SystemQuery struct {
    Version string `json:"version"`
}

type TorrentContentAggregations struct {
    ContentType   []ContentTypeAgg   `json:"contentType,omitempty"`
    TorrentSource []TorrentSourceAgg `json:"torrentSource,omitempty"`
@ -100,8 +160,8 @@ type TorrentContentFacetsInput struct {
}

type TorrentContentOrderByInput struct {
    Field      TorrentContentOrderBy    `json:"field"`
    Descending graphql.Omittable[*bool] `json:"descending,omitempty"`
    Field      TorrentContentOrderByField `json:"field"`
    Descending graphql.Omittable[*bool]   `json:"descending,omitempty"`
}

type TorrentFileTypeAgg struct {
@ -117,6 +177,26 @@ type TorrentFileTypeFacetInput struct {
    Filter graphql.Omittable[[]model.FileType] `json:"filter,omitempty"`
}

type TorrentFilesOrderByInput struct {
    Field      TorrentFilesOrderByField `json:"field"`
    Descending graphql.Omittable[*bool] `json:"descending,omitempty"`
}

type TorrentListSourcesResult struct {
    Sources []model.TorrentSource `json:"sources"`
}

type TorrentMetricsQueryInput struct {
    BucketDuration MetricsBucketDuration         `json:"bucketDuration"`
    Sources        graphql.Omittable[[]string]   `json:"sources,omitempty"`
    StartTime      graphql.Omittable[*time.Time] `json:"startTime,omitempty"`
    EndTime        graphql.Omittable[*time.Time] `json:"endTime,omitempty"`
}

type TorrentMetricsQueryResult struct {
    Buckets []torrentmetrics.Bucket `json:"buckets"`
}

type TorrentSourceAgg struct {
    Value string `json:"value"`
    Label string `json:"label"`
@ -167,57 +247,246 @@ type VideoSourceFacetInput struct {
    Filter graphql.Omittable[[]*model.VideoSource] `json:"filter,omitempty"`
}

type TorrentContentOrderBy string

const (
    TorrentContentOrderByRelevance   TorrentContentOrderBy = "Relevance"
    TorrentContentOrderByPublishedAt TorrentContentOrderBy = "PublishedAt"
    TorrentContentOrderByUpdatedAt   TorrentContentOrderBy = "UpdatedAt"
    TorrentContentOrderBySize        TorrentContentOrderBy = "Size"
    TorrentContentOrderByFiles       TorrentContentOrderBy = "Files"
    TorrentContentOrderBySeeders     TorrentContentOrderBy = "Seeders"
    TorrentContentOrderByLeechers    TorrentContentOrderBy = "Leechers"
    TorrentContentOrderByName        TorrentContentOrderBy = "Name"
    TorrentContentOrderByInfoHash    TorrentContentOrderBy = "InfoHash"
)

var AllTorrentContentOrderBy = []TorrentContentOrderBy{
    TorrentContentOrderByRelevance,
    TorrentContentOrderByPublishedAt,
    TorrentContentOrderByUpdatedAt,
    TorrentContentOrderBySize,
    TorrentContentOrderByFiles,
    TorrentContentOrderBySeeders,
    TorrentContentOrderByLeechers,
    TorrentContentOrderByName,
    TorrentContentOrderByInfoHash,
type Worker struct {
    Key     string `json:"key"`
    Started bool   `json:"started"`
}

func (e TorrentContentOrderBy) IsValid() bool {
type WorkersListAllQueryResult struct {
    Workers []Worker `json:"workers"`
}

type WorkersQuery struct {
    ListAll WorkersListAllQueryResult `json:"listAll"`
}

type HealthStatus string

const (
    HealthStatusUnknown  HealthStatus = "unknown"
    HealthStatusInactive HealthStatus = "inactive"
    HealthStatusUp       HealthStatus = "up"
    HealthStatusDown     HealthStatus = "down"
)

var AllHealthStatus = []HealthStatus{
    HealthStatusUnknown,
    HealthStatusInactive,
    HealthStatusUp,
    HealthStatusDown,
}

func (e HealthStatus) IsValid() bool {
    switch e {
    case TorrentContentOrderByRelevance, TorrentContentOrderByPublishedAt, TorrentContentOrderByUpdatedAt, TorrentContentOrderBySize, TorrentContentOrderByFiles, TorrentContentOrderBySeeders, TorrentContentOrderByLeechers, TorrentContentOrderByName, TorrentContentOrderByInfoHash:
    case HealthStatusUnknown, HealthStatusInactive, HealthStatusUp, HealthStatusDown:
        return true
    }
    return false
}

func (e TorrentContentOrderBy) String() string {
func (e HealthStatus) String() string {
    return string(e)
}

func (e *TorrentContentOrderBy) UnmarshalGQL(v interface{}) error {
func (e *HealthStatus) UnmarshalGQL(v interface{}) error {
    str, ok := v.(string)
    if !ok {
        return fmt.Errorf("enums must be strings")
    }

    *e = TorrentContentOrderBy(str)
    *e = HealthStatus(str)
    if !e.IsValid() {
        return fmt.Errorf("%s is not a valid TorrentContentOrderBy", str)
        return fmt.Errorf("%s is not a valid HealthStatus", str)
    }
    return nil
}

func (e TorrentContentOrderBy) MarshalGQL(w io.Writer) {
func (e HealthStatus) MarshalGQL(w io.Writer) {
    fmt.Fprint(w, strconv.Quote(e.String()))
}

type MetricsBucketDuration string

const (
    MetricsBucketDurationMinute MetricsBucketDuration = "minute"
    MetricsBucketDurationHour   MetricsBucketDuration = "hour"
    MetricsBucketDurationDay    MetricsBucketDuration = "day"
)

var AllMetricsBucketDuration = []MetricsBucketDuration{
    MetricsBucketDurationMinute,
    MetricsBucketDurationHour,
    MetricsBucketDurationDay,
}

func (e MetricsBucketDuration) IsValid() bool {
    switch e {
    case MetricsBucketDurationMinute, MetricsBucketDurationHour, MetricsBucketDurationDay:
        return true
    }
    return false
}

func (e MetricsBucketDuration) String() string {
    return string(e)
}

func (e *MetricsBucketDuration) UnmarshalGQL(v interface{}) error {
    str, ok := v.(string)
    if !ok {
        return fmt.Errorf("enums must be strings")
    }

    *e = MetricsBucketDuration(str)
    if !e.IsValid() {
        return fmt.Errorf("%s is not a valid MetricsBucketDuration", str)
    }
    return nil
}

func (e MetricsBucketDuration) MarshalGQL(w io.Writer) {
    fmt.Fprint(w, strconv.Quote(e.String()))
}

type QueueJobsOrderByField string

const (
    QueueJobsOrderByFieldCreatedAt QueueJobsOrderByField = "created_at"
    QueueJobsOrderByFieldRanAt     QueueJobsOrderByField = "ran_at"
    QueueJobsOrderByFieldPriority  QueueJobsOrderByField = "priority"
)

var AllQueueJobsOrderByField = []QueueJobsOrderByField{
    QueueJobsOrderByFieldCreatedAt,
    QueueJobsOrderByFieldRanAt,
    QueueJobsOrderByFieldPriority,
}

func (e QueueJobsOrderByField) IsValid() bool {
    switch e {
    case QueueJobsOrderByFieldCreatedAt, QueueJobsOrderByFieldRanAt, QueueJobsOrderByFieldPriority:
        return true
    }
    return false
}

func (e QueueJobsOrderByField) String() string {
    return string(e)
}

func (e *QueueJobsOrderByField) UnmarshalGQL(v interface{}) error {
    str, ok := v.(string)
    if !ok {
        return fmt.Errorf("enums must be strings")
    }

    *e = QueueJobsOrderByField(str)
    if !e.IsValid() {
        return fmt.Errorf("%s is not a valid QueueJobsOrderByField", str)
    }
    return nil
}

func (e QueueJobsOrderByField) MarshalGQL(w io.Writer) {
    fmt.Fprint(w, strconv.Quote(e.String()))
}

type TorrentContentOrderByField string

const (
    TorrentContentOrderByFieldRelevance   TorrentContentOrderByField = "relevance"
    TorrentContentOrderByFieldPublishedAt TorrentContentOrderByField = "published_at"
    TorrentContentOrderByFieldUpdatedAt   TorrentContentOrderByField = "updated_at"
    TorrentContentOrderByFieldSize        TorrentContentOrderByField = "size"
    TorrentContentOrderByFieldFilesCount  TorrentContentOrderByField = "files_count"
    TorrentContentOrderByFieldSeeders     TorrentContentOrderByField = "seeders"
    TorrentContentOrderByFieldLeechers    TorrentContentOrderByField = "leechers"
    TorrentContentOrderByFieldName        TorrentContentOrderByField = "name"
    TorrentContentOrderByFieldInfoHash    TorrentContentOrderByField = "info_hash"
)

var AllTorrentContentOrderByField = []TorrentContentOrderByField{
    TorrentContentOrderByFieldRelevance,
    TorrentContentOrderByFieldPublishedAt,
    TorrentContentOrderByFieldUpdatedAt,
    TorrentContentOrderByFieldSize,
    TorrentContentOrderByFieldFilesCount,
    TorrentContentOrderByFieldSeeders,
    TorrentContentOrderByFieldLeechers,
    TorrentContentOrderByFieldName,
    TorrentContentOrderByFieldInfoHash,
}

func (e TorrentContentOrderByField) IsValid() bool {
    switch e {
    case TorrentContentOrderByFieldRelevance, TorrentContentOrderByFieldPublishedAt, TorrentContentOrderByFieldUpdatedAt, TorrentContentOrderByFieldSize, TorrentContentOrderByFieldFilesCount, TorrentContentOrderByFieldSeeders, TorrentContentOrderByFieldLeechers, TorrentContentOrderByFieldName, TorrentContentOrderByFieldInfoHash:
        return true
    }
    return false
}

func (e TorrentContentOrderByField) String() string {
    return string(e)
}

func (e *TorrentContentOrderByField) UnmarshalGQL(v interface{}) error {
    str, ok := v.(string)
    if !ok {
        return fmt.Errorf("enums must be strings")
    }

    *e = TorrentContentOrderByField(str)
    if !e.IsValid() {
        return fmt.Errorf("%s is not a valid TorrentContentOrderByField", str)
    }
    return nil
}

func (e TorrentContentOrderByField) MarshalGQL(w io.Writer) {
    fmt.Fprint(w, strconv.Quote(e.String()))
}

type TorrentFilesOrderByField string

const (
    TorrentFilesOrderByFieldIndex     TorrentFilesOrderByField = "index"
    TorrentFilesOrderByFieldPath      TorrentFilesOrderByField = "path"
    TorrentFilesOrderByFieldExtension TorrentFilesOrderByField = "extension"
    TorrentFilesOrderByFieldSize      TorrentFilesOrderByField = "size"
)

var AllTorrentFilesOrderByField = []TorrentFilesOrderByField{
    TorrentFilesOrderByFieldIndex,
    TorrentFilesOrderByFieldPath,
    TorrentFilesOrderByFieldExtension,
    TorrentFilesOrderByFieldSize,
}

func (e TorrentFilesOrderByField) IsValid() bool {
    switch e {
    case TorrentFilesOrderByFieldIndex, TorrentFilesOrderByFieldPath, TorrentFilesOrderByFieldExtension, TorrentFilesOrderByFieldSize:
        return true
    }
    return false
}

func (e TorrentFilesOrderByField) String() string {
    return string(e)
}

func (e *TorrentFilesOrderByField) UnmarshalGQL(v interface{}) error {
    str, ok := v.(string)
    if !ok {
        return fmt.Errorf("enums must be strings")
    }

    *e = TorrentFilesOrderByField(str)
    if !e.IsValid() {
        return fmt.Errorf("%s is not a valid TorrentFilesOrderByField", str)
    }
    return nil
}

func (e TorrentFilesOrderByField) MarshalGQL(w io.Writer) {
    fmt.Fprint(w, strconv.Quote(e.String()))
}
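All the generated enums above share the same MarshalGQL/UnmarshalGQL shape; a round-trip sketch (the import path is assumed from the repo layout):

package main

import (
    "bytes"
    "fmt"

    "github.com/bitmagnet-io/bitmagnet/internal/gql/gqlmodel/gen"
)

func main() {
    var d gen.MetricsBucketDuration
    if err := d.UnmarshalGQL("hour"); err != nil { // non-string or unknown values are rejected
        panic(err)
    }
    var buf bytes.Buffer
    d.MarshalGQL(&buf) // writes the quoted string form: "hour"
    fmt.Println(buf.String())
}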
118
internal/gql/gqlmodel/queue_jobs.go
Normal file
@ -0,0 +1,118 @@
package gqlmodel

import (
    "context"
    q "github.com/bitmagnet-io/bitmagnet/internal/database/query"
    "github.com/bitmagnet-io/bitmagnet/internal/database/search"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/gqlmodel/gen"
    "github.com/bitmagnet-io/bitmagnet/internal/maps"
    "github.com/bitmagnet-io/bitmagnet/internal/model"
)

type QueueJobsQueryInput struct {
    Queues      []string
    Statuses    []model.QueueJobStatus
    Limit       model.NullUint
    Page        model.NullUint
    Offset      model.NullUint
    TotalCount  model.NullBool
    HasNextPage model.NullBool
    Facets      *gen.QueueJobsFacetsInput
    OrderBy     []gen.QueueJobsOrderByInput
}

type QueueJobsQueryResult struct {
    TotalCount   uint
    HasNextPage  bool
    Items        []model.QueueJob
    Aggregations gen.QueueJobsAggregations
}

func (r QueueQuery) Jobs(
    ctx context.Context,
    query QueueJobsQueryInput,
) (QueueJobsQueryResult, error) {
    limit := uint(10)
    if query.Limit.Valid {
        limit = query.Limit.Uint
    }
    options := []q.Option{
        q.SearchParams{
            Limit:       model.NullUint{Valid: true, Uint: limit},
            Page:        query.Page,
            Offset:      query.Offset,
            TotalCount:  query.TotalCount,
            HasNextPage: query.HasNextPage,
        }.Option(),
    }
    if query.Facets != nil {
        var qFacets []q.Facet
        if queue, ok := query.Facets.Queue.ValueOK(); ok {
            qFacets = append(qFacets, queueJobQueueFacet(*queue))
        }
        if status, ok := query.Facets.Status.ValueOK(); ok {
            qFacets = append(qFacets, queueJobStatusFacet(*status))
        }
        options = append(options, q.WithFacet(qFacets...))
    }
    var criteria []q.Criteria
    if query.Queues != nil {
        criteria = append(criteria, search.QueueJobQueueCriteria(query.Queues...))
    }
    if query.Statuses != nil {
        criteria = append(criteria, search.QueueJobStatusCriteria(query.Statuses...))
    }
    if len(criteria) > 0 {
        options = append(options, q.Where(criteria...))
    }
    fullOrderBy := maps.NewInsertMap[search.QueueJobsOrderBy, search.OrderDirection]()
    for _, ob := range query.OrderBy {
        direction := search.OrderDirectionAscending
        if desc, ok := ob.Descending.ValueOK(); ok && *desc {
            direction = search.OrderDirectionDescending
        }
        field, err := search.ParseQueueJobsOrderBy(ob.Field.String())
        if err != nil {
            return QueueJobsQueryResult{}, err
        }
        fullOrderBy.Set(field, direction)
    }
    options = append(options, search.QueueJobsFullOrderBy(fullOrderBy).Option())
    result, resultErr := r.QueueJobSearch.QueueJobs(ctx, options...)
    if resultErr != nil {
        return QueueJobsQueryResult{}, resultErr
    }
    return transformQueueJobsQueryResult(result)
}

func transformQueueJobsQueryResult(result q.GenericResult[model.QueueJob]) (QueueJobsQueryResult, error) {
    aggs, err := transformQueueJobsAggregations(result.Aggregations)
    if err != nil {
        return QueueJobsQueryResult{}, err
    }
    return QueueJobsQueryResult{
        TotalCount:   result.TotalCount,
        HasNextPage:  result.HasNextPage,
        Items:        result.Items,
        Aggregations: aggs,
    }, nil
}

func transformQueueJobsAggregations(aggs q.Aggregations) (gen.QueueJobsAggregations, error) {
    a := gen.QueueJobsAggregations{}
    if queue, ok := aggs[search.QueueJobQueueFacetKey]; ok {
        agg, err := queueJobQueueAggs(queue.Items)
        if err != nil {
            return a, err
        }
        a.Queue = agg
    }
    if status, ok := aggs[search.QueueJobStatusFacetKey]; ok {
        agg, err := queueJobStatusAggs(status.Items)
        if err != nil {
            return a, err
        }
        a.Status = agg
    }
    return a, nil
}
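A sketch of how a caller drives Jobs (the queue.resolvers.go hunk further down does the real wiring; the QueueQuery value here is assumed to be populated as in that resolver):

package main

import (
    "context"

    "github.com/99designs/gqlgen/graphql"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/gqlmodel"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/gqlmodel/gen"
    "github.com/bitmagnet-io/bitmagnet/internal/model"
)

func recentJobs(ctx context.Context, q gqlmodel.QueueQuery) (gqlmodel.QueueJobsQueryResult, error) {
    desc := true
    return q.Jobs(ctx, gqlmodel.QueueJobsQueryInput{
        // Limit overrides the default of 10 applied in Jobs above
        Limit: model.NullUint{Valid: true, Uint: 20},
        OrderBy: []gen.QueueJobsOrderByInput{{
            Field:      gen.QueueJobsOrderByFieldCreatedAt,
            Descending: graphql.OmittableOf(&desc),
        }},
    })
}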
62
internal/gql/gqlmodel/queue_metrics.go
Normal file
@ -0,0 +1,62 @@
package gqlmodel

import (
    "context"
    "fmt"
    "github.com/bitmagnet-io/bitmagnet/internal/database/search"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/gqlmodel/gen"
    "github.com/bitmagnet-io/bitmagnet/internal/metrics/queuemetrics"
    "github.com/bitmagnet-io/bitmagnet/internal/queue/manager"
)

type QueueQuery struct {
    QueueJobSearch     search.QueueJobSearch
    QueueMetricsClient queuemetrics.Client
}

func (q QueueQuery) Metrics(ctx context.Context, input gen.QueueMetricsQueryInput) (*gen.QueueMetricsQueryResult, error) {
    req := queuemetrics.Request{}
    switch input.BucketDuration {
    case gen.MetricsBucketDurationMinute:
        req.BucketDuration = "minute"
    case gen.MetricsBucketDurationHour:
        req.BucketDuration = "hour"
    case gen.MetricsBucketDurationDay:
        req.BucketDuration = "day"
    default:
        return nil, fmt.Errorf("invalid bucket duration: %s", input.BucketDuration)
    }
    if t, ok := input.StartTime.ValueOK(); ok && !t.IsZero() {
        req.StartTime = *t
    }
    if t, ok := input.EndTime.ValueOK(); ok && !t.IsZero() {
        req.EndTime = *t
    }
    if statuses, ok := input.Statuses.ValueOK(); ok {
        req.Statuses = statuses
    }
    if queues, ok := input.Queues.ValueOK(); ok {
        req.Queues = queues
    }
    buckets, err := q.QueueMetricsClient.Request(ctx, req)
    if err != nil {
        return nil, err
    }
    return &gen.QueueMetricsQueryResult{
        Buckets: buckets,
    }, nil
}

type QueueMutation struct {
    QueueManager manager.Manager
}

func (m *QueueMutation) PurgeJobs(ctx context.Context, input manager.PurgeJobsRequest) (*string, error) {
    err := m.QueueManager.PurgeJobs(ctx, input)
    return nil, err
}

func (m *QueueMutation) EnqueueReprocessTorrentsBatch(ctx context.Context, input manager.EnqueueReprocessTorrentsBatchRequest) (*string, error) {
    err := m.QueueManager.EnqueueReprocessTorrentsBatch(ctx, input)
    return nil, err
}
@ -2,25 +2,44 @@ package gqlmodel

import (
    "context"
    "github.com/bitmagnet-io/bitmagnet/internal/database/dao"
    "github.com/bitmagnet-io/bitmagnet/internal/database/search"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/gqlmodel/gen"
    "github.com/bitmagnet-io/bitmagnet/internal/metrics/torrentmetrics"
    "github.com/bitmagnet-io/bitmagnet/internal/model"
)

type TorrentQuery struct {
    TorrentSearch search.TorrentSearch
    Dao                  *dao.Query
    Search               search.Search
    TorrentMetricsClient torrentmetrics.Client
}

func (t TorrentQuery) SuggestTags(ctx context.Context, query *gen.SuggestTagsQueryInput) (search.TorrentSuggestTagsResult, error) {
func (t TorrentQuery) SuggestTags(ctx context.Context, input *gen.SuggestTagsQueryInput) (search.TorrentSuggestTagsResult, error) {
    suggestTagsQuery := search.SuggestTagsQuery{}
    if query != nil {
        if prefix, ok := query.Prefix.ValueOK(); ok && prefix != nil {
    if input != nil {
        if prefix, ok := input.Prefix.ValueOK(); ok && prefix != nil {
            suggestTagsQuery.Prefix = *prefix
        }
        if exclusions, ok := query.Exclusions.ValueOK(); ok {
        if exclusions, ok := input.Exclusions.ValueOK(); ok {
            suggestTagsQuery.Exclusions = exclusions
        }
    }
    return t.TorrentSearch.TorrentSuggestTags(ctx, suggestTagsQuery)
    return t.Search.TorrentSuggestTags(ctx, suggestTagsQuery)
}

func (t TorrentQuery) ListSources(ctx context.Context) (gen.TorrentListSourcesResult, error) {
    result, err := t.Dao.TorrentSource.WithContext(ctx).Order(t.Dao.TorrentSource.Key.Asc()).Find()
    if err != nil {
        return gen.TorrentListSourcesResult{}, err
    }
    sources := make([]model.TorrentSource, len(result))
    for i := range result {
        sources[i] = *result[i]
    }
    return gen.TorrentListSourcesResult{
        Sources: sources,
    }, nil
}

type TorrentMutation struct{}
@ -2,6 +2,7 @@ package gqlmodel

import (
    "context"
    "github.com/99designs/gqlgen/graphql"
    q "github.com/bitmagnet-io/bitmagnet/internal/database/query"
    "github.com/bitmagnet-io/bitmagnet/internal/database/search"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/gqlmodel/gen"
@ -82,7 +83,7 @@ func NewTorrentContentFromResultItem(item search.TorrentContentResultItem) Torre
    return c
}

type TorrentSource struct {
type TorrentSourceInfo struct {
    Key      string
    Name     string
    ImportID model.NullString
@ -90,10 +91,10 @@ type TorrentSource struct {
    Leechers model.NullUint
}

func TorrentSourcesFromTorrent(t model.Torrent) []TorrentSource {
    var sources []TorrentSource
func TorrentSourceInfosFromTorrent(t model.Torrent) []TorrentSourceInfo {
    var sources []TorrentSourceInfo
    for _, s := range t.Sources {
        sources = append(sources, TorrentSource{
        sources = append(sources, TorrentSourceInfo{
            Key:      s.Source,
            Name:     s.TorrentSource.Name,
            ImportID: s.ImportID,
@ -104,6 +105,13 @@ func TorrentSourcesFromTorrent(t model.Torrent) []TorrentSource {
    return sources
}

type TorrentContentSearchQueryInput struct {
    q.SearchParams
    Facets     *gen.TorrentContentFacetsInput
    OrderBy    []gen.TorrentContentOrderByInput
    InfoHashes graphql.Omittable[[]protocol.ID]
}

type TorrentContentSearchResult struct {
    TotalCount           uint
    TotalCountIsEstimate bool
@ -114,52 +122,53 @@ type TorrentContentSearchResult struct {

func (t TorrentContentQuery) Search(
    ctx context.Context,
    query *q.SearchParams,
    facets *gen.TorrentContentFacetsInput,
    orderBy []gen.TorrentContentOrderByInput,
    input TorrentContentSearchQueryInput,
) (TorrentContentSearchResult, error) {
    options := []q.Option{
        search.TorrentContentDefaultOption(),
        q.DefaultOption(),
        search.TorrentContentCoreJoins(),
        search.HydrateTorrentContentContent(),
        search.HydrateTorrentContentTorrent(),
    }
    hasQueryString := false
    if query != nil {
        options = append(options, query.Option())
        hasQueryString = query.QueryString.Valid
    }
    if facets != nil {
    options = append(options, input.Option())
    hasQueryString := input.QueryString.Valid
    if input.Facets != nil {
        var qFacets []q.Facet
        if contentType, ok := facets.ContentType.ValueOK(); ok {
        if contentType, ok := input.Facets.ContentType.ValueOK(); ok {
            qFacets = append(qFacets, torrentContentTypeFacet(*contentType))
        }
        if torrentSource, ok := facets.TorrentSource.ValueOK(); ok {
        if torrentSource, ok := input.Facets.TorrentSource.ValueOK(); ok {
            qFacets = append(qFacets, torrentSourceFacet(*torrentSource))
        }
        if torrentTag, ok := facets.TorrentTag.ValueOK(); ok {
        if torrentTag, ok := input.Facets.TorrentTag.ValueOK(); ok {
            qFacets = append(qFacets, torrentTagFacet(*torrentTag))
        }
        if torrentFileType, ok := facets.TorrentFileType.ValueOK(); ok {
        if torrentFileType, ok := input.Facets.TorrentFileType.ValueOK(); ok {
            qFacets = append(qFacets, torrentFileTypeFacet(*torrentFileType))
        }
        if language, ok := facets.Language.ValueOK(); ok {
        if language, ok := input.Facets.Language.ValueOK(); ok {
            qFacets = append(qFacets, languageFacet(*language))
        }
        if genre, ok := facets.Genre.ValueOK(); ok {
        if genre, ok := input.Facets.Genre.ValueOK(); ok {
            qFacets = append(qFacets, genreFacet(*genre))
        }
        if releaseYear, ok := facets.ReleaseYear.ValueOK(); ok {
        if releaseYear, ok := input.Facets.ReleaseYear.ValueOK(); ok {
            qFacets = append(qFacets, releaseYearFacet(*releaseYear))
        }
        if videoResolution, ok := facets.VideoResolution.ValueOK(); ok {
        if videoResolution, ok := input.Facets.VideoResolution.ValueOK(); ok {
            qFacets = append(qFacets, videoResolutionFacet(*videoResolution))
        }
        if videoSource, ok := facets.VideoSource.ValueOK(); ok {
        if videoSource, ok := input.Facets.VideoSource.ValueOK(); ok {
            qFacets = append(qFacets, videoSourceFacet(*videoSource))
        }
        options = append(options, q.WithFacet(qFacets...))
    }
    if infoHashes, ok := input.InfoHashes.ValueOK(); ok {
        options = append(options, q.Where(search.TorrentContentInfoHashCriteria(infoHashes...)))
    }
    fullOrderBy := maps.NewInsertMap[search.TorrentContentOrderBy, search.OrderDirection]()
    for _, ob := range orderBy {
        if ob.Field == gen.TorrentContentOrderByRelevance && !hasQueryString {
    for _, ob := range input.OrderBy {
        if ob.Field == gen.TorrentContentOrderByFieldRelevance && !hasQueryString {
            continue
        }
        direction := search.OrderDirectionAscending
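ValueOK is used throughout these hunks to tell an omitted input field apart from one that was sent as null; a sketch of the gqlgen Omittable semantics relied on here:

package main

import (
    "fmt"

    "github.com/99designs/gqlgen/graphql"
)

func main() {
    var omitted graphql.Omittable[*bool] // zero value: never set
    if _, ok := omitted.ValueOK(); !ok {
        fmt.Println("field was omitted")
    }
    present := graphql.OmittableOf[*bool](nil) // set, but to null
    if v, ok := present.ValueOK(); ok && v == nil {
        fmt.Println("field was sent as null")
    }
}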
58
internal/gql/gqlmodel/torrent_files.go
Normal file
@ -0,0 +1,58 @@
package gqlmodel

import (
    "context"
    q "github.com/bitmagnet-io/bitmagnet/internal/database/query"
    "github.com/bitmagnet-io/bitmagnet/internal/database/search"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/gqlmodel/gen"
    "github.com/bitmagnet-io/bitmagnet/internal/maps"
    "github.com/bitmagnet-io/bitmagnet/internal/model"
    "github.com/bitmagnet-io/bitmagnet/internal/protocol"
)

type TorrentFilesQueryInput struct {
    InfoHashes  []protocol.ID
    Limit       model.NullUint
    Page        model.NullUint
    Offset      model.NullUint
    TotalCount  model.NullBool
    HasNextPage model.NullBool
    Cached      model.NullBool
    OrderBy     []gen.TorrentFilesOrderByInput
}

func (t TorrentQuery) Files(ctx context.Context, query TorrentFilesQueryInput) (search.TorrentFilesResult, error) {
    limit := uint(10)
    if query.Limit.Valid {
        limit = query.Limit.Uint
    }
    options := []q.Option{
        q.SearchParams{
            Limit:             model.NullUint{Valid: true, Uint: limit},
            Page:              query.Page,
            Offset:            query.Offset,
            TotalCount:        query.TotalCount,
            HasNextPage:       query.HasNextPage,
            AggregationBudget: model.NullFloat64{Valid: true, Float64: 0},
        }.Option(),
    }
    var criteria []q.Criteria
    if query.InfoHashes != nil {
        criteria = append(criteria, search.TorrentFileInfoHashCriteria(query.InfoHashes...))
    }
    options = append(options, q.Where(criteria...))
    fullOrderBy := maps.NewInsertMap[search.TorrentFilesOrderBy, search.OrderDirection]()
    for _, ob := range query.OrderBy {
        direction := search.OrderDirectionAscending
        if desc, ok := ob.Descending.ValueOK(); ok && *desc {
            direction = search.OrderDirectionDescending
        }
        field, err := search.ParseTorrentFilesOrderBy(ob.Field.String())
        if err != nil {
            return search.TorrentFilesResult{}, err
        }
        fullOrderBy.Set(field, direction)
    }
    options = append(options, search.TorrentFilesFullOrderBy(fullOrderBy).Option())
    return t.Search.TorrentFiles(ctx, options...)
}
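The string values of gen.TorrentFilesOrderByField are exactly what ParseTorrentFilesOrderBy receives above, so the GraphQL enum and the search-layer enum have to stay in step; a sketch of that round trip:

package main

import (
    "fmt"

    "github.com/bitmagnet-io/bitmagnet/internal/database/search"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/gqlmodel/gen"
)

func main() {
    ob := gen.TorrentFilesOrderByFieldSize // string value "size"
    field, err := search.ParseTorrentFilesOrderBy(ob.String())
    fmt.Println(field, err) // a non-nil err would mean the two enums have drifted apart
}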
38
internal/gql/gqlmodel/torrent_metrics.go
Normal file
@ -0,0 +1,38 @@
package gqlmodel

import (
    "context"
    "fmt"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/gqlmodel/gen"
    "github.com/bitmagnet-io/bitmagnet/internal/metrics/torrentmetrics"
)

func (t TorrentQuery) Metrics(ctx context.Context, input gen.TorrentMetricsQueryInput) (*gen.TorrentMetricsQueryResult, error) {
    req := torrentmetrics.Request{}
    switch input.BucketDuration {
    case gen.MetricsBucketDurationMinute:
        req.BucketDuration = "minute"
    case gen.MetricsBucketDurationHour:
        req.BucketDuration = "hour"
    case gen.MetricsBucketDurationDay:
        req.BucketDuration = "day"
    default:
        return nil, fmt.Errorf("invalid bucket duration: %s", input.BucketDuration)
    }
    if t, ok := input.StartTime.ValueOK(); ok && !t.IsZero() {
        req.StartTime = *t
    }
    if t, ok := input.EndTime.ValueOK(); ok && !t.IsZero() {
        req.EndTime = *t
    }
    if sources, ok := input.Sources.ValueOK(); ok {
        req.Sources = sources
    }
    buckets, err := t.TorrentMetricsClient.Request(ctx, req)
    if err != nil {
        return nil, err
    }
    return &gen.TorrentMetricsQueryResult{
        Buckets: buckets,
    }, nil
}
@ -2,7 +2,7 @@ package resolvers

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.45
// Code generated by github.com/99designs/gqlgen version v0.17.55

import (
    "context"
@ -22,8 +22,8 @@ func (r *contentResolver) OriginalLanguage(ctx context.Context, obj *model.Conte
}

// Sources is the resolver for the sources field.
func (r *torrentResolver) Sources(ctx context.Context, obj *model.Torrent) ([]gqlmodel.TorrentSource, error) {
    return gqlmodel.TorrentSourcesFromTorrent(*obj), nil
func (r *torrentResolver) Sources(ctx context.Context, obj *model.Torrent) ([]gqlmodel.TorrentSourceInfo, error) {
    return gqlmodel.TorrentSourceInfosFromTorrent(*obj), nil
}

// Content returns gql.ContentResolver implementation.
@ -2,7 +2,7 @@ package resolvers

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.45
// Code generated by github.com/99designs/gqlgen version v0.17.55

import (
    "context"
@ -17,25 +17,30 @@ func (r *mutationResolver) Torrent(ctx context.Context) (gqlmodel.TorrentMutatio
    return gqlmodel.TorrentMutation{}, nil
}

// Queue is the resolver for the queue field.
func (r *mutationResolver) Queue(ctx context.Context) (gqlmodel.QueueMutation, error) {
    return gqlmodel.QueueMutation{QueueManager: r.QueueManager}, nil
}

// Delete is the resolver for the delete field.
func (r *torrentMutationResolver) Delete(ctx context.Context, obj *gqlmodel.TorrentMutation, infoHashes []protocol.ID) (*string, error) {
    _, err := r.dao.DeleteAndBlockTorrents(ctx, infoHashes)
    _, err := r.Dao.DeleteAndBlockTorrents(ctx, infoHashes)
    return nil, err
}

// PutTags is the resolver for the putTags field.
func (r *torrentMutationResolver) PutTags(ctx context.Context, obj *gqlmodel.TorrentMutation, infoHashes []protocol.ID, tagNames []string) (*string, error) {
    return nil, r.dao.TorrentTag.Put(ctx, infoHashes, tagNames)
    return nil, r.Dao.TorrentTag.Put(ctx, infoHashes, tagNames)
}

// SetTags is the resolver for the setTags field.
func (r *torrentMutationResolver) SetTags(ctx context.Context, obj *gqlmodel.TorrentMutation, infoHashes []protocol.ID, tagNames []string) (*string, error) {
    return nil, r.dao.TorrentTag.Set(ctx, infoHashes, tagNames)
    return nil, r.Dao.TorrentTag.Set(ctx, infoHashes, tagNames)
}

// DeleteTags is the resolver for the deleteTags field.
func (r *torrentMutationResolver) DeleteTags(ctx context.Context, obj *gqlmodel.TorrentMutation, infoHashes []protocol.ID, tagNames []string) (*string, error) {
    return nil, r.dao.TorrentTag.Delete(ctx, infoHashes, tagNames)
    return nil, r.Dao.TorrentTag.Delete(ctx, infoHashes, tagNames)
}

// Mutation returns gql.MutationResolver implementation.
@ -2,39 +2,114 @@ package resolvers

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.45
// Code generated by github.com/99designs/gqlgen version v0.17.55

import (
    "context"
    "sort"

    "github.com/bitmagnet-io/bitmagnet/internal/database/query"
    "github.com/bitmagnet-io/bitmagnet/internal/gql"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/gqlmodel"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/gqlmodel/gen"
    "github.com/bitmagnet-io/bitmagnet/internal/health"
    "github.com/bitmagnet-io/bitmagnet/internal/model"
    "github.com/bitmagnet-io/bitmagnet/internal/version"
)

// Version is the resolver for the version field.
func (r *queryResolver) Version(ctx context.Context) (string, error) {
    return version.GitTag, nil
}

// Workers is the resolver for the workers field.
func (r *queryResolver) Workers(ctx context.Context) (gen.WorkersQuery, error) {
    var workers []gen.Worker
    for _, w := range r.Resolver.Workers.Workers() {
        workers = append(workers, gen.Worker{
            Key:     w.Key(),
            Started: w.Started(),
        })
    }
    return gen.WorkersQuery{
        ListAll: gen.WorkersListAllQueryResult{
            Workers: workers,
        },
    }, nil
}

// Health is the resolver for the health field.
func (r *queryResolver) Health(ctx context.Context) (gen.HealthQuery, error) {
    transformHealthCheckStatus := func(s health.AvailabilityStatus) gen.HealthStatus {
        switch s {
        case health.StatusInactive:
            return gen.HealthStatusInactive
        case health.StatusDown:
            return gen.HealthStatusDown
        case health.StatusUp:
            return gen.HealthStatusUp
        default:
            return gen.HealthStatusUnknown
        }
    }
    check := r.Checker.Check(ctx)
    checks := make([]gen.HealthCheck, 0, len(check.Details))
    for k, v := range check.Details {
        var err *string
        if v.Error != nil {
            strErr := v.Error.Error()
            err = &strErr
        }
        checks = append(checks, gen.HealthCheck{
            Key:       k,
            Status:    transformHealthCheckStatus(v.Status),
            Timestamp: v.Timestamp,
            Error:     err,
        })
    }
    sort.Slice(checks, func(i, j int) bool {
        return checks[i].Key < checks[j].Key
    })
    result := gen.HealthQuery{
        Status: transformHealthCheckStatus(check.Status),
        Checks: checks,
    }
    return result, nil
}

// Queue is the resolver for the queue field.
func (r *queryResolver) Queue(ctx context.Context) (gqlmodel.QueueQuery, error) {
    return gqlmodel.QueueQuery{QueueMetricsClient: r.QueueMetricsClient}, nil
}

// Torrent is the resolver for the torrent field.
func (r *queryResolver) Torrent(ctx context.Context) (gqlmodel.TorrentQuery, error) {
    return gqlmodel.TorrentQuery{
    TorrentSearch: r.search,
        Dao:                  r.Dao,
        Search:               r.Search,
        TorrentMetricsClient: r.TorrentMetricsClient,
    }, nil
}

// TorrentContent is the resolver for the torrentContent field.
func (r *queryResolver) TorrentContent(ctx context.Context) (gqlmodel.TorrentContentQuery, error) {
    return gqlmodel.TorrentContentQuery{
        TorrentContentSearch: r.search,
        TorrentContentSearch: r.Search,
    }, nil
}

// System is the resolver for the system field.
func (r *queryResolver) System(ctx context.Context) (gen.SystemQuery, error) {
    return gen.SystemQuery{
        Version: version.GitTag,
    }, nil
// Files is the resolver for the files field.
func (r *torrentQueryResolver) Files(ctx context.Context, obj *gqlmodel.TorrentQuery, input gqlmodel.TorrentFilesQueryInput) (query.GenericResult[model.TorrentFile], error) {
    return gqlmodel.TorrentQuery{
        Search: r.Search,
    }.Files(ctx, input)
}

// Query returns gql.QueryResolver implementation.
func (r *Resolver) Query() gql.QueryResolver { return &queryResolver{r} }

// TorrentQuery returns gql.TorrentQueryResolver implementation.
func (r *Resolver) TorrentQuery() gql.TorrentQueryResolver { return &torrentQueryResolver{r} }

type queryResolver struct{ *Resolver }
type torrentQueryResolver struct{ *Resolver }
56
internal/gql/resolvers/queue.resolvers.go
Normal file
@ -0,0 +1,56 @@
package resolvers

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.55

import (
    "context"
    "time"

    "github.com/bitmagnet-io/bitmagnet/internal/gql"
    "github.com/bitmagnet-io/bitmagnet/internal/gql/gqlmodel"
    "github.com/bitmagnet-io/bitmagnet/internal/model"
    "github.com/bitmagnet-io/bitmagnet/internal/processor"
    "github.com/bitmagnet-io/bitmagnet/internal/queue/manager"
)

// RanAt is the resolver for the ranAt field.
func (r *queueJobResolver) RanAt(ctx context.Context, obj *model.QueueJob) (*time.Time, error) {
    if obj == nil {
        return nil, nil
    }
    t := obj.RanAt.Time
    if t.IsZero() {
        return nil, nil
    }
    return &t, nil
}

// Jobs is the resolver for the jobs field.
func (r *queueQueryResolver) Jobs(ctx context.Context, obj *gqlmodel.QueueQuery, input gqlmodel.QueueJobsQueryInput) (gqlmodel.QueueJobsQueryResult, error) {
    return gqlmodel.QueueQuery{QueueJobSearch: r.Search}.Jobs(ctx, input)
}

// ClassifierRematch is the resolver for the classifierRematch field.
func (r *queueEnqueueReprocessTorrentsBatchInputResolver) ClassifierRematch(ctx context.Context, obj *manager.EnqueueReprocessTorrentsBatchRequest, data *bool) error {
    if data != nil && *data {
        obj.ClassifyMode = processor.ClassifyModeRematch
    }
    return nil
}

// QueueJob returns gql.QueueJobResolver implementation.
func (r *Resolver) QueueJob() gql.QueueJobResolver { return &queueJobResolver{r} }

// QueueQuery returns gql.QueueQueryResolver implementation.
func (r *Resolver) QueueQuery() gql.QueueQueryResolver { return &queueQueryResolver{r} }

// QueueEnqueueReprocessTorrentsBatchInput returns gql.QueueEnqueueReprocessTorrentsBatchInputResolver implementation.
func (r *Resolver) QueueEnqueueReprocessTorrentsBatchInput() gql.QueueEnqueueReprocessTorrentsBatchInputResolver {
    return &queueEnqueueReprocessTorrentsBatchInputResolver{r}
}

type queueJobResolver struct{ *Resolver }
type queueQueryResolver struct{ *Resolver }
type queueEnqueueReprocessTorrentsBatchInputResolver struct{ *Resolver }
@ -1,9 +1,13 @@
package resolvers

import (
	"github.com/bitmagnet-io/bitmagnet/internal/boilerplate/worker"
	"github.com/bitmagnet-io/bitmagnet/internal/database/dao"
	"github.com/bitmagnet-io/bitmagnet/internal/database/search"
	"github.com/bitmagnet-io/bitmagnet/internal/gql"
	"github.com/bitmagnet-io/bitmagnet/internal/health"
	"github.com/bitmagnet-io/bitmagnet/internal/metrics/queuemetrics"
	"github.com/bitmagnet-io/bitmagnet/internal/metrics/torrentmetrics"
	"github.com/bitmagnet-io/bitmagnet/internal/queue/manager"
)

// This file will not be regenerated automatically.
@ -11,16 +15,11 @@ import (
// It serves as dependency injection for your app, add any dependencies you require here.

type Resolver struct {
	Dao                  *dao.Query
	Search               search.Search
	Workers              worker.Registry
	Checker              health.Checker
	QueueMetricsClient   queuemetrics.Client
	QueueManager         manager.Manager
	TorrentMetricsClient torrentmetrics.Client
}

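Since the revamp replaces the old New constructor with exported dependency fields, the root resolver is now built as a plain struct literal. A minimal sketch of the idea, in the same package (the newResolverRoot name and its parameter names are hypothetical; only the field names and types come from the Resolver struct above):

// newResolverRoot is a hypothetical wiring sketch: callers populate the
// exported dependency fields directly and hand the struct to gqlgen as the
// ResolverRoot.
func newResolverRoot(
	d *dao.Query,
	s search.Search,
	w worker.Registry,
	c health.Checker,
	qm queuemetrics.Client,
	m manager.Manager,
	tm torrentmetrics.Client,
) gql.ResolverRoot {
	return &Resolver{
		Dao:                  d,
		Search:               s,
		Workers:              w,
		Checker:              c,
		QueueMetricsClient:   qm,
		QueueManager:         m,
		TorrentMetricsClient: tm,
	}
}
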
587
internal/health/check.go
Normal file
@ -0,0 +1,587 @@
package health

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"sync"
	"time"
)

type (
	checkerConfig struct {
		timeout              time.Duration
		info                 map[string]interface{}
		checks               map[string]*Check
		cacheTTL             time.Duration
		statusChangeListener func(context.Context, CheckerState)
		interceptors         []Interceptor
		detailsDisabled      bool
		autostartDisabled    bool
	}

	defaultChecker struct {
		started            bool
		startedAt          time.Time
		mtx                sync.Mutex
		cfg                checkerConfig
		state              CheckerState
		wg                 sync.WaitGroup
		cancel             context.CancelFunc
		periodicCheckCount int
	}

	checkResult struct {
		checkName string
		newState  CheckState
	}

	jsonCheckResult struct {
		Status    string    `json:"status"`
		Timestamp time.Time `json:"timestamp,omitempty"`
		Error     string    `json:"error,omitempty"`
	}

	// Checker is the main checker interface. It provides all health checking logic.
	Checker interface {
		// Start will start all necessary background workers and prepare
		// the checker for further usage.
		Start()
		// Stop will stop the checker.
		Stop()
		// Check runs all synchronous (i.e., non-periodic) check functions.
		// It returns the aggregated health status, combined from the results
		// of this execution's synchronous checks and the previously reported
		// results of asynchronous/periodic checks. This function expects a
		// context that may contain deadlines, which will be adhered to.
		// The context will be passed to all downstream calls
		// (such as listeners, component check functions, and interceptors).
		Check(ctx context.Context) CheckerResult
		// GetRunningPeriodicCheckCount returns the number of currently
		// running periodic checks.
		GetRunningPeriodicCheckCount() int
		// IsStarted returns true if the Checker was started (see Checker.Start)
		// and is currently still running. Returns false otherwise.
		IsStarted() bool
		// StartedAt returns the time at which the Checker was started.
		StartedAt() time.Time
	}

	// CheckerState represents the current state of the Checker.
	CheckerState struct {
		// Status is the aggregated system health status.
		Status AvailabilityStatus
		// CheckState contains the state of all checks.
		CheckState map[string]CheckState
	}

	// CheckState represents the current state of a component check.
	CheckState struct {
		// LastCheckedAt holds the time of when the check was last executed.
		LastCheckedAt time.Time
		// LastSuccessAt holds the last time of when the check did not return an error.
		LastSuccessAt time.Time
		// LastFailureAt holds the last time of when the check did return an error.
		LastFailureAt time.Time
		// FirstCheckStartedAt holds the time of when the first check was started.
		FirstCheckStartedAt time.Time
		// ContiguousFails holds the number of how often the check failed in a row.
		ContiguousFails uint
		// Result holds the error of the last check (nil if successful).
		Result error
		// Status is the current availability status of the check.
		Status AvailabilityStatus
	}

	// CheckerResult holds the aggregated system availability status and
	// detailed information about the individual checks.
	CheckerResult struct {
		// Info contains additional information about this health result.
		Info map[string]interface{} `json:"info,omitempty"`
		// Status is the aggregated system availability status.
		Status AvailabilityStatus `json:"status"`
		// Details contains health information for all checked components.
		Details map[string]CheckResult `json:"details,omitempty"`
	}

	// CheckResult holds a component's health information.
	// Attention: This type is converted from/to JSON using custom
	// marshalling/unmarshalling functions (see type jsonCheckResult).
	// This is required because some fields are not converted automatically
	// by the standard json.Marshal/json.Unmarshal functions
	// (such as the error interface). The JSON tags you see here are
	// just there for the reader's convenience.
	CheckResult struct {
		// Status is the availability status of a component.
		Status AvailabilityStatus `json:"status"`
		// Timestamp holds the time when the check was executed.
		Timestamp time.Time `json:"timestamp,omitempty"`
		// Error contains the check error message, if the check failed.
		Error error `json:"error,omitempty"`
	}

	// Interceptor is a factory function that allows creating new instances of
	// an InterceptorFunc. The concept behind Interceptor is similar to the
	// middleware pattern. An InterceptorFunc that is created by calling an
	// Interceptor is expected to forward the function call to the next
	// InterceptorFunc (passed to the Interceptor in parameter 'next').
	// This way, a chain of interceptors is constructed that will eventually
	// invoke the component's health check function. Each interceptor must
	// therefore invoke the 'next' interceptor. If the 'next' InterceptorFunc
	// is not called, the component's health check function will never be executed.
	Interceptor func(next InterceptorFunc) InterceptorFunc

	// InterceptorFunc is an interceptor function that intercepts any call to
	// a component's health check function.
	InterceptorFunc func(ctx context.Context, checkName string, state CheckState) CheckState

	// AvailabilityStatus expresses the availability of either
	// a component or the whole system.
	AvailabilityStatus string
)

const (
	// StatusUnknown holds the information that the availability
	// status is not known, because not all checks were executed yet.
	StatusUnknown AvailabilityStatus = "unknown"
	// StatusUp holds the information that the system or a component
	// is up and running.
	StatusUp AvailabilityStatus = "up"
	// StatusDown holds the information that the system or a component
	// is down and not available.
	StatusDown AvailabilityStatus = "down"
	// StatusInactive holds the information that a component
	// is not currently active.
	StatusInactive AvailabilityStatus = "inactive"
)

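As a concrete illustration of the middleware pattern described above, here is a minimal sketch of a timing interceptor in the same package (the use of the standard library log package is an assumption; the Interceptor and InterceptorFunc types are the ones defined in this file):

// loggingInterceptor is a hypothetical example interceptor: it measures how
// long the rest of the chain (and ultimately the check function) takes, and
// logs the resulting status. Note that it forwards to 'next'; if it did not,
// the check function would never run.
func loggingInterceptor(next InterceptorFunc) InterceptorFunc {
	return func(ctx context.Context, checkName string, state CheckState) CheckState {
		started := time.Now()
		result := next(ctx, checkName, state)
		log.Printf("health check %q finished in %s with status %q",
			checkName, time.Since(started), result.Status)
		return result
	}
}
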
// MarshalJSON provides a custom marshaller for the CheckResult type.
func (cr CheckResult) MarshalJSON() ([]byte, error) {
	errorMsg := ""
	if cr.Error != nil {
		errorMsg = cr.Error.Error()
	}

	return json.Marshal(&jsonCheckResult{
		Status:    string(cr.Status),
		Timestamp: cr.Timestamp,
		Error:     errorMsg,
	})
}

// UnmarshalJSON provides a custom unmarshaller for the CheckResult type.
func (cr *CheckResult) UnmarshalJSON(data []byte) error {
	var result jsonCheckResult
	if err := json.Unmarshal(data, &result); err != nil {
		return err
	}

	cr.Status = AvailabilityStatus(result.Status)
	cr.Timestamp = result.Timestamp

	if result.Error != "" {
		cr.Error = errors.New(result.Error)
	}

	return nil
}

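A quick round-trip sketch of the custom marshalling, in the same package (the timestamp is arbitrary): the error is flattened to its message on the way out and reconstructed with errors.New on the way in, so the concrete error type is not preserved.

func exampleCheckResultJSON() {
	cr := CheckResult{
		Status:    StatusDown,
		Timestamp: time.Date(2024, 10, 1, 12, 0, 0, 0, time.UTC),
		Error:     errors.New("connection refused"),
	}
	b, _ := json.Marshal(cr)
	fmt.Println(string(b))
	// {"status":"down","timestamp":"2024-10-01T12:00:00Z","error":"connection refused"}

	var back CheckResult
	_ = json.Unmarshal(b, &back)
	fmt.Println(back.Error) // connection refused
}
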
// criticality ranks availability statuses so that the most severe one wins
// when statuses are aggregated.
func (s AvailabilityStatus) criticality() int {
	switch s {
	case StatusDown:
		return 2
	case StatusUnknown:
		return 1
	default:
		return 0
	}
}

// CheckTimeoutErr is the error reported for a check whose context is
// cancelled before the check function returns.
var CheckTimeoutErr = errors.New("check timed out")

func newChecker(cfg checkerConfig) *defaultChecker {
	checkState := map[string]CheckState{}
	for _, check := range cfg.checks {
		checkState[check.Name] = CheckState{Status: StatusUnknown}
	}

	checker := defaultChecker{
		cfg:   cfg,
		state: CheckerState{Status: StatusUnknown, CheckState: checkState},
	}

	if !cfg.autostartDisabled {
		checker.Start()
	}

	return &checker
}

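A minimal, package-internal sketch of driving the checker (it assumes the Check type, defined elsewhere in this package, exposes the Name and Check fields that this file references):

func exampleSynchronousCheck(ctx context.Context) {
	// newChecker auto-starts unless autostartDisabled is set, spawning any
	// periodic checks and an initial check run in the background.
	ck := newChecker(checkerConfig{
		timeout: 10 * time.Second,
		checks: map[string]*Check{
			"database": {
				Name: "database",
				Check: func(ctx context.Context) error {
					return nil // e.g., ping the database here
				},
			},
		},
	})
	defer ck.Stop()

	// Check runs the synchronous (non-periodic) checks and merges in the
	// most recent results of any periodic checks.
	result := ck.Check(ctx)
	fmt.Println(result.Status) // "up"
}
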
// Start implements Checker.Start. Please refer to Checker.Start for more information.
func (ck *defaultChecker) Start() {
	ck.mtx.Lock()

	if !ck.started {
		ctx, cancel := context.WithCancel(context.Background())
		ck.cancel = cancel

		ck.started = true
		ck.startedAt = time.Now()
		defer ck.startPeriodicChecks(ctx)

		// We run the initial check execution in a separate goroutine so that server startup is not blocked in case of
		// a bad check that runs for a longer period of time.
		go ck.Check(ctx)
	}

	// Attention: We should avoid having this unlock as a deferred function call right after the mutex lock above,
	// since this may cause a deadlock (e.g., startPeriodicChecks requires the mutex lock as well and would block
	// because of the defer order).
	ck.mtx.Unlock()
}

// Stop implements Checker.Stop. Please refer to Checker.Stop for more information.
func (ck *defaultChecker) Stop() {
	ck.cancel()
	ck.wg.Wait()

	ck.mtx.Lock()
	defer ck.mtx.Unlock()

	ck.started = false
	ck.periodicCheckCount = 0
}

// GetRunningPeriodicCheckCount implements Checker.GetRunningPeriodicCheckCount.
// Please refer to Checker.GetRunningPeriodicCheckCount for more information.
func (ck *defaultChecker) GetRunningPeriodicCheckCount() int {
	ck.mtx.Lock()
	defer ck.mtx.Unlock()
	return ck.periodicCheckCount
}

// IsStarted implements Checker.IsStarted. Please refer to Checker.IsStarted for more information.
func (ck *defaultChecker) IsStarted() bool {
	ck.mtx.Lock()
	defer ck.mtx.Unlock()
	return ck.started
}

// StartedAt implements Checker.StartedAt.
func (ck *defaultChecker) StartedAt() time.Time {
	ck.mtx.Lock()
	defer ck.mtx.Unlock()
	return ck.startedAt
}

// Check implements Checker.Check. Please refer to Checker.Check for more information.
func (ck *defaultChecker) Check(ctx context.Context) CheckerResult {
	ck.mtx.Lock()
	defer ck.mtx.Unlock()

	ctx, cancel := context.WithTimeout(ctx, ck.cfg.timeout)
	defer cancel()

	ck.runSynchronousChecks(ctx)

	return ck.mapStateToCheckerResult()
}

func (ck *defaultChecker) runSynchronousChecks(ctx context.Context) {
	var (
		numChecks          = len(ck.cfg.checks)
		numInitiatedChecks = 0
		resChan            = make(chan checkResult, numChecks)
	)

	for _, check := range ck.cfg.checks {
		check := check

		if !isPeriodicCheck(check) {
			checkState := ck.state.CheckState[check.Name]

			if !isCacheExpired(ck.cfg.cacheTTL, &checkState) {
				continue
			}

			numInitiatedChecks++

			go func() {
				withCheckContext(ctx, check, func(ctx context.Context) {
					_, checkState := executeCheck(ctx, &ck.cfg, check, checkState)
					resChan <- checkResult{check.Name, checkState}
				})
			}()
		}
	}

	results := make([]checkResult, 0, numInitiatedChecks)
	for len(results) < numInitiatedChecks {
		results = append(results, <-resChan)
	}

	ck.updateState(ctx, results...)
}

func (ck *defaultChecker) startPeriodicChecks(ctx context.Context) {
	ck.mtx.Lock()
	defer ck.mtx.Unlock()

	// Start periodic checks.
	for _, check := range ck.cfg.checks {
		check := check

		if isPeriodicCheck(check) {
			// ATTENTION: Access to check and ck.state.CheckState is not synchronized here,
			// assuming that the accessed values are never changed, such as
			// - ck.state.CheckState[check.Name]
			// - the check object itself (there will never be a new Check object created for the configured check)
			// - check.updateInterval (used by isPeriodicCheck)
			// - check.initialDelay
			// ALSO:
			// - The check state itself is never synchronized on, since the only place where values can be changed
			//   is within this goroutine.

			ck.periodicCheckCount++
			ck.wg.Add(1)

			go func() {
				defer ck.wg.Done()

				if check.initialDelay > 0 {
					if waitForStopSignal(ctx, check.initialDelay) {
						return
					}
				}

				for {
					withCheckContext(ctx, check, func(ctx context.Context) {
						ck.mtx.Lock()
						checkState := ck.state.CheckState[check.Name]
						ck.mtx.Unlock()

						// ATTENTION: This function may panic, if panic handling is disabled
						// via "check.DisablePanicRecovery".
						//
						// ATTENTION: executeCheck is executed with its own copy of the check's
						// state (see checkState above). This means that if there is a global status
						// listener configured by the user with health.WithStatusListener,
						// and that global status listener changes this check's state while
						// executeCheck is running, the modifications made by the global listener
						// will be lost after the function completes, since we overwrite the state
						// below using updateState.
						// This means that global listeners should not change the check's state,
						// or must accept losing their updates. This will be the case especially
						// for long-running checks. Hence, the checkState is read-only for interceptors.
						ctx, checkState = executeCheck(ctx, &ck.cfg, check, checkState)

						ck.mtx.Lock()
						ck.updateState(ctx, checkResult{check.Name, checkState})
						ck.mtx.Unlock()
					})

					if waitForStopSignal(ctx, check.updateInterval) {
						return
					}
				}
			}()
		}
	}
}

func (ck *defaultChecker) updateState(ctx context.Context, updates ...checkResult) {
	for _, update := range updates {
		ck.state.CheckState[update.checkName] = update.newState
	}

	oldStatus := ck.state.Status
	ck.state.Status = aggregateStatus(ck.state.CheckState)

	if oldStatus != ck.state.Status && ck.cfg.statusChangeListener != nil {
		ck.cfg.statusChangeListener(ctx, ck.state)
	}
}

func (ck *defaultChecker) mapStateToCheckerResult() CheckerResult {
	var (
		checkResults map[string]CheckResult
		numChecks    = len(ck.cfg.checks)
		status       = ck.state.Status
	)

	if numChecks > 0 && !ck.cfg.detailsDisabled {
		checkResults = make(map[string]CheckResult, numChecks)
		for _, check := range ck.cfg.checks {
			checkState := ck.state.CheckState[check.Name]
			timestamp := checkState.LastCheckedAt
			if timestamp.IsZero() {
				timestamp = ck.startedAt
			}
			checkResults[check.Name] = CheckResult{
				Status:    checkState.Status,
				Error:     checkState.Result,
				Timestamp: timestamp,
			}
		}
	}

	return CheckerResult{Status: status, Details: checkResults, Info: ck.cfg.info}
}

func isCacheExpired(cacheDuration time.Duration, state *CheckState) bool {
	return state.LastCheckedAt.IsZero() || state.LastCheckedAt.Before(time.Now().Add(-cacheDuration))
}

func isActiveCheck(check *Check) bool {
	return check.IsActive == nil || check.IsActive()
}

func isPeriodicCheck(check *Check) bool {
	return check.updateInterval > 0
}

func waitForStopSignal(ctx context.Context, waitTime time.Duration) bool {
	select {
	case <-time.After(waitTime):
		return false
	case <-ctx.Done():
		return true
	}
}

func withCheckContext(ctx context.Context, check *Check, f func(checkCtx context.Context)) {
	cancel := func() {}
	if check.Timeout > 0 {
		ctx, cancel = context.WithTimeout(ctx, check.Timeout)
	}
	defer cancel()
	f(ctx)
}

func executeCheck(
	ctx context.Context,
	cfg *checkerConfig,
	check *Check,
	oldState CheckState,
) (context.Context, CheckState) {
	newState := oldState

	if newState.FirstCheckStartedAt.IsZero() {
		newState.FirstCheckStartedAt = time.Now()
	}

	// We copy explicitly so as not to affect the underlying array of the slices as a side effect.
	// These slices are passed to this library as configuration parameters, so we don't know how they
	// are being used otherwise in the user's program.
	interceptors := make([]Interceptor, 0, len(cfg.interceptors)+len(check.Interceptors))
	interceptors = append(interceptors, cfg.interceptors...)
	interceptors = append(interceptors, check.Interceptors...)

	if isActiveCheck(check) {
		newState = withInterceptors(interceptors, func(ctx context.Context, _ string, state CheckState) CheckState {
			checkFuncResult := executeCheckFunc(ctx, check)
			return createNextCheckState(checkFuncResult, check, state)
		})(ctx, check.Name, newState)
	} else {
		newState.Status = StatusInactive
	}

	if check.StatusListener != nil && oldState.Status != newState.Status {
		check.StatusListener(ctx, check.Name, newState)
	}

	return ctx, newState
}

func executeCheckFunc(ctx context.Context, check *Check) error {
	// If this channel were not buffered, we might have a goroutine leak (e.g., when ctx.Done signals first,
	// sending the check result into the channel would block forever).
	res := make(chan error, 1)

	go func() {
		defer func() {
			if !check.DisablePanicRecovery {
				if r := recover(); r != nil {
					// TODO: Provide a configurable panic handler configuration option, so developers can decide
					// what to do with panics.
					err, ok := r.(error)
					if ok {
						res <- err
					} else {
						res <- fmt.Errorf("%v", r)
					}
				}
			}
		}()

		res <- check.Check(ctx)
	}()

	select {
	case err := <-res:
		return err
	case <-ctx.Done():
		return CheckTimeoutErr
	}
}

func createNextCheckState(result error, check *Check, state CheckState) CheckState {
	now := time.Now()

	state.Result = result
	state.LastCheckedAt = now

	if state.Result == nil {
		state.ContiguousFails = 0
		state.LastSuccessAt = now
	} else {
		state.ContiguousFails++
		state.LastFailureAt = now
	}

	state.Status = evaluateCheckStatus(&state, check.MaxTimeInError, check.MaxContiguousFails)

	return state
}

func evaluateCheckStatus(state *CheckState, maxTimeInError time.Duration, maxFails uint) AvailabilityStatus {
	if state.LastCheckedAt.IsZero() {
		return StatusUnknown
	} else if state.Result != nil {
		maxTimeInErrorSinceStartPassed := !state.FirstCheckStartedAt.Add(maxTimeInError).After(time.Now())
		maxTimeInErrorSinceLastSuccessPassed := state.LastSuccessAt.IsZero() ||
			!state.LastSuccessAt.Add(maxTimeInError).After(time.Now())

		timeInErrorThresholdCrossed := maxTimeInErrorSinceStartPassed && maxTimeInErrorSinceLastSuccessPassed
		failCountThresholdCrossed := state.ContiguousFails >= maxFails

		if failCountThresholdCrossed && timeInErrorThresholdCrossed {
			return StatusDown
		}
	}

	return StatusUp
}

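To make the two thresholds concrete, a small worked example in the same package (values chosen for illustration): even after three consecutive failures, the check stays up while its last success is more recent than maxTimeInError, because both thresholds must be crossed before the status flips to down.

func exampleEvaluateCheckStatus() {
	state := CheckState{
		LastCheckedAt:       time.Now(),
		FirstCheckStartedAt: time.Now().Add(-5 * time.Minute),
		LastSuccessAt:       time.Now().Add(-30 * time.Second),
		ContiguousFails:     3,
		Result:              errors.New("boom"),
	}
	// The fail-count threshold (3 >= 3) is crossed, but the last success was
	// only 30s ago, so the time-in-error threshold (1 minute) is not.
	fmt.Println(evaluateCheckStatus(&state, time.Minute, 3)) // up
}
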
func aggregateStatus(results map[string]CheckState) AvailabilityStatus {
	status := StatusUp

	for _, result := range results {
		if result.Status.criticality() > status.criticality() {
			status = result.Status
		}
	}

	return status
}

func withInterceptors(interceptors []Interceptor, target InterceptorFunc) InterceptorFunc {
	chain := target

	for idx := len(interceptors) - 1; idx >= 0; idx-- {
		chain = interceptors[idx](chain)
	}

	return chain
}

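A small sketch of the resulting invocation order, in the same package: because the chain is built back to front, the first interceptor in the slice becomes the outermost wrapper.

func exampleInterceptorOrder() {
	trace := func(tag string) Interceptor {
		return func(next InterceptorFunc) InterceptorFunc {
			return func(ctx context.Context, name string, state CheckState) CheckState {
				fmt.Println("enter", tag)
				defer fmt.Println("leave", tag)
				return next(ctx, name, state)
			}
		}
	}
	target := func(ctx context.Context, name string, state CheckState) CheckState {
		fmt.Println("check runs")
		return state
	}
	chain := withInterceptors([]Interceptor{trace("outer"), trace("inner")}, target)
	chain(context.Background(), "demo", CheckState{})
	// Output: enter outer, enter inner, check runs, leave inner, leave outer
}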