From 05ddc1f7833e4efa1aa9111a911d552cd43f7bf4 Mon Sep 17 00:00:00 2001
From: cdn0x12
Date: Fri, 21 Feb 2025 00:49:20 +0800
Subject: [PATCH] [feature] migrate to monorepo

---
 .forgejo/workflows/build.yml | 50 +
 .forgejo/workflows/test.yml | 34 +
 .gitignore | 1 +
 README.md | 48 +
 api/.gitignore | 4 +
 api/package.json | 17 +
 api/schemas/components/parameters.yaml | 70 +
 api/schemas/components/responses.yaml | 67 +
 api/schemas/components/schemas.yaml | 375 +
 api/schemas/openapi.yaml | 123 +
 api/schemas/paths/auth.yaml | 54 +
 api/schemas/paths/categories.yaml | 165 +
 api/schemas/paths/contributors.yaml | 139 +
 api/schemas/paths/daily.yaml | 170 +
 api/schemas/paths/media.yaml | 111 +
 api/schemas/paths/posts.yaml | 198 +
 api/schemas/paths/users.yaml | 240 +
 backend/.gitignore | 42 +
 backend/Dockerfile | 21 +
 backend/LICENSE.md | 660 +
 backend/README.md | 69 +
 backend/cmd/server/main.go | 79 +
 backend/cmd/server/main_test.go | 127 +
 backend/config/config.yaml | 91 +
 backend/docker-compose.yml | 10 +
 backend/ent/category.go | 173 +
 backend/ent/category/category.go | 157 +
 backend/ent/category/where.go | 230 +
 backend/ent/category_create.go | 323 +
 backend/ent/category_delete.go | 88 +
 backend/ent/category_query.go | 757 ++
 backend/ent/category_update.go | 735 ++
 backend/ent/categorycontent.go | 175 +
 .../ent/categorycontent/categorycontent.go | 139 +
 backend/ent/categorycontent/where.go | 333 +
 backend/ent/categorycontent_create.go | 279 +
 backend/ent/categorycontent_delete.go | 88 +
 backend/ent/categorycontent_query.go | 614 +
 backend/ent/categorycontent_update.go | 484 +
 backend/ent/client.go | 2939 +++++
 backend/ent/contributor.go | 221 +
 backend/ent/contributor/contributor.go | 187 +
 backend/ent/contributor/where.go | 460 +
 backend/ent/contributor_create.go | 382 +
 backend/ent/contributor_delete.go | 88 +
 backend/ent/contributor_query.go | 765 ++
 backend/ent/contributor_update.go | 845 ++
 backend/ent/contributorrole.go | 129 +
 .../ent/contributorrole/contributorrole.go | 83 +
 backend/ent/contributorrole/where.go | 163 +
 backend/ent/contributorrole_create.go | 220 +
 backend/ent/contributorrole_delete.go | 88 +
 backend/ent/contributorrole_query.go | 609 +
 backend/ent/contributorrole_update.go | 398 +
 backend/ent/contributorsociallink.go | 164 +
 .../contributorsociallink.go | 132 +
 backend/ent/contributorsociallink/where.go | 263 +
 backend/ent/contributorsociallink_create.go | 261 +
 backend/ent/contributorsociallink_delete.go | 88 +
 backend/ent/contributorsociallink_query.go | 614 +
 backend/ent/contributorsociallink_update.go | 440 +
 backend/ent/daily.go | 191 +
 backend/ent/daily/daily.go | 144 +
 backend/ent/daily/where.go | 287 +
 backend/ent/daily_create.go | 325 +
 backend/ent/daily_delete.go | 88 +
 backend/ent/daily_query.go | 690 ++
 backend/ent/daily_update.go | 568 +
 backend/ent/dailycategory.go | 157 +
 backend/ent/dailycategory/dailycategory.go | 127 +
 backend/ent/dailycategory/where.go | 207 +
 backend/ent/dailycategory_create.go | 291 +
 backend/ent/dailycategory_delete.go | 88 +
 backend/ent/dailycategory_query.go | 682 ++
 backend/ent/dailycategory_update.go | 572 +
 backend/ent/dailycategorycontent.go | 153 +
 .../dailycategorycontent.go | 121 +
 backend/ent/dailycategorycontent/where.go | 183 +
 backend/ent/dailycategorycontent_create.go | 243 +
 backend/ent/dailycategorycontent_delete.go | 88 +
 backend/ent/dailycategorycontent_query.go | 614 +
 backend/ent/dailycategorycontent_update.go | 388 +
 backend/ent/dailycontent.go | 153 +
 backend/ent/dailycontent/dailycontent.go | 121 +
 backend/ent/dailycontent/where.go | 183 +
 backend/ent/dailycontent_create.go | 243 +
 backend/ent/dailycontent_delete.go | 88 +
 backend/ent/dailycontent_query.go | 614 +
 backend/ent/dailycontent_update.go | 388 +
 backend/ent/ent.go | 638 +
 backend/ent/enttest/enttest.go | 85 +
 backend/ent/generate.go | 3 +
 backend/ent/hook/hook.go | 378 +
 backend/ent/media.go | 222 +
 backend/ent/media/media.go | 159 +
 backend/ent/media/where.go | 589 +
 backend/ent/media_create.go | 372 +
 backend/ent/media_delete.go | 88 +
 backend/ent/media_query.go | 614 +
 backend/ent/media_update.go | 629 +
 backend/ent/migrate/migrate.go | 64 +
 backend/ent/migrate/schema.go | 479 +
 backend/ent/mutation.go | 10117 ++++++++++++++++
 backend/ent/permission.go | 176 +
 backend/ent/permission/permission.go | 129 +
 backend/ent/permission/where.go | 404 +
 backend/ent/permission_create.go | 313 +
 backend/ent/permission_delete.go | 88 +
 backend/ent/permission_query.go | 637 +
 backend/ent/permission_update.go | 531 +
 backend/ent/post.go | 210 +
 backend/ent/post/post.go | 207 +
 backend/ent/post/where.go | 320 +
 backend/ent/post_create.go | 376 +
 backend/ent/post_delete.go | 88 +
 backend/ent/post_query.go | 765 ++
 backend/ent/post_update.go | 785 ++
 backend/ent/postcontent.go | 208 +
 backend/ent/postcontent/postcontent.go | 167 +
 backend/ent/postcontent/where.go | 553 +
 backend/ent/postcontent_create.go | 333 +
 backend/ent/postcontent_delete.go | 88 +
 backend/ent/postcontent_query.go | 614 +
 backend/ent/postcontent_update.go | 624 +
 backend/ent/postcontributor.go | 217 +
 .../ent/postcontributor/postcontributor.go | 170 +
 backend/ent/postcontributor/where.go | 215 +
 backend/ent/postcontributor_create.go | 336 +
 backend/ent/postcontributor_delete.go | 88 +
 backend/ent/postcontributor_query.go | 764 ++
 backend/ent/postcontributor_update.go | 615 +
 backend/ent/predicate/predicate.go | 55 +
 backend/ent/role.go | 181 +
 backend/ent/role/role.go | 150 +
 backend/ent/role/where.go | 357 +
 backend/ent/role_create.go | 327 +
 backend/ent/role_delete.go | 88 +
 backend/ent/role_query.go | 742 ++
 backend/ent/role_update.go | 650 +
 backend/ent/runtime.go | 282 +
 backend/ent/runtime/runtime.go | 10 +
 backend/ent/schema/category.go | 33 +
 backend/ent/schema/categorycontent.go | 51 +
 backend/ent/schema/contributor.go | 41 +
 backend/ent/schema/contributorrole.go | 43 +
 backend/ent/schema/contributorsociallink.go | 40 +
 backend/ent/schema/daily.go | 45 +
 backend/ent/schema/dailycategory.go | 32 +
 backend/ent/schema/dailycategorycontent.go | 41 +
 backend/ent/schema/dailycontent.go | 45 +
 backend/ent/schema/media.go | 47 +
 backend/ent/schema/permission.go | 52 +
 backend/ent/schema/post.go | 41 +
 backend/ent/schema/postcontent.go | 55 +
 backend/ent/schema/postcontributor.go | 50 +
 backend/ent/schema/role.go | 41 +
 backend/ent/schema/user.go | 42 +
 backend/ent/tx.go | 255 +
 backend/ent/user.go | 207 +
 backend/ent/user/user.go | 217 +
 backend/ent/user/where.go | 390 +
 backend/ent/user_create.go | 389 +
 backend/ent/user_delete.go | 88 +
 backend/ent/user_query.go | 787 ++
 backend/ent/user_update.go | 883 ++
 backend/go.mod | 81 +
 backend/go.sum | 192 +
 backend/internal/auth/auth.go | 6 +
 backend/internal/auth/auth_test.go | 27 +
 backend/internal/config/config.go | 74 +
 backend/internal/config/config_test.go | 85 +
 backend/internal/handler/auth.go | 119 +
 backend/internal/handler/auth_handler_test.go | 276 +
 .../internal/handler/category_handler_test.go | 468 +
 .../handler/contributor_handler_test.go | 443 +
 .../internal/handler/daily_handler_test.go | 519 +
 backend/internal/handler/handler.go | 513 +
 backend/internal/handler/handler_test.go | 43 +
 backend/internal/handler/media.go | 173 +
 .../internal/handler/media_handler_test.go | 524 +
 backend/internal/handler/post_handler_test.go | 611 +
 backend/internal/middleware/accesslog.go | 192 +
 backend/internal/middleware/accesslog_test.go | 238 +
 backend/internal/middleware/auth.go | 82 +
 backend/internal/middleware/auth_test.go | 217 +
 backend/internal/middleware/cors.go | 22 +
 backend/internal/middleware/cors_test.go | 76 +
 backend/internal/middleware/ratelimit.go | 107 +
 backend/internal/middleware/ratelimit_test.go | 207 +
 backend/internal/middleware/rbac.go | 110 +
 backend/internal/middleware/upload.go | 159 +
 backend/internal/middleware/upload_test.go | 262 +
 backend/internal/rbac/init.go | 90 +
 backend/internal/rbac/init_test.go | 98 +
 backend/internal/server/database.go | 24 +
 backend/internal/server/database_test.go | 64 +
 backend/internal/server/ent.go | 31 +
 backend/internal/server/ent_test.go | 40 +
 backend/internal/server/server.go | 90 +
 backend/internal/server/server_test.go | 220 +
 backend/internal/service/impl.go | 892 ++
 backend/internal/service/impl_test.go | 1092 ++
 backend/internal/service/media.go | 179 +
 backend/internal/service/media_test.go | 332 +
 backend/internal/service/mock/mock.go | 3 +
 backend/internal/service/rbac_service.go | 105 +
 backend/internal/service/service.go | 59 +
 backend/internal/storage/factory.go | 66 +
 backend/internal/storage/local.go | 260 +
 backend/internal/storage/local_test.go | 154 +
 backend/internal/storage/s3.go | 232 +
 backend/internal/storage/s3_test.go | 211 +
 backend/internal/storage/storage.go | 38 +
 backend/internal/testutil/db.go | 57 +
 backend/internal/testutil/mock.go | 32 +
 backend/internal/testutil/testutil.go | 70 +
 backend/internal/types/config.go | 41 +
 backend/internal/types/config_test.go | 116 +
 backend/internal/types/file.go | 8 +
 backend/internal/types/file_test.go | 21 +
 backend/internal/types/types.go | 43 +
 backend/internal/types/types_test.go | 77 +
 backend/pkg/config/config.go | 44 +
 backend/pkg/config/config_test.go | 77 +
 backend/pkg/imageutil/processor.go | 59 +
 backend/pkg/imageutil/processor_test.go | 100 +
 backend/pkg/logger/logger.go | 51 +
 backend/pkg/logger/logger_test.go | 85 +
 backend/scripts/bundle-openapi.ps1 | 29 +
 backend/scripts/bundle-openapi.sh | 22 +
 frontend/.gitignore | 24 +
 frontend/data/i18n/en.json | 24 +
 frontend/data/i18n/zh-Hans.json | 24 +
 frontend/data/i18n/zh-Hant.json | 24 +
 frontend/eslint.config.js | 28 +
 frontend/index.html | 24 +
 frontend/package.json | 44 +
 frontend/pnpm-lock.yaml | 2799 +++++
 frontend/postcss.config.js | 6 +
 frontend/public/apple-touch-icon.png | Bin 0 -> 7807 bytes
 frontend/public/favicon-96x96.png | Bin 0 -> 4406 bytes
 frontend/public/favicon.ico | Bin 0 -> 15086 bytes
 frontend/public/favicon.svg | 3 +
 frontend/public/logo.avif | Bin 0 -> 10435 bytes
 frontend/public/logo.webp | Bin 0 -> 8470 bytes
 frontend/public/site.webmanifest | 21 +
 frontend/public/web-app-manifest-192x192.png | Bin 0 -> 8739 bytes
 frontend/public/web-app-manifest-512x512.png | Bin 0 -> 41804 bytes
 frontend/src/App.tsx | 38 +
 frontend/src/components/Footer.tsx | 93 +
 frontend/src/components/layout/Header.tsx | 239 +
 frontend/src/hooks/useTheme.ts | 35 +
 frontend/src/i18n.ts | 23 +
 frontend/src/index.css | 3 +
 frontend/src/main.tsx | 11 +
 frontend/src/pages/Article.tsx | 30 +
 frontend/src/pages/Daily.tsx | 15 +
 frontend/src/pages/Home.tsx | 17 +
 frontend/src/vite-env.d.ts | 1 +
 frontend/tailwind.config.js | 14 +
 frontend/tsconfig.app.json | 24 +
 frontend/tsconfig.json | 7 +
 frontend/tsconfig.node.json | 22 +
 frontend/vite.config.ts | 14 +
 package.json | 13 +
 pnpm-lock.yaml | 4214 +++++
 pnpm-workspace.yaml | 3 +
 267 files changed, 75166 insertions(+)
 create mode 100644 .forgejo/workflows/build.yml
 create mode 100644 .forgejo/workflows/test.yml
 create mode 100644 .gitignore
 create mode 100644 README.md
 create mode 100644 api/.gitignore
 create mode 100644 api/package.json
 create mode 100644 api/schemas/components/parameters.yaml
 create mode 100644 api/schemas/components/responses.yaml
 create mode 100644 api/schemas/components/schemas.yaml
 create mode 100644 api/schemas/openapi.yaml
 create mode 100644 api/schemas/paths/auth.yaml
 create mode 100644 api/schemas/paths/categories.yaml
 create mode 100644 api/schemas/paths/contributors.yaml
 create mode 100644 api/schemas/paths/daily.yaml
 create mode 100644 api/schemas/paths/media.yaml
 create mode 100644 api/schemas/paths/posts.yaml
 create mode 100644 api/schemas/paths/users.yaml
 create mode 100644 backend/.gitignore
 create mode 100644 backend/Dockerfile
 create mode 100644 backend/LICENSE.md
 create mode 100644 backend/README.md
 create mode 100644 backend/cmd/server/main.go
 create mode 100644 backend/cmd/server/main_test.go
 create mode 100644 backend/config/config.yaml
 create mode 100644 backend/docker-compose.yml
 create mode 100644 backend/ent/category.go
 create mode 100644 backend/ent/category/category.go
 create mode 100644 backend/ent/category/where.go
 create mode 100644 backend/ent/category_create.go
 create mode 100644 backend/ent/category_delete.go
 create mode 100644 backend/ent/category_query.go
 create mode 100644 backend/ent/category_update.go
 create mode 100644 backend/ent/categorycontent.go
 create mode 100644 backend/ent/categorycontent/categorycontent.go
 create mode 100644 backend/ent/categorycontent/where.go
 create mode 100644 backend/ent/categorycontent_create.go
 create mode 100644 backend/ent/categorycontent_delete.go
 create mode 100644 backend/ent/categorycontent_query.go
 create mode 100644 backend/ent/categorycontent_update.go
 create mode 100644 backend/ent/client.go
 create mode 100644 backend/ent/contributor.go
 create mode 100644 backend/ent/contributor/contributor.go
 create mode 100644 backend/ent/contributor/where.go
 create mode 100644 backend/ent/contributor_create.go
 create mode 100644 backend/ent/contributor_delete.go
 create mode 100644 backend/ent/contributor_query.go
 create mode 100644 backend/ent/contributor_update.go
 create mode 100644 backend/ent/contributorrole.go
 create mode 100644 backend/ent/contributorrole/contributorrole.go
 create mode 100644 backend/ent/contributorrole/where.go
 create mode 100644 backend/ent/contributorrole_create.go
 create mode 100644 backend/ent/contributorrole_delete.go
 create mode 100644 backend/ent/contributorrole_query.go
 create mode 100644 backend/ent/contributorrole_update.go
 create mode 100644 backend/ent/contributorsociallink.go
 create mode 100644 backend/ent/contributorsociallink/contributorsociallink.go
 create mode 100644 backend/ent/contributorsociallink/where.go
 create mode 100644 backend/ent/contributorsociallink_create.go
 create mode 100644 backend/ent/contributorsociallink_delete.go
 create mode 100644 backend/ent/contributorsociallink_query.go
 create mode 100644 backend/ent/contributorsociallink_update.go
 create mode 100644 backend/ent/daily.go
 create mode 100644 backend/ent/daily/daily.go
 create mode 100644 backend/ent/daily/where.go
 create mode 100644 backend/ent/daily_create.go
 create mode 100644 backend/ent/daily_delete.go
 create mode 100644 backend/ent/daily_query.go
 create mode 100644 backend/ent/daily_update.go
 create mode 100644 backend/ent/dailycategory.go
 create mode 100644 backend/ent/dailycategory/dailycategory.go
 create mode 100644 backend/ent/dailycategory/where.go
 create mode 100644 backend/ent/dailycategory_create.go
 create mode 100644 backend/ent/dailycategory_delete.go
 create mode 100644 backend/ent/dailycategory_query.go
 create mode 100644 backend/ent/dailycategory_update.go
 create mode 100644 backend/ent/dailycategorycontent.go
 create mode 100644 backend/ent/dailycategorycontent/dailycategorycontent.go
 create mode 100644 backend/ent/dailycategorycontent/where.go
 create mode 100644 backend/ent/dailycategorycontent_create.go
 create mode 100644 backend/ent/dailycategorycontent_delete.go
 create mode 100644 backend/ent/dailycategorycontent_query.go
 create mode 100644 backend/ent/dailycategorycontent_update.go
 create mode 100644 backend/ent/dailycontent.go
 create mode 100644 backend/ent/dailycontent/dailycontent.go
 create mode 100644 backend/ent/dailycontent/where.go
 create mode 100644 backend/ent/dailycontent_create.go
 create mode 100644 backend/ent/dailycontent_delete.go
 create mode 100644 backend/ent/dailycontent_query.go
 create mode 100644 backend/ent/dailycontent_update.go
 create mode 100644 backend/ent/ent.go
 create mode 100644 backend/ent/enttest/enttest.go
 create mode 100644 backend/ent/generate.go
 create mode 100644 backend/ent/hook/hook.go
 create mode 100644 backend/ent/media.go
 create mode 100644 backend/ent/media/media.go
 create mode 100644 backend/ent/media/where.go
 create mode 100644 backend/ent/media_create.go
 create mode 100644 backend/ent/media_delete.go
 create mode 100644 backend/ent/media_query.go
 create mode 100644 backend/ent/media_update.go
 create mode 100644 backend/ent/migrate/migrate.go
 create mode 100644 backend/ent/migrate/schema.go
 create mode 100644 backend/ent/mutation.go
 create mode 100644 backend/ent/permission.go
 create mode 100644 backend/ent/permission/permission.go
 create mode 100644 backend/ent/permission/where.go
 create mode 100644 backend/ent/permission_create.go
 create mode 100644 backend/ent/permission_delete.go
 create mode 100644 backend/ent/permission_query.go
 create mode 100644 backend/ent/permission_update.go
 create mode 100644 backend/ent/post.go
 create mode 100644 backend/ent/post/post.go
 create mode 100644 backend/ent/post/where.go
 create mode 100644 backend/ent/post_create.go
 create mode 100644 backend/ent/post_delete.go
 create mode 100644 backend/ent/post_query.go
 create mode 100644 backend/ent/post_update.go
 create mode 100644 backend/ent/postcontent.go
 create mode 100644 backend/ent/postcontent/postcontent.go
 create mode 100644 backend/ent/postcontent/where.go
 create mode 100644 backend/ent/postcontent_create.go
 create mode 100644 backend/ent/postcontent_delete.go
 create mode 100644 backend/ent/postcontent_query.go
 create mode 100644 backend/ent/postcontent_update.go
 create mode 100644 backend/ent/postcontributor.go
 create mode 100644 backend/ent/postcontributor/postcontributor.go
 create mode 100644 backend/ent/postcontributor/where.go
 create mode 100644 backend/ent/postcontributor_create.go
 create mode 100644 backend/ent/postcontributor_delete.go
 create mode 100644 backend/ent/postcontributor_query.go
 create mode 100644 backend/ent/postcontributor_update.go
 create mode 100644 backend/ent/predicate/predicate.go
 create mode 100644 backend/ent/role.go
 create mode 100644 backend/ent/role/role.go
 create mode 100644 backend/ent/role/where.go
 create mode 100644 backend/ent/role_create.go
 create mode 100644 backend/ent/role_delete.go
 create mode 100644 backend/ent/role_query.go
 create mode 100644 backend/ent/role_update.go
 create mode 100644 backend/ent/runtime.go
 create mode 100644 backend/ent/runtime/runtime.go
 create mode 100644 backend/ent/schema/category.go
 create mode 100644 backend/ent/schema/categorycontent.go
 create mode 100644 backend/ent/schema/contributor.go
 create mode 100644 backend/ent/schema/contributorrole.go
 create mode 100644 backend/ent/schema/contributorsociallink.go
 create mode 100644 backend/ent/schema/daily.go
 create mode 100644 backend/ent/schema/dailycategory.go
 create mode 100644 backend/ent/schema/dailycategorycontent.go
 create mode 100644 backend/ent/schema/dailycontent.go
 create mode 100644 backend/ent/schema/media.go
 create mode 100644 backend/ent/schema/permission.go
 create mode 100644 backend/ent/schema/post.go
 create mode 100644 backend/ent/schema/postcontent.go
 create mode 100644 backend/ent/schema/postcontributor.go
 create mode 100644 backend/ent/schema/role.go
 create mode 100644 backend/ent/schema/user.go
 create mode 100644 backend/ent/tx.go
 create mode 100644 backend/ent/user.go
 create mode 100644 backend/ent/user/user.go
 create mode 100644 backend/ent/user/where.go
 create mode 100644 backend/ent/user_create.go
 create mode 100644 backend/ent/user_delete.go
 create mode 100644 backend/ent/user_query.go
 create mode 100644 backend/ent/user_update.go
 create mode 100644 backend/go.mod
 create mode 100644 backend/go.sum
 create mode 100644 backend/internal/auth/auth.go
 create mode 100644 backend/internal/auth/auth_test.go
 create mode 100644 backend/internal/config/config.go
 create mode 100644 backend/internal/config/config_test.go
 create mode 100644 backend/internal/handler/auth.go
 create mode 100644 backend/internal/handler/auth_handler_test.go
 create mode 100644 backend/internal/handler/category_handler_test.go
 create mode 100644 backend/internal/handler/contributor_handler_test.go
 create mode 100644 backend/internal/handler/daily_handler_test.go
 create mode 100644 backend/internal/handler/handler.go
 create mode 100644 backend/internal/handler/handler_test.go
 create mode 100644 backend/internal/handler/media.go
 create mode 100644 backend/internal/handler/media_handler_test.go
 create mode 100644 backend/internal/handler/post_handler_test.go
 create mode 100644 backend/internal/middleware/accesslog.go
 create mode 100644 backend/internal/middleware/accesslog_test.go
 create mode 100644 backend/internal/middleware/auth.go
 create mode 100644 backend/internal/middleware/auth_test.go
 create mode 100644 backend/internal/middleware/cors.go
 create mode 100644 backend/internal/middleware/cors_test.go
 create mode 100644 backend/internal/middleware/ratelimit.go
 create mode 100644 backend/internal/middleware/ratelimit_test.go
 create mode 100644 backend/internal/middleware/rbac.go
 create mode 100644 backend/internal/middleware/upload.go
 create mode 100644 backend/internal/middleware/upload_test.go
 create mode 100644 backend/internal/rbac/init.go
 create mode 100644 backend/internal/rbac/init_test.go
 create mode 100644 backend/internal/server/database.go
 create mode 100644 backend/internal/server/database_test.go
 create mode 100644 backend/internal/server/ent.go
 create mode 100644 backend/internal/server/ent_test.go
 create mode 100644 backend/internal/server/server.go
 create mode 100644 backend/internal/server/server_test.go
 create mode 100644 backend/internal/service/impl.go
 create mode 100644 backend/internal/service/impl_test.go
 create mode 100644 backend/internal/service/media.go
 create mode 100644 backend/internal/service/media_test.go
 create mode 100644 backend/internal/service/mock/mock.go
 create mode 100644 backend/internal/service/rbac_service.go
 create mode 100644 backend/internal/service/service.go
 create mode 100644 backend/internal/storage/factory.go
 create mode 100644 backend/internal/storage/local.go
 create mode 100644 backend/internal/storage/local_test.go
 create mode 100644 backend/internal/storage/s3.go
 create mode 100644 backend/internal/storage/s3_test.go
 create mode 100644 backend/internal/storage/storage.go
 create mode 100644 backend/internal/testutil/db.go
 create mode 100644 backend/internal/testutil/mock.go
 create mode 100644 backend/internal/testutil/testutil.go
 create mode 100644 backend/internal/types/config.go
 create mode 100644 backend/internal/types/config_test.go
 create mode 100644 backend/internal/types/file.go
 create mode 100644 backend/internal/types/file_test.go
 create mode 100644 backend/internal/types/types.go
 create mode 100644 backend/internal/types/types_test.go
 create mode 100644 backend/pkg/config/config.go
 create mode 100644 backend/pkg/config/config_test.go
 create mode 100644 backend/pkg/imageutil/processor.go
 create mode 100644 backend/pkg/imageutil/processor_test.go
 create mode 100644 backend/pkg/logger/logger.go
 create mode 100644 backend/pkg/logger/logger_test.go
 create mode 100644 backend/scripts/bundle-openapi.ps1
 create mode 100644 backend/scripts/bundle-openapi.sh
 create mode 100644 frontend/.gitignore
 create mode 100644 frontend/data/i18n/en.json
 create mode 100644 frontend/data/i18n/zh-Hans.json
 create mode 100644 frontend/data/i18n/zh-Hant.json
 create mode 100644 frontend/eslint.config.js
 create mode 100644 frontend/index.html
 create mode 100644 frontend/package.json
 create mode 100644 frontend/pnpm-lock.yaml
 create mode 100644 frontend/postcss.config.js
 create mode 100644 frontend/public/apple-touch-icon.png
 create mode 100644 frontend/public/favicon-96x96.png
 create mode 100644 frontend/public/favicon.ico
 create mode 100644 frontend/public/favicon.svg
 create mode 100644 frontend/public/logo.avif
 create mode 100644 frontend/public/logo.webp
 create mode 100644 frontend/public/site.webmanifest
 create mode 100644 frontend/public/web-app-manifest-192x192.png
 create mode 100644 frontend/public/web-app-manifest-512x512.png
 create mode 100644 frontend/src/App.tsx
 create mode 100644 frontend/src/components/Footer.tsx
 create mode 100644 frontend/src/components/layout/Header.tsx
 create mode 100644 frontend/src/hooks/useTheme.ts
 create mode 100644 frontend/src/i18n.ts
 create mode 100644 frontend/src/index.css
 create mode 100644 frontend/src/main.tsx
 create mode 100644 frontend/src/pages/Article.tsx
 create mode 100644 frontend/src/pages/Daily.tsx
 create mode 100644 frontend/src/pages/Home.tsx
 create mode 100644 frontend/src/vite-env.d.ts
 create mode 100644 frontend/tailwind.config.js
 create mode 100644 frontend/tsconfig.app.json
 create mode 100644 frontend/tsconfig.json
 create mode 100644 frontend/tsconfig.node.json
 create mode 100644 frontend/vite.config.ts
 create mode 100644 package.json
 create mode 100644 pnpm-lock.yaml
 create mode 100644 pnpm-workspace.yaml

diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml
new file mode 100644
index 0000000..b81db26
--- /dev/null
+++ b/.forgejo/workflows/build.yml
@@ -0,0 +1,50 @@
+name: Build Backend
+
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - main
+
+jobs:
+  build:
+    name: Build Docker Image
+    runs-on: docker
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Configure Buildx
+        uses: docker/setup-buildx-action@v3
+        with:
+          driver: remote
+          endpoint: tcp://${{ secrets.AMD64_BUILDER_HOST }}:${{ secrets.AMD64_BUILDER_PORT }}
+          platforms: linux/amd64
+          name: ${{ secrets.AMD64_BUILDER_NAME }}
+          driver-opts: |
+            servername=${{ secrets.AMD64_BUILDER_NAME }}
+          append: |
+            - endpoint: tcp://${{ secrets.ARM64_BUILDER_HOST }}:${{ secrets.ARM64_BUILDER_PORT }}
+              platforms: linux/arm64
+              name: ${{ secrets.ARM64_BUILDER_NAME }}
+              driver-opts:
+                - "servername=${{ secrets.ARM64_BUILDER_NAME }}"
+        env:
+          BUILDER_NODE_0_AUTH_TLS_CACERT: ${{ secrets.AMD64_BUILDER_CACERT }}
+          BUILDER_NODE_0_AUTH_TLS_CERT: ${{ secrets.AMD64_BUILDER_CERT }}
+          BUILDER_NODE_0_AUTH_TLS_KEY: ${{ secrets.AMD64_BUILDER_KEY }}
+          BUILDER_NODE_1_AUTH_TLS_CACERT: ${{ secrets.ARM64_BUILDER_CACERT }}
+          BUILDER_NODE_1_AUTH_TLS_CERT: ${{ secrets.ARM64_BUILDER_CERT }}
+          BUILDER_NODE_1_AUTH_TLS_KEY: ${{ secrets.ARM64_BUILDER_KEY }}
+      - name: Log in to Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: git.owu.one
+          username: ${{ secrets.REGISTRY_USERNAME }}
+          password: ${{ secrets.REGISTRY_TOKEN }}
+      - name: Build and Push
+        uses: docker/build-push-action@v6
+        with:
+          context: backend
+          platforms: linux/amd64,linux/arm64
+          push: true
+          tags: git.owu.one/${{ github.repository_owner }}/tss-rocks-be:latest

diff --git a/.forgejo/workflows/test.yml b/.forgejo/workflows/test.yml
new file mode 100644
index 0000000..8db55fc
--- /dev/null
+++ b/.forgejo/workflows/test.yml
@@ -0,0 +1,34 @@
+name: Test Backend
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  test:
+    runs-on: [docker, amd64]
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 'stable'
+
+      - name: Set up dependencies
+        run: go mod download
+        working-directory: backend
+
+      - name: Install mockgen
+        run: go install go.uber.org/mock/mockgen@latest
+
+      - name: Generate mocks
+        run: go generate ./...
+        working-directory: backend
+
+      - name: Run tests
+        run: go test -v ./...
+        working-directory: backend
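test.yml reinstalls mockgen and reruns `go generate ./...` on every push, so the generated doubles under `mock/` (git-ignored via `**/mock/mock_*.go`) never go stale. As a hedged sketch of the hook this relies on — assuming the `Service` interface sits in `backend/internal/service/service.go`, and with the mockgen flags chosen here for illustration — the placeholder `mock/mock.go` could carry the directive:

```go
// Package mock holds generated test doubles for the service layer.
// A minimal sketch, assuming the interface lives in ../service.go;
// `go generate ./...` then rebuilds the git-ignored mock_service.go
// next to this file.
package mock

//go:generate mockgen -source=../service.go -destination=mock_service.go -package=mock
```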
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..40b878d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+node_modules/
\ No newline at end of file

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..cf7430e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,48 @@
+# TSS Rocks
+
+A monorepo containing the TSS Rocks project.
+
+## Project Structure
+
+- `backend/`: Go backend service
+- `frontend/`: React frontend application (including user portal and admin dashboard)
+- `api/`: Shared API type definitions generated from OpenAPI specs
+
+## Development
+
+### Prerequisites
+
+- Node.js (v18+)
+- pnpm
+- Go (v1.21+)
+
+### Setup
+
+1. Install dependencies:
+```bash
+pnpm install
+```
+
+2. Generate API types:
+```bash
+pnpm generate-types
+```
+
+3. Start development servers:
+
+Frontend:
+```bash
+pnpm dev
+```
+
+Backend:
+```bash
+cd backend
+go run ./cmd/server
+```
+
+## Building
+
+```bash
+pnpm build
+```

diff --git a/api/.gitignore b/api/.gitignore
new file mode 100644
index 0000000..e988657
--- /dev/null
+++ b/api/.gitignore
@@ -0,0 +1,4 @@
+node_modules/
+
+dist/openapi.yaml
+dist/openapi.json

diff --git a/api/package.json b/api/package.json
new file mode 100644
index 0000000..e8e9aa7
--- /dev/null
+++ b/api/package.json
@@ -0,0 +1,17 @@
+{
+  "name": "@tss-rocks/api",
+  "version": "0.0.1",
+  "private": true,
+  "scripts": {
+    "bundle:json": "redocly bundle ./schemas/openapi.yaml --output ./dist/openapi.json --ext json",
+    "bundle:yaml": "redocly bundle ./schemas/openapi.yaml --output ./dist/openapi.yaml --ext yaml",
+    "bundle": "pnpm bundle:json && pnpm bundle:yaml",
+    "build": "pnpm bundle",
+    "lint": "redocly lint ./schemas/openapi.yaml",
+    "preview": "redocly preview-docs ./schemas/openapi.yaml",
+    "docs": "redocly build-docs ./schemas/openapi.yaml --output ./dist/docs.html"
+  },
+  "devDependencies": {
+    "@redocly/cli": "^1.8.0"
+  }
+}

diff --git a/api/schemas/components/parameters.yaml b/api/schemas/components/parameters.yaml
new file mode 100644
index 0000000..3f5d9c6
--- /dev/null
+++ b/api/schemas/components/parameters.yaml
@@ -0,0 +1,70 @@
+Page:
+  in: query
+  name: page
+  schema:
+    type: integer
+    minimum: 1
+    default: 1
+  description: Page number
+  required: false
+
+PerPage:
+  in: query
+  name: per_page
+  schema:
+    type: integer
+    minimum: 1
+    maximum: 100
+    default: 20
+  description: Number of records per page
+  required: false
+
+Language:
+  in: query
+  name: lang
+  schema:
+    type: string
+    enum:
+      - en
+      - zh-Hans
+      - zh-Hant
+  description: Language code
+  required: false
+
+Sort:
+  in: query
+  name: sort
+  schema:
+    type: string
+    pattern: '^[a-zA-Z_]+:(asc|desc)$'
+    examples:
+      - 'created_at:desc'
+  description: 'Sort field and direction, in the format field:(asc|desc)'
+  required: false
+
+Status:
+  in: query
+  name: status
+  schema:
+    type: string
+    enum:
+      - draft
+      - published
+  description: Filter by content status
+  required: false
+
+Id:
+  in: path
+  name: id
+  schema:
+    type: integer
+  required: true
+  description: Resource ID
+
+Slug:
+  in: path
+  name: slug
+  schema:
+    type: string
+  required: true
+  description: Resource slug

diff --git a/api/schemas/components/responses.yaml b/api/schemas/components/responses.yaml
new file mode 100644
index 0000000..8c46463
--- /dev/null
+++ b/api/schemas/components/responses.yaml
@@ -0,0 +1,67 @@
+Unauthorized:
+  description: Unauthorized
+  content:
+    application/json:
+      schema:
+        $ref: './schemas.yaml#/Error'
+      example:
+        error:
+          code: UNAUTHORIZED
+          message: Unauthorized; please log in first
+
+Forbidden:
+  description: Forbidden
+  content:
+    application/json:
+      schema:
+        $ref: './schemas.yaml#/Error'
+      example:
+        error:
+          code: FORBIDDEN
+          message: You do not have permission to perform this action
+
+NotFound:
+  description: Resource not found
+  content:
+    application/json:
+      schema:
+        $ref: './schemas.yaml#/Error'
+      example:
+        error:
+          code: NOT_FOUND
+          message: The requested resource does not exist
+
+ValidationError:
+  description: Parameter validation failed
+  content:
+    application/json:
+      schema:
+        $ref: './schemas.yaml#/Error'
+      example:
+        error:
+          code: VALIDATION_ERROR
+          message: Parameter validation failed
+          details:
+            field: Error description
+
+TooManyRequests:
+  description: Too many requests
+  content:
+    application/json:
+      schema:
+        $ref: './schemas.yaml#/Error'
+      example:
+        error:
+          code: TOO_MANY_REQUESTS
+          message: Requests are too frequent; please try again later
+
+InternalError:
+  description: Internal server error
+  content:
+    application/json:
+      schema:
+        $ref: './schemas.yaml#/Error'
+      example:
+        error:
+          code: INTERNAL_ERROR
+          message: Internal server error; please try again later
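Every non-2xx response above shares the `Error` envelope: a machine-readable `code`, a human-readable `message`, and an optional free-form `details` object. A minimal client-side sketch of decoding it in Go (the type and variable names are illustrative, not part of the spec):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// APIError mirrors the Error schema: error.code, error.message,
// and an optional error.details object.
type APIError struct {
	Error struct {
		Code    string         `json:"code"`
		Message string         `json:"message"`
		Details map[string]any `json:"details,omitempty"`
	} `json:"error"`
}

func main() {
	body := []byte(`{"error":{"code":"NOT_FOUND","message":"The requested resource does not exist"}}`)
	var e APIError
	if err := json.Unmarshal(body, &e); err != nil {
		panic(err)
	}
	fmt.Println(e.Error.Code, "-", e.Error.Message)
}
```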
diff --git a/api/schemas/components/schemas.yaml b/api/schemas/components/schemas.yaml
new file mode 100644
index 0000000..dba9082
--- /dev/null
+++ b/api/schemas/components/schemas.yaml
@@ -0,0 +1,375 @@
+Response:
+  type: object
+  required:
+    - data
+  properties:
+    data: {}
+    meta:
+      type: object
+      properties:
+        total:
+          type: integer
+          description: Total number of records
+        page:
+          type: integer
+          description: Current page number
+        per_page:
+          type: integer
+          description: Records per page
+        total_pages:
+          type: integer
+          description: Total number of pages
+    links:
+      type: object
+      properties:
+        self:
+          type: string
+          format: uri
+        first:
+          type: string
+          format: uri
+        last:
+          type: string
+          format: uri
+        prev:
+          type: string
+          format: uri
+        next:
+          type: string
+          format: uri
+
+Error:
+  type: object
+  required:
+    - error
+  properties:
+    error:
+      type: object
+      required:
+        - code
+        - message
+      properties:
+        code:
+          type: string
+        message:
+          type: string
+        details:
+          type: object
+
+User:
+  type: object
+  required:
+    - id
+    - email
+    - role
+    - status
+  properties:
+    id:
+      type: integer
+    email:
+      type: string
+      format: email
+    role:
+      type: string
+      enum:
+        - admin
+        - editor
+    status:
+      type: string
+      enum:
+        - active
+        - inactive
+    created_at:
+      type: string
+      format: date-time
+    updated_at:
+      type: string
+      format: date-time
+
+Post:
+  type: object
+  required:
+    - id
+    - slug
+    - status
+    - contents
+  properties:
+    id:
+      type: integer
+    slug:
+      type: string
+    status:
+      type: string
+      enum:
+        - draft
+        - published
+    contents:
+      type: array
+      items:
+        $ref: '#/PostContent'
+    created_at:
+      type: string
+      format: date-time
+    updated_at:
+      type: string
+      format: date-time
+
+PostContent:
+  type: object
+  required:
+    - language_code
+    - title
+    - content_markdown
+  properties:
+    language_code:
+      type: string
+      enum:
+        - en
+        - zh-Hans
+        - zh-Hant
+    title:
+      type: string
+    content_markdown:
+      type: string
+    summary:
+      type: string
+    meta_keywords:
+      type: string
+    meta_description:
+      type: string
+
+Category:
+  type: object
+  required:
+    - id
+    - contents
+  properties:
+    id:
+      type: integer
+    contents:
+      type: array
+      items:
+        $ref: '#/CategoryContent'
+    created_at:
+      type: string
+      format: date-time
+    updated_at:
+      type: string
+      format: date-time
+
+CategoryContent:
+  type: object
+  required:
+    - language_code
+    - name
+    - slug
+  properties:
+    language_code:
+      type: string
+      enum:
+        - en
+        - zh-Hans
+        - zh-Hant
+    name:
+      type: string
+    description:
+      type: string
+    slug:
+      type: string
+
+Contributor:
+  type: object
+  required:
+    - id
+    - name
+  properties:
+    id:
+      type: integer
+    name:
+      type: string
+    avatar_url:
+      type: string
+      format: uri
+    bio:
+      type: string
+    social_links:
+      type: array
+      items:
+        type: object
+        required:
+          - type
+          - value
+        properties:
+          type:
+            type: string
+            enum:
+              - WeChat
+              - Bilibili
+              - Douyin/TikTok
+              - Kuaishou
+              - NCM
+              - Xiaohongshu
+              - QQ_Personal
+              - QQ_Group
+              - QQ_Channel
+              - Zhihu
+              - SinaWeibo
+              - Feishu/Lark
+              - DingTalk
+              - Douban
+              - CoolAPK
+              - Tieba
+              - Keyoxide
+              - GitHub
+              - Codeberg
+              - SourceHut
+              - Forgejo
+              - Gitea
+              - Facebook
+              - Instagram
+              - Twitter/X
+              - Snapchat
+              - Telegram
+              - WhatsApp
+              - LinkedIn
+              - Reddit
+              - YouTube
+              - Pinterest
+              - Bluesky
+              - Mastodon
+              - Fediverse
+              - Matrix
+              - XMPP
+              - IRC
+              - HomePage
+              - AFDian
+              - Paypal
+              - LiberPay
+              - Patreon
+              - Ko-Fi
+              - Custom
+          name:
+            type: string
+            description: 'Optional name for the social link, required only for Custom type'
+          value:
+            type: string
+            description: The actual link or identifier value
+    user_id:
+      type: integer
+    created_at:
+      type: string
+      format: date-time
+    updated_at:
+      type: string
+      format: date-time
+
+Media:
+  type: object
+  required:
+    - id
+    - url
+    - mime_type
+    - original_name
+    - storage_id
+    - size
+  properties:
+    id:
+      type: integer
+    storage_id:
+      type: string
+      description: Unique identifier in the storage system
+    original_name:
+      type: string
+      description: Original file name
+    url:
+      type: string
+      format: uri
+      description: File access URL
+    mime_type:
+      type: string
+      description: MIME type of the file; images are converted to image/webp
+    size:
+      type: integer
+      format: int64
+      description: File size in bytes
+    created_at:
+      type: string
+      format: date-time
+    updated_at:
+      type: string
+      format: date-time
+    created_by:
+      type: string
+      description: Creator ID
+
+DailyCategory:
+  type: object
+  required:
+    - id
+    - contents
+  properties:
+    id:
+      type: integer
+    contents:
+      type: array
+      items:
+        type: object
+        required:
+          - language_code
+          - name
+        properties:
+          language_code:
+            type: string
+            enum:
+              - en
+              - zh-Hans
+              - zh-Hant
+          name:
+            type: string
+    created_at:
+      type: string
+      format: date-time
+    updated_at:
+      type: string
+      format: date-time
+
+Daily:
+  type: object
+  required:
+    - id
+    - category
+    - image_url
+    - contents
+  properties:
+    id:
+      type: string
+      pattern: '^\d{6}$'
+      examples:
+        - '250206'
+    category:
+      $ref: '#/DailyCategory'
+    image_url:
+      type: string
+      format: uri
+    contents:
+      type: array
+      items:
+        type: object
+        required:
+          - language_code
+          - quote
+        properties:
+          language_code:
+            type: string
+            enum:
+              - en
+              - zh-Hans
+              - zh-Hant
+          quote:
+            type: string
+    created_at:
+      type: string
+      format: date-time
+    updated_at:
+      type: string
+      format: date-time
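List endpoints wrap their payload in the `Response` envelope above, with `meta` carrying pagination counters and `links` the navigation URIs. A hedged Go mirror of that shape — the generic `Data` parameter is an assumption about how a client might type it, not something the schema prescribes:

```go
// Package apitypes is a hypothetical client-side mirror of the
// Response envelope defined in schemas.yaml.
package apitypes

// Meta carries the pagination counters from Response.meta.
type Meta struct {
	Total      int `json:"total"`
	Page       int `json:"page"`
	PerPage    int `json:"per_page"`
	TotalPages int `json:"total_pages"`
}

// Links carries the navigation URIs from Response.links.
type Links struct {
	Self  string `json:"self"`
	First string `json:"first"`
	Last  string `json:"last"`
	Prev  string `json:"prev,omitempty"`
	Next  string `json:"next,omitempty"`
}

// Response is the envelope; T would be, e.g., []Post for GET /posts.
type Response[T any] struct {
	Data  T      `json:"data"`
	Meta  *Meta  `json:"meta,omitempty"`
	Links *Links `json:"links,omitempty"`
}
```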
diff --git a/api/schemas/openapi.yaml b/api/schemas/openapi.yaml
new file mode 100644
index 0000000..693fe54
--- /dev/null
+++ b/api/schemas/openapi.yaml
@@ -0,0 +1,123 @@
+openapi: 3.1.0
+info:
+  title: TSS Rocks API
+  description: |
+    TSS Rocks REST API
+  version: 0.1.0
+  contact:
+    name: STARSET Mirror
+    url: 'https://mirror.starset.fans/about'
+servers:
+  - url: 'https://tss.rocks/api/v1'
+    description: API v1 endpoints
+security:
+  - BearerAuth: []
+components:
+  securitySchemes:
+    BearerAuth:
+      type: http
+      scheme: bearer
+      bearerFormat: JWT
+  parameters:
+    Page:
+      $ref: './components/parameters.yaml#/Page'
+    PerPage:
+      $ref: './components/parameters.yaml#/PerPage'
+    Language:
+      $ref: './components/parameters.yaml#/Language'
+    Sort:
+      $ref: './components/parameters.yaml#/Sort'
+    Status:
+      $ref: './components/parameters.yaml#/Status'
+    Id:
+      $ref: './components/parameters.yaml#/Id'
+    Slug:
+      $ref: './components/parameters.yaml#/Slug'
+  responses:
+    Unauthorized:
+      $ref: './components/responses.yaml#/Unauthorized'
+    Forbidden:
+      $ref: './components/responses.yaml#/Forbidden'
+    NotFound:
+      $ref: './components/responses.yaml#/NotFound'
+    ValidationError:
+      $ref: './components/responses.yaml#/ValidationError'
+    TooManyRequests:
+      $ref: './components/responses.yaml#/TooManyRequests'
+    InternalError:
+      $ref: './components/responses.yaml#/InternalError'
+  schemas:
+    Response:
+      $ref: './components/schemas.yaml#/Response'
+    Error:
+      $ref: './components/schemas.yaml#/Error'
+    User:
+      $ref: './components/schemas.yaml#/User'
+    Post:
+      $ref: './components/schemas.yaml#/Post'
+    PostContent:
+      $ref: './components/schemas.yaml#/PostContent'
+    Category:
+      $ref: './components/schemas.yaml#/Category'
+    CategoryContent:
+      $ref: './components/schemas.yaml#/CategoryContent'
+    Contributor:
+      $ref: './components/schemas.yaml#/Contributor'
+    Media:
+      $ref: './components/schemas.yaml#/Media'
+    DailyCategory:
+      $ref: './components/schemas.yaml#/DailyCategory'
+    Daily:
+      $ref: './components/schemas.yaml#/Daily'
+paths:
+  /auth/login:
+    $ref: './paths/auth.yaml#/login'
+  /auth/logout:
+    $ref: './paths/auth.yaml#/logout'
+  /posts:
+    $ref: './paths/posts.yaml#/posts'
+  '/posts/{slug}':
+    $ref: './paths/posts.yaml#/post_slug'
+  '/posts/{slug}/contributors':
+    $ref: './paths/posts.yaml#/post_contributors'
+  /categories:
+    $ref: './paths/categories.yaml#/categories'
+  '/categories/{slug}':
+    $ref: './paths/categories.yaml#/category_slug'
+  '/categories/{slug}/posts':
+    $ref: './paths/categories.yaml#/category_posts'
+  /contributors:
+    $ref: './paths/contributors.yaml#/contributors'
+  '/contributors/{id}':
+    $ref: './paths/contributors.yaml#/contributor_id'
+  '/contributors/{id}/posts':
+    $ref: './paths/contributors.yaml#/contributor_posts'
+  /users:
+    $ref: './paths/users.yaml#/users'
+  '/users/{id}':
+    $ref: './paths/users.yaml#/user_id'
+  /users/me:
+    $ref: './paths/users.yaml#/user_me'
+  /media:
+    $ref: './paths/media.yaml#/media'
+  '/media/{id}':
+    $ref: './paths/media.yaml#/media_id'
+  /daily:
+    $ref: './paths/daily.yaml#/daily'
+  '/daily/{id}':
+    $ref: './paths/daily.yaml#/daily_id'
+tags:
+  - name: auth
+    description: Authentication endpoints
+  - name: posts
+    description: Post management endpoints
+  - name: categories
+    description: Category management endpoints
+  - name: contributors
+    description: Contributor management endpoints
+  - name: media
+    description: Media file management endpoints
+  - name: daily
+    description: Daily image management endpoints
+  - name: users
+    description: User management endpoints
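openapi.yaml applies the `BearerAuth` JWT scheme globally, so apart from `login` every request carries the token in the `Authorization` header. A minimal sketch against the declared `servers` base URL (the token value is illustrative):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	token := "…" // JWT returned by POST /auth/login (illustrative)
	req, err := http.NewRequest(http.MethodGet, "https://tss.rocks/api/v1/users/me", nil)
	if err != nil {
		log.Fatal(err)
	}
	// BearerAuth: every protected endpoint expects this header.
	req.Header.Set("Authorization", "Bearer "+token)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```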
diff --git a/api/schemas/paths/auth.yaml b/api/schemas/paths/auth.yaml
new file mode 100644
index 0000000..cbe67cb
--- /dev/null
+++ b/api/schemas/paths/auth.yaml
@@ -0,0 +1,54 @@
+login:
+  post:
+    tags:
+      - auth
+    summary: User login
+    operationId: login
+    security: []  # The login endpoint does not require authentication
+    requestBody:
+      required: true
+      content:
+        application/json:
+          schema:
+            type: object
+            required:
+              - email
+              - password
+            properties:
+              email:
+                type: string
+                format: email
+              password:
+                type: string
+                format: password
+    responses:
+      '200':
+        description: Login successful
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - token
+                - user
+              properties:
+                token:
+                  type: string
+                user:
+                  $ref: '../components/schemas.yaml#/User'
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+      '422':
+        $ref: '../components/responses.yaml#/ValidationError'
+
+logout:
+  post:
+    tags:
+      - auth
+    summary: User logout
+    operationId: logout
+    responses:
+      '204':
+        description: Logout successful
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
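`POST /auth/login` is the one unauthenticated endpoint; it exchanges credentials for the JWT that `BearerAuth` expects. A hedged sketch of the round trip (credentials are illustrative):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// LoginResponse mirrors the 200 body of POST /auth/login.
type LoginResponse struct {
	Token string `json:"token"`
	User  struct {
		ID    int    `json:"id"`
		Email string `json:"email"`
		Role  string `json:"role"`
	} `json:"user"`
}

func main() {
	creds, _ := json.Marshal(map[string]string{
		"email":    "editor@example.com", // illustrative credentials
		"password": "…",
	})
	resp, err := http.Post("https://tss.rocks/api/v1/auth/login",
		"application/json", bytes.NewReader(creds))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var out LoginResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println("token for BearerAuth:", out.Token)
}
```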
diff --git a/api/schemas/paths/categories.yaml b/api/schemas/paths/categories.yaml
new file mode 100644
index 0000000..a4b0c8b
--- /dev/null
+++ b/api/schemas/paths/categories.yaml
@@ -0,0 +1,165 @@
+categories:
+  get:
+    tags:
+      - categories
+    summary: List categories
+    operationId: listCategories
+    parameters:
+      - $ref: '../components/parameters.yaml#/Page'
+      - $ref: '../components/parameters.yaml#/PerPage'
+      - $ref: '../components/parameters.yaml#/Language'
+      - $ref: '../components/parameters.yaml#/Sort'
+    responses:
+      '200':
+        description: Category list retrieved successfully
+        content:
+          application/json:
+            schema:
+              allOf:
+                - $ref: '../components/schemas.yaml#/Response'
+                - type: object
+                  properties:
+                    data:
+                      type: array
+                      items:
+                        $ref: '../components/schemas.yaml#/Category'
+  post:
+    tags:
+      - categories
+    summary: Create a new category
+    operationId: createCategory
+    requestBody:
+      required: true
+      content:
+        application/json:
+          schema:
+            type: object
+            required:
+              - contents
+            properties:
+              contents:
+                type: array
+                items:
+                  $ref: '../components/schemas.yaml#/CategoryContent'
+    responses:
+      '201':
+        description: Category created successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Category'
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+      '403':
+        $ref: '../components/responses.yaml#/Forbidden'
+      '422':
+        $ref: '../components/responses.yaml#/ValidationError'
+
+category_slug:
+  parameters:
+    - $ref: '../components/parameters.yaml#/Slug'
+  get:
+    tags:
+      - categories
+    summary: Get category details
+    operationId: getCategory
+    parameters:
+      - $ref: '../components/parameters.yaml#/Language'
+    responses:
+      '200':
+        description: Category details retrieved successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Category'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
+  put:
+    tags:
+      - categories
+    summary: Update a category
+    operationId: updateCategory
+    parameters:
+      - $ref: '../components/parameters.yaml#/Slug'
+    requestBody:
+      required: true
+      content:
+        application/json:
+          schema:
+            type: object
+            properties:
+              contents:
+                type: array
+                items:
+                  $ref: '../components/schemas.yaml#/CategoryContent'
+    responses:
+      '200':
+        description: Category updated successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Category'
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+      '403':
+        $ref: '../components/responses.yaml#/Forbidden'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
+      '422':
+        $ref: '../components/responses.yaml#/ValidationError'
+  delete:
+    tags:
+      - categories
+    summary: Delete a category
+    operationId: deleteCategory
+    parameters:
+      - $ref: '../components/parameters.yaml#/Slug'
+    responses:
+      '204':
+        description: Category deleted successfully
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+      '403':
+        $ref: '../components/responses.yaml#/Forbidden'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
+
+category_posts:
+  parameters:
+    - $ref: '../components/parameters.yaml#/Slug'
+  get:
+    tags:
+      - categories
+    summary: List posts in a category
+    operationId: getCategoryPosts
+    parameters:
+      - $ref: '../components/parameters.yaml#/Page'
+      - $ref: '../components/parameters.yaml#/PerPage'
+      - $ref: '../components/parameters.yaml#/Language'
+      - $ref: '../components/parameters.yaml#/Sort'
+      - $ref: '../components/parameters.yaml#/Status'
+    responses:
+      '200':
+        description: Post list retrieved successfully
+        content:
+          application/json:
+            schema:
+              allOf:
+                - $ref: '../components/schemas.yaml#/Response'
+                - type: object
+                  properties:
+                    data:
+                      type: array
+                      items:
+                        $ref: '../components/schemas.yaml#/Post'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
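All list endpoints share the `Page`/`PerPage`/`Language`/`Sort` parameters from parameters.yaml. A small sketch of building a conforming query string (the values are illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Illustrative: list zh-Hans categories, newest first, page 2.
	q := url.Values{}
	q.Set("lang", "zh-Hans")         // Language parameter
	q.Set("page", "2")               // Page, 1-based
	q.Set("per_page", "50")          // PerPage, capped at 100 by the spec
	q.Set("sort", "created_at:desc") // must match ^[a-zA-Z_]+:(asc|desc)$
	fmt.Println("https://tss.rocks/api/v1/categories?" + q.Encode())
}
```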
diff --git a/api/schemas/paths/contributors.yaml b/api/schemas/paths/contributors.yaml
new file mode 100644
index 0000000..f58be48
--- /dev/null
+++ b/api/schemas/paths/contributors.yaml
@@ -0,0 +1,139 @@
+contributors:
+  get:
+    tags:
+      - contributors
+    summary: List contributors
+    operationId: listContributors
+    parameters:
+      - $ref: '../components/parameters.yaml#/Page'
+      - $ref: '../components/parameters.yaml#/PerPage'
+      - $ref: '../components/parameters.yaml#/Sort'
+    responses:
+      '200':
+        description: Contributor list retrieved successfully
+        content:
+          application/json:
+            schema:
+              allOf:
+                - $ref: '../components/schemas.yaml#/Response'
+                - type: object
+                  properties:
+                    data:
+                      type: array
+                      items:
+                        $ref: '../components/schemas.yaml#/Contributor'
+  post:
+    tags:
+      - contributors
+    summary: Create a new contributor
+    operationId: createContributor
+    requestBody:
+      required: true
+      content:
+        application/json:
+          schema:
+            $ref: '../components/schemas.yaml#/Contributor'
+    responses:
+      '201':
+        description: Contributor created successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Contributor'
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+      '403':
+        $ref: '../components/responses.yaml#/Forbidden'
+      '422':
+        $ref: '../components/responses.yaml#/ValidationError'
+
+contributor_id:
+  parameters:
+    - $ref: '../components/parameters.yaml#/Id'
+  get:
+    tags:
+      - contributors
+    summary: Get contributor details
+    operationId: getContributor
+    responses:
+      '200':
+        description: Contributor details retrieved successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Contributor'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
+  put:
+    tags:
+      - contributors
+    summary: Update contributor information
+    operationId: updateContributor
+    parameters:
+      - $ref: '../components/parameters.yaml#/Id'
+    requestBody:
+      required: true
+      content:
+        application/json:
+          schema:
+            $ref: '../components/schemas.yaml#/Contributor'
+    responses:
+      '200':
+        description: Contributor information updated successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Contributor'
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+      '403':
+        $ref: '../components/responses.yaml#/Forbidden'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
+      '422':
+        $ref: '../components/responses.yaml#/ValidationError'
+
+contributor_posts:
+  parameters:
+    - $ref: '../components/parameters.yaml#/Id'
+  get:
+    tags:
+      - contributors
+    summary: List posts the contributor participated in
+    operationId: getContributorPosts
+    parameters:
+      - $ref: '../components/parameters.yaml#/Page'
+      - $ref: '../components/parameters.yaml#/PerPage'
+      - $ref: '../components/parameters.yaml#/Language'
+      - $ref: '../components/parameters.yaml#/Sort'
+      - $ref: '../components/parameters.yaml#/Status'
+      - name: role
+        in: query
+        schema:
+          type: string
+        description: Filter by contributor role
+    responses:
+      '200':
+        description: Post list retrieved successfully
+        content:
+          application/json:
+            schema:
+              allOf:
+                - $ref: '../components/schemas.yaml#/Response'
+                - type: object
+                  properties:
+                    data:
+                      type: array
+                      items:
+                        $ref: '../components/schemas.yaml#/Post'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
diff --git a/api/schemas/paths/daily.yaml b/api/schemas/paths/daily.yaml
new file mode 100644
index 0000000..55858cf
--- /dev/null
+++ b/api/schemas/paths/daily.yaml
@@ -0,0 +1,170 @@
+daily:
+  get:
+    tags:
+      - daily
+    summary: List daily images
+    operationId: listDaily
+    parameters:
+      - $ref: '../components/parameters.yaml#/Page'
+      - $ref: '../components/parameters.yaml#/PerPage'
+      - $ref: '../components/parameters.yaml#/Sort'
+      - $ref: '../components/parameters.yaml#/Language'
+      - name: category_id
+        in: query
+        schema:
+          type: integer
+        description: Filter by category ID
+    responses:
+      '200':
+        description: Daily image list retrieved successfully
+        content:
+          application/json:
+            schema:
+              allOf:
+                - $ref: '../components/schemas.yaml#/Response'
+                - type: object
+                  properties:
+                    data:
+                      type: array
+                      items:
+                        $ref: '../components/schemas.yaml#/Daily'
+  post:
+    tags:
+      - daily
+    summary: Create a daily image
+    operationId: createDaily
+    requestBody:
+      required: true
+      content:
+        application/json:
+          schema:
+            type: object
+            required:
+              - id
+              - category_id
+              - image_url
+              - contents
+            properties:
+              id:
+                type: string
+                pattern: '^\d{6}$'
+                examples:
+                  - '250206'
+              category_id:
+                type: integer
+              image_url:
+                type: string
+                format: uri
+              contents:
+                type: array
+                items:
+                  type: object
+                  required:
+                    - language_code
+                    - quote
+                  properties:
+                    language_code:
+                      type: string
+                      enum:
+                        - en
+                        - zh-Hans
+                        - zh-Hant
+                    quote:
+                      type: string
+    responses:
+      '201':
+        description: Daily image created successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Daily'
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+      '403':
+        $ref: '../components/responses.yaml#/Forbidden'
+      '422':
+        $ref: '../components/responses.yaml#/ValidationError'
+
+daily_id:
+  parameters:
+    - name: id
+      in: path
+      required: true
+      schema:
+        type: string
+        pattern: '^\d{6}$'
+        examples:
+          - '250206'
+  get:
+    tags:
+      - daily
+    summary: Get daily image details
+    operationId: getDaily
+    parameters:
+      - $ref: '../components/parameters.yaml#/Language'
+    responses:
+      '200':
+        description: Daily image details retrieved successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Daily'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
+  put:
+    tags:
+      - daily
+    summary: Update a daily image
+    operationId: updateDaily
+    requestBody:
+      required: true
+      content:
+        application/json:
+          schema:
+            type: object
+            properties:
+              category_id:
+                type: integer
+              image_url:
+                type: string
+                format: uri
+              contents:
+                type: array
+                items:
+                  type: object
+                  required:
+                    - language_code
+                    - quote
+                  properties:
+                    language_code:
+                      type: string
+                      enum:
+                        - en
+                        - zh-Hans
+                        - zh-Hant
+                    quote:
+                      type: string
+    responses:
+      '200':
+        description: Daily image updated successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Daily'
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+      '403':
+        $ref: '../components/responses.yaml#/Forbidden'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
+      '422':
+        $ref: '../components/responses.yaml#/ValidationError'
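Daily entries are addressed by a six-digit string ID matching `^\d{6}$`; the example `250206` suggests a YYMMDD date stamp, though the spec itself only mandates the pattern. A minimal validation sketch:

```go
package main

import (
	"fmt"
	"regexp"
)

// dailyID enforces the ^\d{6}$ pattern from the Daily schema.
var dailyID = regexp.MustCompile(`^\d{6}$`)

func main() {
	for _, id := range []string{"250206", "25020", "2025-02-06"} {
		fmt.Printf("%-10s valid=%v\n", id, dailyID.MatchString(id))
	}
}
```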
diff --git a/api/schemas/paths/media.yaml b/api/schemas/paths/media.yaml
new file mode 100644
index 0000000..58c6df9
--- /dev/null
+++ b/api/schemas/paths/media.yaml
@@ -0,0 +1,111 @@
+media:
+  get:
+    tags:
+      - media
+    summary: List media files
+    operationId: listMedia
+    parameters:
+      - $ref: '../components/parameters.yaml#/Page'
+      - $ref: '../components/parameters.yaml#/PerPage'
+      - $ref: '../components/parameters.yaml#/Sort'
+      - name: mime_type
+        in: query
+        schema:
+          type: string
+        description: Filter by MIME type
+    responses:
+      '200':
+        description: Media file list retrieved successfully
+        content:
+          application/json:
+            schema:
+              allOf:
+                - $ref: '../components/schemas.yaml#/Response'
+                - type: object
+                  properties:
+                    data:
+                      type: array
+                      items:
+                        $ref: '../components/schemas.yaml#/Media'
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+  post:
+    tags:
+      - media
+    summary: Upload a media file
+    description: |
+      Upload a media file. For image files:
+      - Supported input formats: JPEG, PNG, GIF
+      - All images are automatically converted to WebP format
+      - Lossless compression is used to preserve image quality
+      - Original image dimensions are preserved
+    operationId: uploadMedia
+    requestBody:
+      required: true
+      content:
+        multipart/form-data:
+          schema:
+            type: object
+            required:
+              - file
+            properties:
+              file:
+                type: string
+                format: binary
+                description: |
+                  The file to upload.
+                  Image files are automatically converted to WebP format.
+                  Supported image formats: image/jpeg, image/png, image/gif
+    responses:
+      '201':
+        description: Media file uploaded successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Media'
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+      '403':
+        $ref: '../components/responses.yaml#/Forbidden'
+      '422':
+        $ref: '../components/responses.yaml#/ValidationError'
+
+media_id:
+  parameters:
+    - $ref: '../components/parameters.yaml#/Id'
+  get:
+    tags:
+      - media
+    summary: Get media file details
+    operationId: getMedia
+    responses:
+      '200':
+        description: Media file details retrieved successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Media'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
+  delete:
+    tags:
+      - media
+    summary: Delete a media file
+    operationId: deleteMedia
+    parameters:
+      - $ref: '../components/parameters.yaml#/Id'
+    responses:
+      '204':
+        description: Media file deleted successfully
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+      '403':
+        $ref: '../components/responses.yaml#/Forbidden'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
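`POST /media` expects `multipart/form-data` with a single `file` part, and the backend converts JPEG/PNG/GIF uploads to lossless WebP. A hedged client sketch (the file path and token source are illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"os"
)

func main() {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	part, err := w.CreateFormFile("file", "photo.jpg") // field name per the spec
	if err != nil {
		panic(err)
	}
	f, err := os.Open("photo.jpg") // illustrative local file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if _, err := io.Copy(part, f); err != nil {
		panic(err)
	}
	w.Close() // finalize the multipart body before sending

	req, err := http.NewRequest(http.MethodPost, "https://tss.rocks/api/v1/media", &buf)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	req.Header.Set("Authorization", "Bearer "+os.Getenv("TSS_TOKEN")) // illustrative
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect 201 with the Media object in data
}
```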
diff --git a/api/schemas/paths/posts.yaml b/api/schemas/paths/posts.yaml
new file mode 100644
index 0000000..f56c040
--- /dev/null
+++ b/api/schemas/paths/posts.yaml
@@ -0,0 +1,198 @@
+posts:
+  get:
+    tags:
+      - posts
+    summary: List posts
+    operationId: listPosts
+    parameters:
+      - $ref: '../components/parameters.yaml#/Page'
+      - $ref: '../components/parameters.yaml#/PerPage'
+      - $ref: '../components/parameters.yaml#/Language'
+      - $ref: '../components/parameters.yaml#/Sort'
+      - $ref: '../components/parameters.yaml#/Status'
+      - name: category
+        in: query
+        schema:
+          type: string
+        description: Filter by category
+    responses:
+      '200':
+        description: Post list retrieved successfully
+        content:
+          application/json:
+            schema:
+              allOf:
+                - $ref: '../components/schemas.yaml#/Response'
+                - type: object
+                  properties:
+                    data:
+                      type: array
+                      items:
+                        $ref: '../components/schemas.yaml#/Post'
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+  post:
+    tags:
+      - posts
+    summary: Create a new post
+    operationId: createPost
+    requestBody:
+      required: true
+      content:
+        application/json:
+          schema:
+            type: object
+            required:
+              - slug
+              - contents
+            properties:
+              slug:
+                type: string
+              status:
+                type: string
+                enum:
+                  - draft
+                  - published
+                default: draft
+              contents:
+                type: array
+                items:
+                  $ref: '../components/schemas.yaml#/PostContent'
+              category_ids:
+                type: array
+                items:
+                  type: integer
+    responses:
+      '201':
+        description: Post created successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Post'
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+      '403':
+        $ref: '../components/responses.yaml#/Forbidden'
+      '422':
+        $ref: '../components/responses.yaml#/ValidationError'
+
+post_slug:
+  parameters:
+    - $ref: '../components/parameters.yaml#/Slug'
+  get:
+    tags:
+      - posts
+    summary: Get post details
+    operationId: getPost
+    parameters:
+      - $ref: '../components/parameters.yaml#/Language'
+    responses:
+      '200':
+        description: Post details retrieved successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Post'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
+  put:
+    tags:
+      - posts
+    summary: Update a post
+    operationId: updatePost
+    parameters:
+      - $ref: '../components/parameters.yaml#/Slug'
+    requestBody:
+      required: true
+      content:
+        application/json:
+          schema:
+            type: object
+            properties:
+              status:
+                type: string
+                enum:
+                  - draft
+                  - published
+              contents:
+                type: array
+                items:
+                  $ref: '../components/schemas.yaml#/PostContent'
+              category_ids:
+                type: array
+                items:
+                  type: integer
+    responses:
+      '200':
+        description: Post updated successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  $ref: '../components/schemas.yaml#/Post'
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+      '403':
+        $ref: '../components/responses.yaml#/Forbidden'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
+      '422':
+        $ref: '../components/responses.yaml#/ValidationError'
+  delete:
+    tags:
+      - posts
+    summary: Delete a post
+    operationId: deletePost
+    parameters:
+      - $ref: '../components/parameters.yaml#/Slug'
+    responses:
+      '204':
+        description: Post deleted successfully
+      '401':
+        $ref: '../components/responses.yaml#/Unauthorized'
+      '403':
+        $ref: '../components/responses.yaml#/Forbidden'
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
+
+post_contributors:
+  parameters:
+    - $ref: '../components/parameters.yaml#/Slug'
+  get:
+    tags:
+      - posts
+    summary: List post contributors
+    operationId: getPostContributors
+    responses:
+      '200':
+        description: Contributor list retrieved successfully
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                data:
+                  type: array
+                  items:
+                    type: object
+                    properties:
+                      contributor:
+                        $ref: '../components/schemas.yaml#/Contributor'
+                      role:
+                        type: string
+                      language_code:
+                        type: string
+                        enum:
+                          - en
+                          - zh-Hans
+                          - zh-Hant
+      '404':
+        $ref: '../components/responses.yaml#/NotFound'
diff --git a/backend/.gitignore b/backend/.gitignore new file mode 100644 index 0000000..1193d52 --- /dev/null +++ b/backend/.gitignore @@ -0,0 +1,42 @@ +# IDE and editor files +.idea/ +.vscode/ +*.swp +*.swo +*~ + +# Dependencies +node_modules/ +vendor/ + +# Build output +*.exe + +# Database files +*.db +*.db-journal +*.sqlite +*.sqlite3 + +# Media storage +storage/media/ + +# Log files +*.log + +# Environment files +.env +.env.* +!.env.example + +# OS generated files +.DS_Store +Thumbs.db + +# Test coverage +coverage +coverage.out +coverage.html + +# Generated mocks +**/mock/mock_*.go diff --git a/backend/Dockerfile b/backend/Dockerfile new file mode 100644 index 0000000..59d675d --- /dev/null +++ b/backend/Dockerfile @@ -0,0 +1,21 @@ +FROM golang:alpine AS builder + +WORKDIR /app +RUN apk add --no-cache gcc musl-dev libwebp-dev +COPY go.mod go.sum ./ +RUN go mod download +COPY . . +RUN go build -o tss-rocks-be ./cmd/server + +FROM alpine:latest + +RUN apk add --no-cache libwebp +RUN adduser -u 1000 -D tss-rocks +USER tss-rocks +WORKDIR /app +COPY --from=builder /app/tss-rocks-be . + +EXPOSE 8080 +ENV GIN_MODE=release + +CMD ["./tss-rocks-be"] diff --git a/backend/LICENSE.md b/backend/LICENSE.md new file mode 100644 index 0000000..c6f01c6 --- /dev/null +++ b/backend/LICENSE.md @@ -0,0 +1,660 @@ +# GNU AFFERO GENERAL PUBLIC LICENSE + +Version 3, 19 November 2007 + +Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed.
+ +## Preamble + +The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + +The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains +free software for all its users. + +When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + +Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + +A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + +The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + +An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing +under this license. + +The precise terms and conditions for copying, distribution and +modification follow. + +## TERMS AND CONDITIONS + +### 0. Definitions. + +"This License" refers to version 3 of the GNU Affero General Public +License. + +"Copyright" also means copyright-like laws that apply to other kinds +of works, such as semiconductor masks. + +"The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + +To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of +an exact copy. The resulting work is called a "modified version" of +the earlier work or a work "based on" the earlier work. + +A "covered work" means either the unmodified Program or a work based +on the Program. 
+ +To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + +To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user +through a computer network, with no transfer of a copy, is not +conveying. + +An interactive user interface displays "Appropriate Legal Notices" to +the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + +### 1. Source Code. + +The "source code" for a work means the preferred form of the work for +making modifications to it. "Object code" means any non-source form of +a work. + +A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + +The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + +The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can +regenerate automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same +work. + +### 2. Basic Permissions. + +All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, +without conditions so long as your license otherwise remains in force. +You may convey covered works to others for the sole purpose of having +them make modifications exclusively for you, or provide you with +facilities for running those works, provided that you comply with the +terms of this License in conveying all material for which you do not +control copyright. Those thus making or running the covered works for +you must do so exclusively on your behalf, under your direction and +control, on terms that prohibit them from making any copies of your +copyrighted material outside their relationship with you. + +Conveying under any other circumstances is permitted solely under the +conditions stated below. Sublicensing is not allowed; section 10 makes +it unnecessary. + +### 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + +No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + +When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such +circumvention is effected by exercising rights under this License with +respect to the covered work, and you disclaim any intention to limit +operation or modification of the work as a means of enforcing, against +the work's users, your or third parties' legal rights to forbid +circumvention of technological measures. + +### 4. Conveying Verbatim Copies. + +You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + +You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + +### 5. Conveying Modified Source Versions. + +You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these +conditions: + +- a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. +- b) The work must carry prominent notices stating that it is + released under this License and any conditions added under + section 7. This requirement modifies the requirement in section 4 + to "keep intact all notices". +- c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. +- d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + +A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + +### 6. Conveying Non-Source Forms. + +You may convey a covered work in object code form under the terms of +sections 4 and 5, provided that you also convey the machine-readable +Corresponding Source under the terms of this License, in one of these +ways: + +- a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. +- b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the Corresponding + Source from a network server at no charge. +- c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. +- d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. +- e) Convey the object code using peer-to-peer transmission, + provided you inform other peers where the object code and + Corresponding Source of the work are being offered to the general + public at no charge under subsection 6d. 
+ +A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + +A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, +family, or household purposes, or (2) anything designed or sold for +incorporation into a dwelling. In determining whether a product is a +consumer product, doubtful cases shall be resolved in favor of +coverage. For a particular product received by a particular user, +"normally used" refers to a typical or common use of that class of +product, regardless of the status of the particular user or of the way +in which the particular user actually uses, or expects or is expected +to use, the product. A product is a consumer product regardless of +whether the product has substantial commercial, industrial or +non-consumer uses, unless such uses represent the only significant +mode of use of the product. + +"Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to +install and execute modified versions of a covered work in that User +Product from a modified version of its Corresponding Source. The +information must suffice to ensure that the continued functioning of +the modified object code is in no case prevented or interfered with +solely because modification has been made. + +If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + +The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or +updates for a work that has been modified or installed by the +recipient, or for the User Product in which it has been modified or +installed. Access to a network may be denied when the modification +itself materially and adversely affects the operation of the network +or violates the rules and protocols for communication across the +network. + +Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + +### 7. Additional Terms. + +"Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ +When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + +Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders +of that material) supplement the terms of this License with terms: + +- a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or +- b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or +- c) Prohibiting misrepresentation of the origin of that material, + or requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or +- d) Limiting the use for publicity purposes of names of licensors + or authors of the material; or +- e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or +- f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions + of it) with contractual assumptions of liability to the recipient, + for any liability that these contractual assumptions directly + impose on those licensors and authors. + +All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + +If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; the +above requirements apply either way. + +### 8. Termination. + +You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + +However, if you cease all violation of this License, then your license +from a particular copyright holder is reinstated (a) provisionally, +unless and until the copyright holder explicitly and finally +terminates your license, and (b) permanently, if the copyright holder +fails to notify you of the violation by some reasonable means prior to +60 days after the cessation. 
+ +Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + +Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + +### 9. Acceptance Not Required for Having Copies. + +You are not required to accept this License in order to receive or run +a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + +### 10. Automatic Licensing of Downstream Recipients. + +Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + +An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + +### 11. Patents. + +A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + +A contributor's "essential patent claims" are all patent claims owned +or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ +Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + +In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + +If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + +A patent license is "discriminatory" if it does not include within the +scope of its coverage, prohibits the exercise of, or is conditioned on +the non-exercise of one or more of the rights that are specifically +granted under this License. You may not convey a covered work if you +are a party to an arrangement with a third party that is in the +business of distributing software, under which you make payment to the +third party based on the extent of your activity of conveying the +work, and under which the third party grants, to any of the parties +who would receive the covered work from you, a discriminatory patent +license (a) in connection with copies of the covered work conveyed by +you (or copies made from those copies), or (b) primarily for and in +connection with specific products or compilations that contain the +covered work, unless you entered into that arrangement, or that patent +license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + +### 12. No Surrender of Others' Freedom. + +If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under +this License and any other pertinent obligations, then as a +consequence you may not convey it at all. For example, if you agree to +terms that obligate you to collect a royalty for further conveying +from those to whom you convey the Program, the only way you could +satisfy both those terms and this License would be to refrain entirely +from conveying the Program. + +### 13. Remote Network Interaction; Use with the GNU General Public License. + +Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your +version supports such interaction) an opportunity to receive the +Corresponding Source of your version by providing access to the +Corresponding Source from a network server at no charge, through some +standard or customary means of facilitating copying of software. This +Corresponding Source shall include the Corresponding Source for any +work covered by version 3 of the GNU General Public License that is +incorporated pursuant to the following paragraph. + +Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + +### 14. Revised Versions of this License. + +The Free Software Foundation may publish revised and/or new versions +of the GNU Affero General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever +published by the Free Software Foundation. + +If the Program specifies that a proxy can decide which future versions +of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + +Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + +### 15. Disclaimer of Warranty. + +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT +WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND +PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE +DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR +CORRECTION. 
+ +### 16. Limitation of Liability. + +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR +CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES +ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT +NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR +LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM +TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER +PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +### 17. Interpretation of Sections 15 and 16. + +If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + +END OF TERMS AND CONDITIONS + +## How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these +terms. + +To do so, attach the following notices to the program. It is safest to +attach them to the start of each source file to most effectively state +the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as + published by the Free Software Foundation, either version 3 of the + License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper +mail. + +If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for +the specific requirements. + +You should also get your employer (if you work as a programmer) or +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. For more information on this, and how to apply and follow +the GNU AGPL, see <https://www.gnu.org/licenses/>. diff --git a/backend/README.md b/backend/README.md new file mode 100644 index 0000000..be61083 --- /dev/null +++ b/backend/README.md @@ -0,0 +1,69 @@ +# Tss.Rocks Backend + +> [!NOTE] +> This project is still in early development; content and functionality are incomplete. + +Modernizing the experience of the TSS website. + +## Getting Started + +### Prerequisites + +- Go 1.23 or higher +- SQLite 3 + +### Installation + +1. Clone the repository + +2.
Install dependencies: +```bash +go mod download +``` + +3. Copy the example config file and modify it: +```bash +cp config/config.example.yaml config/config.yaml +``` + +4. Update the configuration in `config/config.yaml` with your settings. + +### Running the Server + +```bash +go run cmd/server/main.go +``` + +The server will start on `http://localhost:8080` by default. + +## API Documentation + +### Authentication + +All protected endpoints require a JWT token in the Authorization header: +``` +Authorization: Bearer <token> +``` + +### Endpoints + +Check out `api/schemas` for the OpenAPI specification. + +To bundle the OpenAPI files into a single YAML file, run: + +```bash +./scripts/bundle-openapi.sh # For Bash +./scripts/bundle-openapi.ps1 # For PowerShell +``` + +## Contributing + +1. Fork the repository +2. Create your feature branch (`git checkout -b feature/amazing-feature`) +3. Commit your changes (`git commit -m 'Add some amazing feature'`) +4. Push to the branch (`git push origin feature/amazing-feature`) +5. Open a Pull Request + +## License + +This project is licensed under the AGPL v3.0 - see the [LICENSE](LICENSE.md) file for details.
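To make the Authentication section above concrete, here is a minimal, hypothetical Go client that calls a protected endpoint with a bearer token. The `/api/v1/users/me` route is an assumption based on the `user_me` path item in the spec, and the token value is a placeholder:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder: obtain a real JWT from the auth endpoint first.
	token := "<token>"

	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/api/v1/users/me", nil)
	if err != nil {
		panic(err)
	}
	// The header format the README specifies: "Authorization: Bearer <token>".
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```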
"tss-rocks-be/internal/server" +) + +func TestConfigLoad(t *testing.T) { + // Create a temporary config file for testing + tmpConfig := ` +database: + driver: sqlite3 + dsn: ":memory:" +server: + port: 8080 + host: localhost +storage: + type: local + local: + root_dir: "./testdata" +` + tmpFile, err := os.CreateTemp("", "config-*.yaml") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + + _, err = tmpFile.WriteString(tmpConfig) + require.NoError(t, err) + err = tmpFile.Close() + require.NoError(t, err) + + // Test config loading + cfg, err := config.Load(tmpFile.Name()) + require.NoError(t, err) + assert.Equal(t, "sqlite3", cfg.Database.Driver) + assert.Equal(t, ":memory:", cfg.Database.DSN) + assert.Equal(t, 8080, cfg.Server.Port) + assert.Equal(t, "localhost", cfg.Server.Host) + assert.Equal(t, "local", cfg.Storage.Type) + assert.Equal(t, "./testdata", cfg.Storage.Local.RootDir) +} + +func TestServerCreation(t *testing.T) { + cfg := &config.Config{ + Database: config.DatabaseConfig{ + Driver: "sqlite3", + DSN: ":memory:", + }, + Server: config.ServerConfig{ + Port: 8080, + Host: "localhost", + }, + Storage: config.StorageConfig{ + Type: "local", + Local: config.LocalStorage{ + RootDir: "./testdata", + }, + }, + } + + // Create ent client + client := server.NewEntClient(cfg) + require.NotNil(t, client) + defer client.Close() + + // Test schema creation + err := client.Schema.Create(context.Background()) + require.NoError(t, err) + + // Test server creation + srv, err := server.New(cfg, client) + require.NoError(t, err) + require.NotNil(t, srv) +} + +func TestServerStartAndShutdown(t *testing.T) { + cfg := &config.Config{ + Database: config.DatabaseConfig{ + Driver: "sqlite3", + DSN: ":memory:", + }, + Server: config.ServerConfig{ + Port: 0, // Use random available port + Host: "localhost", + }, + Storage: config.StorageConfig{ + Type: "local", + Local: config.LocalStorage{ + RootDir: "./testdata", + }, + }, + } + + client := server.NewEntClient(cfg) + require.NotNil(t, client) + defer client.Close() + + err := client.Schema.Create(context.Background()) + require.NoError(t, err) + + srv, err := server.New(cfg, client) + require.NoError(t, err) + + // Start server in goroutine + go func() { + err := srv.Start() + if err != nil { + t.Logf("Server stopped: %v", err) + } + }() + + // Give server time to start + time.Sleep(100 * time.Millisecond) + + // Test graceful shutdown + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + err = srv.Shutdown(ctx) + assert.NoError(t, err) +} diff --git a/backend/config/config.yaml b/backend/config/config.yaml new file mode 100644 index 0000000..08f4c31 --- /dev/null +++ b/backend/config/config.yaml @@ -0,0 +1,91 @@ +database: + driver: sqlite3 + dsn: "file:tss-rocks.db?_fk=1&cache=shared" + +server: + port: 8080 + host: localhost + +jwt: + secret: "your-secret-key-here" # 在生产环境中应该使用环境变量 + expiration: 24h # token 过期时间 + +logging: + level: debug # debug, info, warn, error + format: json # json, console + +storage: + type: s3 # local or s3 + local: + root_dir: "./storage/media" + s3: + region: "us-east-1" + bucket: "your-bucket-name" + access_key_id: "your-access-key-id" + secret_access_key: "your-secret-access-key" + endpoint: "" # Optional, for MinIO or other S3-compatible services + custom_url: "" # Optional, for CDN or custom domain (e.g., https://cdn.example.com/media) + proxy_s3: false # If true, backend will proxy S3 requests instead of redirecting + upload: + max_size: 10 # 最大文件大小(MB) + 
diff --git a/backend/config/config.yaml b/backend/config/config.yaml new file mode 100644 index 0000000..08f4c31 --- /dev/null +++ b/backend/config/config.yaml @@ -0,0 +1,91 @@ +database: + driver: sqlite3 + dsn: "file:tss-rocks.db?_fk=1&cache=shared" + +server: + port: 8080 + host: localhost + +jwt: + secret: "your-secret-key-here" # use an environment variable in production + expiration: 24h # token lifetime + +logging: + level: debug # debug, info, warn, error + format: json # json, console + +storage: + type: s3 # local or s3 + local: + root_dir: "./storage/media" + s3: + region: "us-east-1" + bucket: "your-bucket-name" + access_key_id: "your-access-key-id" + secret_access_key: "your-secret-access-key" + endpoint: "" # Optional, for MinIO or other S3-compatible services + custom_url: "" # Optional, for CDN or custom domain (e.g., https://cdn.example.com/media) + proxy_s3: false # If true, backend will proxy S3 requests instead of redirecting + upload: + max_size: 10 # maximum file size (MB) + allowed_types: # allowed MIME types + - image/jpeg + - image/png + - image/gif + - image/webp + - image/svg+xml + - application/pdf + - application/msword + - application/vnd.openxmlformats-officedocument.wordprocessingml.document + - application/vnd.ms-excel + - application/vnd.openxmlformats-officedocument.spreadsheetml.sheet + - application/zip + - application/x-rar-compressed + - text/plain + - text/csv + allowed_extensions: # allowed file extensions (lowercase) + - .jpg + - .jpeg + - .png + - .gif + - .webp + - .svg + - .pdf + - .doc + - .docx + - .xls + - .xlsx + - .zip + - .rar + - .txt + - .csv + +rate_limit: + # Per-IP rate limiting + ip_rate: 50 # requests per second + ip_burst: 25 # burst size + + # Per-route rate limiting + route_rates: + "/api/v1/auth/login": + rate: 5 # 5 requests per second + burst: 10 # burst of 10 requests + "/api/v1/auth/register": + rate: 2 # 2 requests per second + burst: 5 # burst of 5 requests + "/api/v1/media/upload": + rate: 10 # 10 requests per second + burst: 20 # burst of 20 requests + +access_log: + enable_console: true # enable console output + enable_file: true # enable file logging + file_path: "./logs/access.log" # log file path + format: "json" # log format (json or text) + level: "info" # log level + rotation: + max_size: 100 # maximum size of each log file (MB) + max_age: 30 # maximum number of days to keep old log files + max_backups: 10 # maximum number of old log files to keep + compress: true # compress rotated log files + local_time: true # use local time for rotation timestamps diff --git a/backend/docker-compose.yml b/backend/docker-compose.yml new file mode 100644 index 0000000..fde9378 --- /dev/null +++ b/backend/docker-compose.yml @@ -0,0 +1,10 @@ +services: + app: + user: 1000:1000 + ports: + - 8080:8080 + volumes: + - ./config:/app/config + - ./data.db:/app/data.db + - ./media:/app/storage/media + - ./logs:/app/logs diff --git a/backend/ent/category.go b/backend/ent/category.go new file mode 100644 index 0000000..8b89100 --- /dev/null +++ b/backend/ent/category.go @@ -0,0 +1,173 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + "tss-rocks-be/ent/category" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// Category is the model entity for the Category schema. +type Category struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the CategoryQuery when eager-loading is set. + Edges CategoryEdges `json:"edges"` + selectValues sql.SelectValues +} + +// CategoryEdges holds the relations/edges for other nodes in the graph. +type CategoryEdges struct { + // Contents holds the value of the contents edge. + Contents []*CategoryContent `json:"contents,omitempty"` + // Posts holds the value of the posts edge. + Posts []*Post `json:"posts,omitempty"` + // DailyItems holds the value of the daily_items edge. + DailyItems []*Daily `json:"daily_items,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [3]bool +} + +// ContentsOrErr returns the Contents value or an error if the edge +// was not loaded in eager-loading. +func (e CategoryEdges) ContentsOrErr() ([]*CategoryContent, error) { + if e.loadedTypes[0] { + return e.Contents, nil + } + return nil, &NotLoadedError{edge: "contents"} +} + +// PostsOrErr returns the Posts value or an error if the edge +// was not loaded in eager-loading.
+func (e CategoryEdges) PostsOrErr() ([]*Post, error) { + if e.loadedTypes[1] { + return e.Posts, nil + } + return nil, &NotLoadedError{edge: "posts"} +} + +// DailyItemsOrErr returns the DailyItems value or an error if the edge +// was not loaded in eager-loading. +func (e CategoryEdges) DailyItemsOrErr() ([]*Daily, error) { + if e.loadedTypes[2] { + return e.DailyItems, nil + } + return nil, &NotLoadedError{edge: "daily_items"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Category) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case category.FieldID: + values[i] = new(sql.NullInt64) + case category.FieldCreatedAt, category.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Category fields. +func (c *Category) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case category.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + c.ID = int(value.Int64) + case category.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + c.CreatedAt = value.Time + } + case category.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + c.UpdatedAt = value.Time + } + default: + c.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Category. +// This includes values selected through modifiers, order, etc. +func (c *Category) Value(name string) (ent.Value, error) { + return c.selectValues.Get(name) +} + +// QueryContents queries the "contents" edge of the Category entity. +func (c *Category) QueryContents() *CategoryContentQuery { + return NewCategoryClient(c.config).QueryContents(c) +} + +// QueryPosts queries the "posts" edge of the Category entity. +func (c *Category) QueryPosts() *PostQuery { + return NewCategoryClient(c.config).QueryPosts(c) +} + +// QueryDailyItems queries the "daily_items" edge of the Category entity. +func (c *Category) QueryDailyItems() *DailyQuery { + return NewCategoryClient(c.config).QueryDailyItems(c) +} + +// Update returns a builder for updating this Category. +// Note that you need to call Category.Unwrap() before calling this method if this Category +// was returned from a transaction, and the transaction was committed or rolled back. +func (c *Category) Update() *CategoryUpdateOne { + return NewCategoryClient(c.config).UpdateOne(c) +} + +// Unwrap unwraps the Category entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (c *Category) Unwrap() *Category { + _tx, ok := c.config.driver.(*txDriver) + if !ok { + panic("ent: Category is not a transactional entity") + } + c.config.driver = _tx.drv + return c +} + +// String implements the fmt.Stringer. 
+func (c *Category) String() string { + var builder strings.Builder + builder.WriteString("Category(") + builder.WriteString(fmt.Sprintf("id=%v, ", c.ID)) + builder.WriteString("created_at=") + builder.WriteString(c.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(c.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// Categories is a parsable slice of Category. +type Categories []*Category diff --git a/backend/ent/category/category.go b/backend/ent/category/category.go new file mode 100644 index 0000000..80073b1 --- /dev/null +++ b/backend/ent/category/category.go @@ -0,0 +1,157 @@ +// Code generated by ent, DO NOT EDIT. + +package category + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the category type in the database. + Label = "category" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // EdgeContents holds the string denoting the contents edge name in mutations. + EdgeContents = "contents" + // EdgePosts holds the string denoting the posts edge name in mutations. + EdgePosts = "posts" + // EdgeDailyItems holds the string denoting the daily_items edge name in mutations. + EdgeDailyItems = "daily_items" + // Table holds the table name of the category in the database. + Table = "categories" + // ContentsTable is the table that holds the contents relation/edge. + ContentsTable = "category_contents" + // ContentsInverseTable is the table name for the CategoryContent entity. + // It exists in this package in order to avoid circular dependency with the "categorycontent" package. + ContentsInverseTable = "category_contents" + // ContentsColumn is the table column denoting the contents relation/edge. + ContentsColumn = "category_contents" + // PostsTable is the table that holds the posts relation/edge. + PostsTable = "posts" + // PostsInverseTable is the table name for the Post entity. + // It exists in this package in order to avoid circular dependency with the "post" package. + PostsInverseTable = "posts" + // PostsColumn is the table column denoting the posts relation/edge. + PostsColumn = "category_posts" + // DailyItemsTable is the table that holds the daily_items relation/edge. + DailyItemsTable = "dailies" + // DailyItemsInverseTable is the table name for the Daily entity. + // It exists in this package in order to avoid circular dependency with the "daily" package. + DailyItemsInverseTable = "dailies" + // DailyItemsColumn is the table column denoting the daily_items relation/edge. + DailyItemsColumn = "category_daily_items" +) + +// Columns holds all SQL columns for category fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. 
+ DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// OrderOption defines the ordering options for the Category queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByContentsCount orders the results by contents count. +func ByContentsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newContentsStep(), opts...) + } +} + +// ByContents orders the results by contents terms. +func ByContents(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newContentsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByPostsCount orders the results by posts count. +func ByPostsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newPostsStep(), opts...) + } +} + +// ByPosts orders the results by posts terms. +func ByPosts(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPostsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByDailyItemsCount orders the results by daily_items count. +func ByDailyItemsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newDailyItemsStep(), opts...) + } +} + +// ByDailyItems orders the results by daily_items terms. +func ByDailyItems(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDailyItemsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newContentsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ContentsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ContentsTable, ContentsColumn), + ) +} +func newPostsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PostsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, PostsTable, PostsColumn), + ) +} +func newDailyItemsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DailyItemsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DailyItemsTable, DailyItemsColumn), + ) +} diff --git a/backend/ent/category/where.go b/backend/ent/category/where.go new file mode 100644 index 0000000..ea42ba5 --- /dev/null +++ b/backend/ent/category/where.go @@ -0,0 +1,230 @@ +// Code generated by ent, DO NOT EDIT. + +package category + +import ( + "time" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Category { + return predicate.Category(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. 
+func IDEQ(id int) predicate.Category { + return predicate.Category(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Category { + return predicate.Category(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Category { + return predicate.Category(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Category { + return predicate.Category(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Category { + return predicate.Category(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Category { + return predicate.Category(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Category { + return predicate.Category(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Category { + return predicate.Category(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Category { + return predicate.Category(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Category { + return predicate.Category(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Category { + return predicate.Category(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Category { + return predicate.Category(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Category { + return predicate.Category(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Category { + return predicate.Category(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Category { + return predicate.Category(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Category { + return predicate.Category(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Category { + return predicate.Category(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Category { + return predicate.Category(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Category { + return predicate.Category(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. 
+func UpdatedAtNEQ(v time.Time) predicate.Category { + return predicate.Category(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Category { + return predicate.Category(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Category { + return predicate.Category(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Category { + return predicate.Category(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Category { + return predicate.Category(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Category { + return predicate.Category(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Category { + return predicate.Category(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// HasContents applies the HasEdge predicate on the "contents" edge. +func HasContents() predicate.Category { + return predicate.Category(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ContentsTable, ContentsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasContentsWith applies the HasEdge predicate on the "contents" edge with a given conditions (other predicates). +func HasContentsWith(preds ...predicate.CategoryContent) predicate.Category { + return predicate.Category(func(s *sql.Selector) { + step := newContentsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasPosts applies the HasEdge predicate on the "posts" edge. +func HasPosts() predicate.Category { + return predicate.Category(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, PostsTable, PostsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasPostsWith applies the HasEdge predicate on the "posts" edge with a given conditions (other predicates). +func HasPostsWith(preds ...predicate.Post) predicate.Category { + return predicate.Category(func(s *sql.Selector) { + step := newPostsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasDailyItems applies the HasEdge predicate on the "daily_items" edge. +func HasDailyItems() predicate.Category { + return predicate.Category(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DailyItemsTable, DailyItemsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasDailyItemsWith applies the HasEdge predicate on the "daily_items" edge with a given conditions (other predicates). +func HasDailyItemsWith(preds ...predicate.Daily) predicate.Category { + return predicate.Category(func(s *sql.Selector) { + step := newDailyItemsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. 
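+// For example (illustrative; t0 is assumed to be a time.Time):
+//
+//	category.And(category.CreatedAtGT(t0), category.HasPosts())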
+func And(predicates ...predicate.Category) predicate.Category { + return predicate.Category(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Category) predicate.Category { + return predicate.Category(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Category) predicate.Category { + return predicate.Category(sql.NotPredicates(p)) +} diff --git a/backend/ent/category_create.go b/backend/ent/category_create.go new file mode 100644 index 0000000..f479883 --- /dev/null +++ b/backend/ent/category_create.go @@ -0,0 +1,323 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/categorycontent" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/post" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// CategoryCreate is the builder for creating a Category entity. +type CategoryCreate struct { + config + mutation *CategoryMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (cc *CategoryCreate) SetCreatedAt(t time.Time) *CategoryCreate { + cc.mutation.SetCreatedAt(t) + return cc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (cc *CategoryCreate) SetNillableCreatedAt(t *time.Time) *CategoryCreate { + if t != nil { + cc.SetCreatedAt(*t) + } + return cc +} + +// SetUpdatedAt sets the "updated_at" field. +func (cc *CategoryCreate) SetUpdatedAt(t time.Time) *CategoryCreate { + cc.mutation.SetUpdatedAt(t) + return cc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (cc *CategoryCreate) SetNillableUpdatedAt(t *time.Time) *CategoryCreate { + if t != nil { + cc.SetUpdatedAt(*t) + } + return cc +} + +// AddContentIDs adds the "contents" edge to the CategoryContent entity by IDs. +func (cc *CategoryCreate) AddContentIDs(ids ...int) *CategoryCreate { + cc.mutation.AddContentIDs(ids...) + return cc +} + +// AddContents adds the "contents" edges to the CategoryContent entity. +func (cc *CategoryCreate) AddContents(c ...*CategoryContent) *CategoryCreate { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return cc.AddContentIDs(ids...) +} + +// AddPostIDs adds the "posts" edge to the Post entity by IDs. +func (cc *CategoryCreate) AddPostIDs(ids ...int) *CategoryCreate { + cc.mutation.AddPostIDs(ids...) + return cc +} + +// AddPosts adds the "posts" edges to the Post entity. +func (cc *CategoryCreate) AddPosts(p ...*Post) *CategoryCreate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return cc.AddPostIDs(ids...) +} + +// AddDailyItemIDs adds the "daily_items" edge to the Daily entity by IDs. +func (cc *CategoryCreate) AddDailyItemIDs(ids ...string) *CategoryCreate { + cc.mutation.AddDailyItemIDs(ids...) + return cc +} + +// AddDailyItems adds the "daily_items" edges to the Daily entity. +func (cc *CategoryCreate) AddDailyItems(d ...*Daily) *CategoryCreate { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return cc.AddDailyItemIDs(ids...) +} + +// Mutation returns the CategoryMutation object of the builder. +func (cc *CategoryCreate) Mutation() *CategoryMutation { + return cc.mutation +} + +// Save creates the Category in the database. 
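+// Both timestamp fields have defaults, so a minimal illustrative call
+// (assuming an *ent.Client named client and a context ctx) is:
+//
+//	c, err := client.Category.Create().Save(ctx)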
+func (cc *CategoryCreate) Save(ctx context.Context) (*Category, error) { + cc.defaults() + return withHooks(ctx, cc.sqlSave, cc.mutation, cc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (cc *CategoryCreate) SaveX(ctx context.Context) *Category { + v, err := cc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (cc *CategoryCreate) Exec(ctx context.Context) error { + _, err := cc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cc *CategoryCreate) ExecX(ctx context.Context) { + if err := cc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (cc *CategoryCreate) defaults() { + if _, ok := cc.mutation.CreatedAt(); !ok { + v := category.DefaultCreatedAt() + cc.mutation.SetCreatedAt(v) + } + if _, ok := cc.mutation.UpdatedAt(); !ok { + v := category.DefaultUpdatedAt() + cc.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (cc *CategoryCreate) check() error { + if _, ok := cc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Category.created_at"`)} + } + if _, ok := cc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Category.updated_at"`)} + } + return nil +} + +func (cc *CategoryCreate) sqlSave(ctx context.Context) (*Category, error) { + if err := cc.check(); err != nil { + return nil, err + } + _node, _spec := cc.createSpec() + if err := sqlgraph.CreateNode(ctx, cc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + cc.mutation.id = &_node.ID + cc.mutation.done = true + return _node, nil +} + +func (cc *CategoryCreate) createSpec() (*Category, *sqlgraph.CreateSpec) { + var ( + _node = &Category{config: cc.config} + _spec = sqlgraph.NewCreateSpec(category.Table, sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt)) + ) + if value, ok := cc.mutation.CreatedAt(); ok { + _spec.SetField(category.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := cc.mutation.UpdatedAt(); ok { + _spec.SetField(category.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if nodes := cc.mutation.ContentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.ContentsTable, + Columns: []string{category.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(categorycontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := cc.mutation.PostsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.PostsTable, + Columns: []string{category.PostsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := cc.mutation.DailyItemsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.DailyItemsTable, + Columns: 
[]string{category.DailyItemsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// CategoryCreateBulk is the builder for creating many Category entities in bulk. +type CategoryCreateBulk struct { + config + err error + builders []*CategoryCreate +} + +// Save creates the Category entities in the database. +func (ccb *CategoryCreateBulk) Save(ctx context.Context) ([]*Category, error) { + if ccb.err != nil { + return nil, ccb.err + } + specs := make([]*sqlgraph.CreateSpec, len(ccb.builders)) + nodes := make([]*Category, len(ccb.builders)) + mutators := make([]Mutator, len(ccb.builders)) + for i := range ccb.builders { + func(i int, root context.Context) { + builder := ccb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*CategoryMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, ccb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, ccb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, ccb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (ccb *CategoryCreateBulk) SaveX(ctx context.Context) []*Category { + v, err := ccb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ccb *CategoryCreateBulk) Exec(ctx context.Context) error { + _, err := ccb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ccb *CategoryCreateBulk) ExecX(ctx context.Context) { + if err := ccb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/category_delete.go b/backend/ent/category_delete.go new file mode 100644 index 0000000..4d6192b --- /dev/null +++ b/backend/ent/category_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// CategoryDelete is the builder for deleting a Category entity. +type CategoryDelete struct { + config + hooks []Hook + mutation *CategoryMutation +} + +// Where appends a list predicates to the CategoryDelete builder. +func (cd *CategoryDelete) Where(ps ...predicate.Category) *CategoryDelete { + cd.mutation.Where(ps...) 
+ return cd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (cd *CategoryDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, cd.sqlExec, cd.mutation, cd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (cd *CategoryDelete) ExecX(ctx context.Context) int { + n, err := cd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (cd *CategoryDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(category.Table, sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt)) + if ps := cd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, cd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + cd.mutation.done = true + return affected, err +} + +// CategoryDeleteOne is the builder for deleting a single Category entity. +type CategoryDeleteOne struct { + cd *CategoryDelete +} + +// Where appends a list predicates to the CategoryDelete builder. +func (cdo *CategoryDeleteOne) Where(ps ...predicate.Category) *CategoryDeleteOne { + cdo.cd.mutation.Where(ps...) + return cdo +} + +// Exec executes the deletion query. +func (cdo *CategoryDeleteOne) Exec(ctx context.Context) error { + n, err := cdo.cd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{category.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (cdo *CategoryDeleteOne) ExecX(ctx context.Context) { + if err := cdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/category_query.go b/backend/ent/category_query.go new file mode 100644 index 0000000..26c7b61 --- /dev/null +++ b/backend/ent/category_query.go @@ -0,0 +1,757 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/categorycontent" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// CategoryQuery is the builder for querying Category entities. +type CategoryQuery struct { + config + ctx *QueryContext + order []category.OrderOption + inters []Interceptor + predicates []predicate.Category + withContents *CategoryContentQuery + withPosts *PostQuery + withDailyItems *DailyQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the CategoryQuery builder. +func (cq *CategoryQuery) Where(ps ...predicate.Category) *CategoryQuery { + cq.predicates = append(cq.predicates, ps...) + return cq +} + +// Limit the number of records to be returned by this query. +func (cq *CategoryQuery) Limit(limit int) *CategoryQuery { + cq.ctx.Limit = &limit + return cq +} + +// Offset to start from. +func (cq *CategoryQuery) Offset(offset int) *CategoryQuery { + cq.ctx.Offset = &offset + return cq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. 
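+// For example (illustrative):
+//
+//	ids, err := client.Category.Query().Unique(false).IDs(ctx)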
+func (cq *CategoryQuery) Unique(unique bool) *CategoryQuery { + cq.ctx.Unique = &unique + return cq +} + +// Order specifies how the records should be ordered. +func (cq *CategoryQuery) Order(o ...category.OrderOption) *CategoryQuery { + cq.order = append(cq.order, o...) + return cq +} + +// QueryContents chains the current query on the "contents" edge. +func (cq *CategoryQuery) QueryContents() *CategoryContentQuery { + query := (&CategoryContentClient{config: cq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := cq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := cq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(category.Table, category.FieldID, selector), + sqlgraph.To(categorycontent.Table, categorycontent.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, category.ContentsTable, category.ContentsColumn), + ) + fromU = sqlgraph.SetNeighbors(cq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryPosts chains the current query on the "posts" edge. +func (cq *CategoryQuery) QueryPosts() *PostQuery { + query := (&PostClient{config: cq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := cq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := cq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(category.Table, category.FieldID, selector), + sqlgraph.To(post.Table, post.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, category.PostsTable, category.PostsColumn), + ) + fromU = sqlgraph.SetNeighbors(cq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryDailyItems chains the current query on the "daily_items" edge. +func (cq *CategoryQuery) QueryDailyItems() *DailyQuery { + query := (&DailyClient{config: cq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := cq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := cq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(category.Table, category.FieldID, selector), + sqlgraph.To(daily.Table, daily.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, category.DailyItemsTable, category.DailyItemsColumn), + ) + fromU = sqlgraph.SetNeighbors(cq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Category entity from the query. +// Returns a *NotFoundError when no Category was found. +func (cq *CategoryQuery) First(ctx context.Context) (*Category, error) { + nodes, err := cq.Limit(1).All(setContextOp(ctx, cq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{category.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (cq *CategoryQuery) FirstX(ctx context.Context) *Category { + node, err := cq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Category ID from the query. +// Returns a *NotFoundError when no Category ID was found. 
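+// For example (illustrative):
+//
+//	id, err := client.Category.Query().
+//		Where(category.HasPosts()).
+//		FirstID(ctx)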
+func (cq *CategoryQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = cq.Limit(1).IDs(setContextOp(ctx, cq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{category.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (cq *CategoryQuery) FirstIDX(ctx context.Context) int { + id, err := cq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Category entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Category entity is found. +// Returns a *NotFoundError when no Category entities are found. +func (cq *CategoryQuery) Only(ctx context.Context) (*Category, error) { + nodes, err := cq.Limit(2).All(setContextOp(ctx, cq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{category.Label} + default: + return nil, &NotSingularError{category.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (cq *CategoryQuery) OnlyX(ctx context.Context) *Category { + node, err := cq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Category ID in the query. +// Returns a *NotSingularError when more than one Category ID is found. +// Returns a *NotFoundError when no entities are found. +func (cq *CategoryQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = cq.Limit(2).IDs(setContextOp(ctx, cq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{category.Label} + default: + err = &NotSingularError{category.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (cq *CategoryQuery) OnlyIDX(ctx context.Context) int { + id, err := cq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Categories. +func (cq *CategoryQuery) All(ctx context.Context) ([]*Category, error) { + ctx = setContextOp(ctx, cq.ctx, ent.OpQueryAll) + if err := cq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Category, *CategoryQuery]() + return withInterceptors[[]*Category](ctx, cq, qr, cq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (cq *CategoryQuery) AllX(ctx context.Context) []*Category { + nodes, err := cq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Category IDs. +func (cq *CategoryQuery) IDs(ctx context.Context) (ids []int, err error) { + if cq.ctx.Unique == nil && cq.path != nil { + cq.Unique(true) + } + ctx = setContextOp(ctx, cq.ctx, ent.OpQueryIDs) + if err = cq.Select(category.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (cq *CategoryQuery) IDsX(ctx context.Context) []int { + ids, err := cq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
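+// For example (illustrative):
+//
+//	n, err := client.Category.Query().Where(category.HasPosts()).Count(ctx)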
+func (cq *CategoryQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, cq.ctx, ent.OpQueryCount) + if err := cq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, cq, querierCount[*CategoryQuery](), cq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (cq *CategoryQuery) CountX(ctx context.Context) int { + count, err := cq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (cq *CategoryQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, cq.ctx, ent.OpQueryExist) + switch _, err := cq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (cq *CategoryQuery) ExistX(ctx context.Context) bool { + exist, err := cq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the CategoryQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (cq *CategoryQuery) Clone() *CategoryQuery { + if cq == nil { + return nil + } + return &CategoryQuery{ + config: cq.config, + ctx: cq.ctx.Clone(), + order: append([]category.OrderOption{}, cq.order...), + inters: append([]Interceptor{}, cq.inters...), + predicates: append([]predicate.Category{}, cq.predicates...), + withContents: cq.withContents.Clone(), + withPosts: cq.withPosts.Clone(), + withDailyItems: cq.withDailyItems.Clone(), + // clone intermediate query. + sql: cq.sql.Clone(), + path: cq.path, + } +} + +// WithContents tells the query-builder to eager-load the nodes that are connected to +// the "contents" edge. The optional arguments are used to configure the query builder of the edge. +func (cq *CategoryQuery) WithContents(opts ...func(*CategoryContentQuery)) *CategoryQuery { + query := (&CategoryContentClient{config: cq.config}).Query() + for _, opt := range opts { + opt(query) + } + cq.withContents = query + return cq +} + +// WithPosts tells the query-builder to eager-load the nodes that are connected to +// the "posts" edge. The optional arguments are used to configure the query builder of the edge. +func (cq *CategoryQuery) WithPosts(opts ...func(*PostQuery)) *CategoryQuery { + query := (&PostClient{config: cq.config}).Query() + for _, opt := range opts { + opt(query) + } + cq.withPosts = query + return cq +} + +// WithDailyItems tells the query-builder to eager-load the nodes that are connected to +// the "daily_items" edge. The optional arguments are used to configure the query builder of the edge. +func (cq *CategoryQuery) WithDailyItems(opts ...func(*DailyQuery)) *CategoryQuery { + query := (&DailyClient{config: cq.config}).Query() + for _, opt := range opts { + opt(query) + } + cq.withDailyItems = query + return cq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Category.Query(). +// GroupBy(category.FieldCreatedAt). +// Aggregate(ent.Count()). 
+// Scan(ctx, &v) +func (cq *CategoryQuery) GroupBy(field string, fields ...string) *CategoryGroupBy { + cq.ctx.Fields = append([]string{field}, fields...) + grbuild := &CategoryGroupBy{build: cq} + grbuild.flds = &cq.ctx.Fields + grbuild.label = category.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Category.Query(). +// Select(category.FieldCreatedAt). +// Scan(ctx, &v) +func (cq *CategoryQuery) Select(fields ...string) *CategorySelect { + cq.ctx.Fields = append(cq.ctx.Fields, fields...) + sbuild := &CategorySelect{CategoryQuery: cq} + sbuild.label = category.Label + sbuild.flds, sbuild.scan = &cq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a CategorySelect configured with the given aggregations. +func (cq *CategoryQuery) Aggregate(fns ...AggregateFunc) *CategorySelect { + return cq.Select().Aggregate(fns...) +} + +func (cq *CategoryQuery) prepareQuery(ctx context.Context) error { + for _, inter := range cq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, cq); err != nil { + return err + } + } + } + for _, f := range cq.ctx.Fields { + if !category.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if cq.path != nil { + prev, err := cq.path(ctx) + if err != nil { + return err + } + cq.sql = prev + } + return nil +} + +func (cq *CategoryQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Category, error) { + var ( + nodes = []*Category{} + _spec = cq.querySpec() + loadedTypes = [3]bool{ + cq.withContents != nil, + cq.withPosts != nil, + cq.withDailyItems != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Category).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Category{config: cq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, cq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := cq.withContents; query != nil { + if err := cq.loadContents(ctx, query, nodes, + func(n *Category) { n.Edges.Contents = []*CategoryContent{} }, + func(n *Category, e *CategoryContent) { n.Edges.Contents = append(n.Edges.Contents, e) }); err != nil { + return nil, err + } + } + if query := cq.withPosts; query != nil { + if err := cq.loadPosts(ctx, query, nodes, + func(n *Category) { n.Edges.Posts = []*Post{} }, + func(n *Category, e *Post) { n.Edges.Posts = append(n.Edges.Posts, e) }); err != nil { + return nil, err + } + } + if query := cq.withDailyItems; query != nil { + if err := cq.loadDailyItems(ctx, query, nodes, + func(n *Category) { n.Edges.DailyItems = []*Daily{} }, + func(n *Category, e *Daily) { n.Edges.DailyItems = append(n.Edges.DailyItems, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (cq *CategoryQuery) loadContents(ctx context.Context, query *CategoryContentQuery, nodes []*Category, init func(*Category), assign func(*Category, *CategoryContent)) error { + fks := 
make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Category) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.CategoryContent(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(category.ContentsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.category_contents + if fk == nil { + return fmt.Errorf(`foreign-key "category_contents" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "category_contents" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (cq *CategoryQuery) loadPosts(ctx context.Context, query *PostQuery, nodes []*Category, init func(*Category), assign func(*Category, *Post)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Category) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Post(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(category.PostsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.category_posts + if fk == nil { + return fmt.Errorf(`foreign-key "category_posts" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "category_posts" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (cq *CategoryQuery) loadDailyItems(ctx context.Context, query *DailyQuery, nodes []*Category, init func(*Category), assign func(*Category, *Daily)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Category) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Daily(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(category.DailyItemsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.category_daily_items + if fk == nil { + return fmt.Errorf(`foreign-key "category_daily_items" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "category_daily_items" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (cq *CategoryQuery) sqlCount(ctx context.Context) (int, error) { + _spec := cq.querySpec() + _spec.Node.Columns = cq.ctx.Fields + if len(cq.ctx.Fields) > 0 { + _spec.Unique = cq.ctx.Unique != nil && *cq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, cq.driver, _spec) +} + +func (cq *CategoryQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(category.Table, category.Columns, sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt)) + _spec.From = cq.sql + if unique := cq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if cq.path != nil { + _spec.Unique = true + } + if fields := cq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, category.FieldID) + for i := range fields { + if fields[i] != category.FieldID { + _spec.Node.Columns = 
append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := cq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := cq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := cq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := cq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (cq *CategoryQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(cq.driver.Dialect()) + t1 := builder.Table(category.Table) + columns := cq.ctx.Fields + if len(columns) == 0 { + columns = category.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if cq.sql != nil { + selector = cq.sql + selector.Select(selector.Columns(columns...)...) + } + if cq.ctx.Unique != nil && *cq.ctx.Unique { + selector.Distinct() + } + for _, p := range cq.predicates { + p(selector) + } + for _, p := range cq.order { + p(selector) + } + if offset := cq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := cq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// CategoryGroupBy is the group-by builder for Category entities. +type CategoryGroupBy struct { + selector + build *CategoryQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (cgb *CategoryGroupBy) Aggregate(fns ...AggregateFunc) *CategoryGroupBy { + cgb.fns = append(cgb.fns, fns...) + return cgb +} + +// Scan applies the selector query and scans the result into the given value. +func (cgb *CategoryGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cgb.build.ctx, ent.OpQueryGroupBy) + if err := cgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*CategoryQuery, *CategoryGroupBy](ctx, cgb.build, cgb, cgb.build.inters, v) +} + +func (cgb *CategoryGroupBy) sqlScan(ctx context.Context, root *CategoryQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(cgb.fns)) + for _, fn := range cgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*cgb.flds)+len(cgb.fns)) + for _, f := range *cgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*cgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := cgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// CategorySelect is the builder for selecting fields of Category entities. +type CategorySelect struct { + *CategoryQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (cs *CategorySelect) Aggregate(fns ...AggregateFunc) *CategorySelect { + cs.fns = append(cs.fns, fns...) + return cs +} + +// Scan applies the selector query and scans the result into the given value. 
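+// For example (illustrative, following the Select example above):
+//
+//	var v []struct {
+//		Count int `json:"count"`
+//	}
+//	err := client.Category.Query().
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)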
+func (cs *CategorySelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cs.ctx, ent.OpQuerySelect) + if err := cs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*CategoryQuery, *CategorySelect](ctx, cs.CategoryQuery, cs, cs.inters, v) +} + +func (cs *CategorySelect) sqlScan(ctx context.Context, root *CategoryQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(cs.fns)) + for _, fn := range cs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*cs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := cs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/category_update.go b/backend/ent/category_update.go new file mode 100644 index 0000000..628a7f2 --- /dev/null +++ b/backend/ent/category_update.go @@ -0,0 +1,735 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/categorycontent" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// CategoryUpdate is the builder for updating Category entities. +type CategoryUpdate struct { + config + hooks []Hook + mutation *CategoryMutation +} + +// Where appends a list predicates to the CategoryUpdate builder. +func (cu *CategoryUpdate) Where(ps ...predicate.Category) *CategoryUpdate { + cu.mutation.Where(ps...) + return cu +} + +// SetCreatedAt sets the "created_at" field. +func (cu *CategoryUpdate) SetCreatedAt(t time.Time) *CategoryUpdate { + cu.mutation.SetCreatedAt(t) + return cu +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (cu *CategoryUpdate) SetNillableCreatedAt(t *time.Time) *CategoryUpdate { + if t != nil { + cu.SetCreatedAt(*t) + } + return cu +} + +// SetUpdatedAt sets the "updated_at" field. +func (cu *CategoryUpdate) SetUpdatedAt(t time.Time) *CategoryUpdate { + cu.mutation.SetUpdatedAt(t) + return cu +} + +// AddContentIDs adds the "contents" edge to the CategoryContent entity by IDs. +func (cu *CategoryUpdate) AddContentIDs(ids ...int) *CategoryUpdate { + cu.mutation.AddContentIDs(ids...) + return cu +} + +// AddContents adds the "contents" edges to the CategoryContent entity. +func (cu *CategoryUpdate) AddContents(c ...*CategoryContent) *CategoryUpdate { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return cu.AddContentIDs(ids...) +} + +// AddPostIDs adds the "posts" edge to the Post entity by IDs. +func (cu *CategoryUpdate) AddPostIDs(ids ...int) *CategoryUpdate { + cu.mutation.AddPostIDs(ids...) + return cu +} + +// AddPosts adds the "posts" edges to the Post entity. +func (cu *CategoryUpdate) AddPosts(p ...*Post) *CategoryUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return cu.AddPostIDs(ids...) +} + +// AddDailyItemIDs adds the "daily_items" edge to the Daily entity by IDs. +func (cu *CategoryUpdate) AddDailyItemIDs(ids ...string) *CategoryUpdate { + cu.mutation.AddDailyItemIDs(ids...) + return cu +} + +// AddDailyItems adds the "daily_items" edges to the Daily entity. 
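+// For example (illustrative; d1 and d2 are assumed *ent.Daily values):
+//
+//	n, err := client.Category.Update().
+//		Where(category.IDEQ(1)).
+//		AddDailyItems(d1, d2).
+//		Save(ctx)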
+func (cu *CategoryUpdate) AddDailyItems(d ...*Daily) *CategoryUpdate { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return cu.AddDailyItemIDs(ids...) +} + +// Mutation returns the CategoryMutation object of the builder. +func (cu *CategoryUpdate) Mutation() *CategoryMutation { + return cu.mutation +} + +// ClearContents clears all "contents" edges to the CategoryContent entity. +func (cu *CategoryUpdate) ClearContents() *CategoryUpdate { + cu.mutation.ClearContents() + return cu +} + +// RemoveContentIDs removes the "contents" edge to CategoryContent entities by IDs. +func (cu *CategoryUpdate) RemoveContentIDs(ids ...int) *CategoryUpdate { + cu.mutation.RemoveContentIDs(ids...) + return cu +} + +// RemoveContents removes "contents" edges to CategoryContent entities. +func (cu *CategoryUpdate) RemoveContents(c ...*CategoryContent) *CategoryUpdate { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return cu.RemoveContentIDs(ids...) +} + +// ClearPosts clears all "posts" edges to the Post entity. +func (cu *CategoryUpdate) ClearPosts() *CategoryUpdate { + cu.mutation.ClearPosts() + return cu +} + +// RemovePostIDs removes the "posts" edge to Post entities by IDs. +func (cu *CategoryUpdate) RemovePostIDs(ids ...int) *CategoryUpdate { + cu.mutation.RemovePostIDs(ids...) + return cu +} + +// RemovePosts removes "posts" edges to Post entities. +func (cu *CategoryUpdate) RemovePosts(p ...*Post) *CategoryUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return cu.RemovePostIDs(ids...) +} + +// ClearDailyItems clears all "daily_items" edges to the Daily entity. +func (cu *CategoryUpdate) ClearDailyItems() *CategoryUpdate { + cu.mutation.ClearDailyItems() + return cu +} + +// RemoveDailyItemIDs removes the "daily_items" edge to Daily entities by IDs. +func (cu *CategoryUpdate) RemoveDailyItemIDs(ids ...string) *CategoryUpdate { + cu.mutation.RemoveDailyItemIDs(ids...) + return cu +} + +// RemoveDailyItems removes "daily_items" edges to Daily entities. +func (cu *CategoryUpdate) RemoveDailyItems(d ...*Daily) *CategoryUpdate { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return cu.RemoveDailyItemIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (cu *CategoryUpdate) Save(ctx context.Context) (int, error) { + cu.defaults() + return withHooks(ctx, cu.sqlSave, cu.mutation, cu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (cu *CategoryUpdate) SaveX(ctx context.Context) int { + affected, err := cu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (cu *CategoryUpdate) Exec(ctx context.Context) error { + _, err := cu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cu *CategoryUpdate) ExecX(ctx context.Context) { + if err := cu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
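+// As a consequence, every update refreshes "updated_at" through
+// category.UpdateDefaultUpdatedAt unless the caller set it explicitly.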
+func (cu *CategoryUpdate) defaults() { + if _, ok := cu.mutation.UpdatedAt(); !ok { + v := category.UpdateDefaultUpdatedAt() + cu.mutation.SetUpdatedAt(v) + } +} + +func (cu *CategoryUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(category.Table, category.Columns, sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt)) + if ps := cu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := cu.mutation.CreatedAt(); ok { + _spec.SetField(category.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := cu.mutation.UpdatedAt(); ok { + _spec.SetField(category.FieldUpdatedAt, field.TypeTime, value) + } + if cu.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.ContentsTable, + Columns: []string{category.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(categorycontent.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cu.mutation.RemovedContentsIDs(); len(nodes) > 0 && !cu.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.ContentsTable, + Columns: []string{category.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(categorycontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cu.mutation.ContentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.ContentsTable, + Columns: []string{category.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(categorycontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if cu.mutation.PostsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.PostsTable, + Columns: []string{category.PostsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cu.mutation.RemovedPostsIDs(); len(nodes) > 0 && !cu.mutation.PostsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.PostsTable, + Columns: []string{category.PostsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cu.mutation.PostsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.PostsTable, + Columns: []string{category.PostsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if cu.mutation.DailyItemsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.DailyItemsTable, 
+ Columns: []string{category.DailyItemsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cu.mutation.RemovedDailyItemsIDs(); len(nodes) > 0 && !cu.mutation.DailyItemsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.DailyItemsTable, + Columns: []string{category.DailyItemsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cu.mutation.DailyItemsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.DailyItemsTable, + Columns: []string{category.DailyItemsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, cu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{category.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + cu.mutation.done = true + return n, nil +} + +// CategoryUpdateOne is the builder for updating a single Category entity. +type CategoryUpdateOne struct { + config + fields []string + hooks []Hook + mutation *CategoryMutation +} + +// SetCreatedAt sets the "created_at" field. +func (cuo *CategoryUpdateOne) SetCreatedAt(t time.Time) *CategoryUpdateOne { + cuo.mutation.SetCreatedAt(t) + return cuo +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (cuo *CategoryUpdateOne) SetNillableCreatedAt(t *time.Time) *CategoryUpdateOne { + if t != nil { + cuo.SetCreatedAt(*t) + } + return cuo +} + +// SetUpdatedAt sets the "updated_at" field. +func (cuo *CategoryUpdateOne) SetUpdatedAt(t time.Time) *CategoryUpdateOne { + cuo.mutation.SetUpdatedAt(t) + return cuo +} + +// AddContentIDs adds the "contents" edge to the CategoryContent entity by IDs. +func (cuo *CategoryUpdateOne) AddContentIDs(ids ...int) *CategoryUpdateOne { + cuo.mutation.AddContentIDs(ids...) + return cuo +} + +// AddContents adds the "contents" edges to the CategoryContent entity. +func (cuo *CategoryUpdateOne) AddContents(c ...*CategoryContent) *CategoryUpdateOne { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return cuo.AddContentIDs(ids...) +} + +// AddPostIDs adds the "posts" edge to the Post entity by IDs. +func (cuo *CategoryUpdateOne) AddPostIDs(ids ...int) *CategoryUpdateOne { + cuo.mutation.AddPostIDs(ids...) + return cuo +} + +// AddPosts adds the "posts" edges to the Post entity. +func (cuo *CategoryUpdateOne) AddPosts(p ...*Post) *CategoryUpdateOne { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return cuo.AddPostIDs(ids...) +} + +// AddDailyItemIDs adds the "daily_items" edge to the Daily entity by IDs. +func (cuo *CategoryUpdateOne) AddDailyItemIDs(ids ...string) *CategoryUpdateOne { + cuo.mutation.AddDailyItemIDs(ids...) + return cuo +} + +// AddDailyItems adds the "daily_items" edges to the Daily entity. 
+func (cuo *CategoryUpdateOne) AddDailyItems(d ...*Daily) *CategoryUpdateOne { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return cuo.AddDailyItemIDs(ids...) +} + +// Mutation returns the CategoryMutation object of the builder. +func (cuo *CategoryUpdateOne) Mutation() *CategoryMutation { + return cuo.mutation +} + +// ClearContents clears all "contents" edges to the CategoryContent entity. +func (cuo *CategoryUpdateOne) ClearContents() *CategoryUpdateOne { + cuo.mutation.ClearContents() + return cuo +} + +// RemoveContentIDs removes the "contents" edge to CategoryContent entities by IDs. +func (cuo *CategoryUpdateOne) RemoveContentIDs(ids ...int) *CategoryUpdateOne { + cuo.mutation.RemoveContentIDs(ids...) + return cuo +} + +// RemoveContents removes "contents" edges to CategoryContent entities. +func (cuo *CategoryUpdateOne) RemoveContents(c ...*CategoryContent) *CategoryUpdateOne { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return cuo.RemoveContentIDs(ids...) +} + +// ClearPosts clears all "posts" edges to the Post entity. +func (cuo *CategoryUpdateOne) ClearPosts() *CategoryUpdateOne { + cuo.mutation.ClearPosts() + return cuo +} + +// RemovePostIDs removes the "posts" edge to Post entities by IDs. +func (cuo *CategoryUpdateOne) RemovePostIDs(ids ...int) *CategoryUpdateOne { + cuo.mutation.RemovePostIDs(ids...) + return cuo +} + +// RemovePosts removes "posts" edges to Post entities. +func (cuo *CategoryUpdateOne) RemovePosts(p ...*Post) *CategoryUpdateOne { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return cuo.RemovePostIDs(ids...) +} + +// ClearDailyItems clears all "daily_items" edges to the Daily entity. +func (cuo *CategoryUpdateOne) ClearDailyItems() *CategoryUpdateOne { + cuo.mutation.ClearDailyItems() + return cuo +} + +// RemoveDailyItemIDs removes the "daily_items" edge to Daily entities by IDs. +func (cuo *CategoryUpdateOne) RemoveDailyItemIDs(ids ...string) *CategoryUpdateOne { + cuo.mutation.RemoveDailyItemIDs(ids...) + return cuo +} + +// RemoveDailyItems removes "daily_items" edges to Daily entities. +func (cuo *CategoryUpdateOne) RemoveDailyItems(d ...*Daily) *CategoryUpdateOne { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return cuo.RemoveDailyItemIDs(ids...) +} + +// Where appends a list predicates to the CategoryUpdate builder. +func (cuo *CategoryUpdateOne) Where(ps ...predicate.Category) *CategoryUpdateOne { + cuo.mutation.Where(ps...) + return cuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (cuo *CategoryUpdateOne) Select(field string, fields ...string) *CategoryUpdateOne { + cuo.fields = append([]string{field}, fields...) + return cuo +} + +// Save executes the query and returns the updated Category entity. +func (cuo *CategoryUpdateOne) Save(ctx context.Context) (*Category, error) { + cuo.defaults() + return withHooks(ctx, cuo.sqlSave, cuo.mutation, cuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (cuo *CategoryUpdateOne) SaveX(ctx context.Context) *Category { + node, err := cuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (cuo *CategoryUpdateOne) Exec(ctx context.Context) error { + _, err := cuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (cuo *CategoryUpdateOne) ExecX(ctx context.Context) { + if err := cuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (cuo *CategoryUpdateOne) defaults() { + if _, ok := cuo.mutation.UpdatedAt(); !ok { + v := category.UpdateDefaultUpdatedAt() + cuo.mutation.SetUpdatedAt(v) + } +} + +func (cuo *CategoryUpdateOne) sqlSave(ctx context.Context) (_node *Category, err error) { + _spec := sqlgraph.NewUpdateSpec(category.Table, category.Columns, sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt)) + id, ok := cuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Category.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := cuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, category.FieldID) + for _, f := range fields { + if !category.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != category.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := cuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := cuo.mutation.CreatedAt(); ok { + _spec.SetField(category.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := cuo.mutation.UpdatedAt(); ok { + _spec.SetField(category.FieldUpdatedAt, field.TypeTime, value) + } + if cuo.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.ContentsTable, + Columns: []string{category.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(categorycontent.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cuo.mutation.RemovedContentsIDs(); len(nodes) > 0 && !cuo.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.ContentsTable, + Columns: []string{category.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(categorycontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cuo.mutation.ContentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.ContentsTable, + Columns: []string{category.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(categorycontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if cuo.mutation.PostsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.PostsTable, + Columns: []string{category.PostsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cuo.mutation.RemovedPostsIDs(); len(nodes) > 0 && !cuo.mutation.PostsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.PostsTable, + Columns: []string{category.PostsColumn}, + Bidi: false, + Target: 
&sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cuo.mutation.PostsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.PostsTable, + Columns: []string{category.PostsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if cuo.mutation.DailyItemsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.DailyItemsTable, + Columns: []string{category.DailyItemsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cuo.mutation.RemovedDailyItemsIDs(); len(nodes) > 0 && !cuo.mutation.DailyItemsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.DailyItemsTable, + Columns: []string{category.DailyItemsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cuo.mutation.DailyItemsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: category.DailyItemsTable, + Columns: []string{category.DailyItemsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Category{config: cuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, cuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{category.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + cuo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/categorycontent.go b/backend/ent/categorycontent.go new file mode 100644 index 0000000..7b2140f --- /dev/null +++ b/backend/ent/categorycontent.go @@ -0,0 +1,175 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/categorycontent" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// CategoryContent is the model entity for the CategoryContent schema. +type CategoryContent struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // LanguageCode holds the value of the "language_code" field. + LanguageCode categorycontent.LanguageCode `json:"language_code,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Description holds the value of the "description" field. + Description string `json:"description,omitempty"` + // Slug holds the value of the "slug" field. 
+ Slug string `json:"slug,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the CategoryContentQuery when eager-loading is set. + Edges CategoryContentEdges `json:"edges"` + category_contents *int + selectValues sql.SelectValues +} + +// CategoryContentEdges holds the relations/edges for other nodes in the graph. +type CategoryContentEdges struct { + // Category holds the value of the category edge. + Category *Category `json:"category,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// CategoryOrErr returns the Category value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e CategoryContentEdges) CategoryOrErr() (*Category, error) { + if e.Category != nil { + return e.Category, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: category.Label} + } + return nil, &NotLoadedError{edge: "category"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*CategoryContent) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case categorycontent.FieldID: + values[i] = new(sql.NullInt64) + case categorycontent.FieldLanguageCode, categorycontent.FieldName, categorycontent.FieldDescription, categorycontent.FieldSlug: + values[i] = new(sql.NullString) + case categorycontent.ForeignKeys[0]: // category_contents + values[i] = new(sql.NullInt64) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the CategoryContent fields. 
+func (cc *CategoryContent) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case categorycontent.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + cc.ID = int(value.Int64) + case categorycontent.FieldLanguageCode: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field language_code", values[i]) + } else if value.Valid { + cc.LanguageCode = categorycontent.LanguageCode(value.String) + } + case categorycontent.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + cc.Name = value.String + } + case categorycontent.FieldDescription: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field description", values[i]) + } else if value.Valid { + cc.Description = value.String + } + case categorycontent.FieldSlug: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field slug", values[i]) + } else if value.Valid { + cc.Slug = value.String + } + case categorycontent.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field category_contents", value) + } else if value.Valid { + cc.category_contents = new(int) + *cc.category_contents = int(value.Int64) + } + default: + cc.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the CategoryContent. +// This includes values selected through modifiers, order, etc. +func (cc *CategoryContent) Value(name string) (ent.Value, error) { + return cc.selectValues.Get(name) +} + +// QueryCategory queries the "category" edge of the CategoryContent entity. +func (cc *CategoryContent) QueryCategory() *CategoryQuery { + return NewCategoryContentClient(cc.config).QueryCategory(cc) +} + +// Update returns a builder for updating this CategoryContent. +// Note that you need to call CategoryContent.Unwrap() before calling this method if this CategoryContent +// was returned from a transaction, and the transaction was committed or rolled back. +func (cc *CategoryContent) Update() *CategoryContentUpdateOne { + return NewCategoryContentClient(cc.config).UpdateOne(cc) +} + +// Unwrap unwraps the CategoryContent entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (cc *CategoryContent) Unwrap() *CategoryContent { + _tx, ok := cc.config.driver.(*txDriver) + if !ok { + panic("ent: CategoryContent is not a transactional entity") + } + cc.config.driver = _tx.drv + return cc +} + +// String implements the fmt.Stringer. 
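+// Example output (illustrative; the field values below are hypothetical):
+//
+//	CategoryContent(id=1, language_code=en, name=News, description=Latest news, slug=news)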
+func (cc *CategoryContent) String() string { + var builder strings.Builder + builder.WriteString("CategoryContent(") + builder.WriteString(fmt.Sprintf("id=%v, ", cc.ID)) + builder.WriteString("language_code=") + builder.WriteString(fmt.Sprintf("%v", cc.LanguageCode)) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(cc.Name) + builder.WriteString(", ") + builder.WriteString("description=") + builder.WriteString(cc.Description) + builder.WriteString(", ") + builder.WriteString("slug=") + builder.WriteString(cc.Slug) + builder.WriteByte(')') + return builder.String() +} + +// CategoryContents is a parsable slice of CategoryContent. +type CategoryContents []*CategoryContent diff --git a/backend/ent/categorycontent/categorycontent.go b/backend/ent/categorycontent/categorycontent.go new file mode 100644 index 0000000..8564d31 --- /dev/null +++ b/backend/ent/categorycontent/categorycontent.go @@ -0,0 +1,139 @@ +// Code generated by ent, DO NOT EDIT. + +package categorycontent + +import ( + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the categorycontent type in the database. + Label = "category_content" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldLanguageCode holds the string denoting the language_code field in the database. + FieldLanguageCode = "language_code" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldDescription holds the string denoting the description field in the database. + FieldDescription = "description" + // FieldSlug holds the string denoting the slug field in the database. + FieldSlug = "slug" + // EdgeCategory holds the string denoting the category edge name in mutations. + EdgeCategory = "category" + // Table holds the table name of the categorycontent in the database. + Table = "category_contents" + // CategoryTable is the table that holds the category relation/edge. + CategoryTable = "category_contents" + // CategoryInverseTable is the table name for the Category entity. + // It exists in this package in order to avoid circular dependency with the "category" package. + CategoryInverseTable = "categories" + // CategoryColumn is the table column denoting the category relation/edge. + CategoryColumn = "category_contents" +) + +// Columns holds all SQL columns for categorycontent fields. +var Columns = []string{ + FieldID, + FieldLanguageCode, + FieldName, + FieldDescription, + FieldSlug, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "category_contents" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "category_contents", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // SlugValidator is a validator for the "slug" field. It is called by the builders before save. + SlugValidator func(string) error +) + +// LanguageCode defines the type for the "language_code" enum field. +type LanguageCode string + +// LanguageCode values. 
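+// These mirror the enum declared in the CategoryContent schema. For instance,
+// LanguageCodeValidator(LanguageCode("fr")) returns a non-nil error, while
+// LanguageCodeValidator(LanguageCodeEN) returns nil (illustrative note).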
+const ( + LanguageCodeEN LanguageCode = "en" + LanguageCodeZH_HANS LanguageCode = "zh-Hans" + LanguageCodeZH_HANT LanguageCode = "zh-Hant" +) + +func (lc LanguageCode) String() string { + return string(lc) +} + +// LanguageCodeValidator is a validator for the "language_code" field enum values. It is called by the builders before save. +func LanguageCodeValidator(lc LanguageCode) error { + switch lc { + case LanguageCodeEN, LanguageCodeZH_HANS, LanguageCodeZH_HANT: + return nil + default: + return fmt.Errorf("categorycontent: invalid enum value for language_code field: %q", lc) + } +} + +// OrderOption defines the ordering options for the CategoryContent queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByLanguageCode orders the results by the language_code field. +func ByLanguageCode(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLanguageCode, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// BySlug orders the results by the slug field. +func BySlug(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSlug, opts...).ToFunc() +} + +// ByCategoryField orders the results by category field. +func ByCategoryField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newCategoryStep(), sql.OrderByField(field, opts...)) + } +} +func newCategoryStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(CategoryInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, CategoryTable, CategoryColumn), + ) +} diff --git a/backend/ent/categorycontent/where.go b/backend/ent/categorycontent/where.go new file mode 100644 index 0000000..ebbf1c8 --- /dev/null +++ b/backend/ent/categorycontent/where.go @@ -0,0 +1,333 @@ +// Code generated by ent, DO NOT EDIT. + +package categorycontent + +import ( + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. 
+func IDGTE(id int) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldLTE(FieldID, id)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldEQ(FieldName, v)) +} + +// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. +func Description(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldEQ(FieldDescription, v)) +} + +// Slug applies equality check predicate on the "slug" field. It's identical to SlugEQ. +func Slug(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldEQ(FieldSlug, v)) +} + +// LanguageCodeEQ applies the EQ predicate on the "language_code" field. +func LanguageCodeEQ(v LanguageCode) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldEQ(FieldLanguageCode, v)) +} + +// LanguageCodeNEQ applies the NEQ predicate on the "language_code" field. +func LanguageCodeNEQ(v LanguageCode) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldNEQ(FieldLanguageCode, v)) +} + +// LanguageCodeIn applies the In predicate on the "language_code" field. +func LanguageCodeIn(vs ...LanguageCode) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldIn(FieldLanguageCode, vs...)) +} + +// LanguageCodeNotIn applies the NotIn predicate on the "language_code" field. +func LanguageCodeNotIn(vs ...LanguageCode) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldNotIn(FieldLanguageCode, vs...)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. 
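+// Whether the match is case-sensitive depends on the dialect and collation;
+// NameContainsFold below always compares case-insensitively (illustrative note).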
+func NameContains(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldContainsFold(FieldName, v)) +} + +// DescriptionEQ applies the EQ predicate on the "description" field. +func DescriptionEQ(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldEQ(FieldDescription, v)) +} + +// DescriptionNEQ applies the NEQ predicate on the "description" field. +func DescriptionNEQ(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldNEQ(FieldDescription, v)) +} + +// DescriptionIn applies the In predicate on the "description" field. +func DescriptionIn(vs ...string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldIn(FieldDescription, vs...)) +} + +// DescriptionNotIn applies the NotIn predicate on the "description" field. +func DescriptionNotIn(vs ...string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldNotIn(FieldDescription, vs...)) +} + +// DescriptionGT applies the GT predicate on the "description" field. +func DescriptionGT(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldGT(FieldDescription, v)) +} + +// DescriptionGTE applies the GTE predicate on the "description" field. +func DescriptionGTE(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldGTE(FieldDescription, v)) +} + +// DescriptionLT applies the LT predicate on the "description" field. +func DescriptionLT(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldLT(FieldDescription, v)) +} + +// DescriptionLTE applies the LTE predicate on the "description" field. +func DescriptionLTE(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldLTE(FieldDescription, v)) +} + +// DescriptionContains applies the Contains predicate on the "description" field. +func DescriptionContains(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldContains(FieldDescription, v)) +} + +// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. +func DescriptionHasPrefix(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldHasPrefix(FieldDescription, v)) +} + +// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. +func DescriptionHasSuffix(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldHasSuffix(FieldDescription, v)) +} + +// DescriptionIsNil applies the IsNil predicate on the "description" field. 
+func DescriptionIsNil() predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldIsNull(FieldDescription)) +} + +// DescriptionNotNil applies the NotNil predicate on the "description" field. +func DescriptionNotNil() predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldNotNull(FieldDescription)) +} + +// DescriptionEqualFold applies the EqualFold predicate on the "description" field. +func DescriptionEqualFold(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldEqualFold(FieldDescription, v)) +} + +// DescriptionContainsFold applies the ContainsFold predicate on the "description" field. +func DescriptionContainsFold(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldContainsFold(FieldDescription, v)) +} + +// SlugEQ applies the EQ predicate on the "slug" field. +func SlugEQ(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldEQ(FieldSlug, v)) +} + +// SlugNEQ applies the NEQ predicate on the "slug" field. +func SlugNEQ(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldNEQ(FieldSlug, v)) +} + +// SlugIn applies the In predicate on the "slug" field. +func SlugIn(vs ...string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldIn(FieldSlug, vs...)) +} + +// SlugNotIn applies the NotIn predicate on the "slug" field. +func SlugNotIn(vs ...string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldNotIn(FieldSlug, vs...)) +} + +// SlugGT applies the GT predicate on the "slug" field. +func SlugGT(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldGT(FieldSlug, v)) +} + +// SlugGTE applies the GTE predicate on the "slug" field. +func SlugGTE(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldGTE(FieldSlug, v)) +} + +// SlugLT applies the LT predicate on the "slug" field. +func SlugLT(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldLT(FieldSlug, v)) +} + +// SlugLTE applies the LTE predicate on the "slug" field. +func SlugLTE(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldLTE(FieldSlug, v)) +} + +// SlugContains applies the Contains predicate on the "slug" field. +func SlugContains(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldContains(FieldSlug, v)) +} + +// SlugHasPrefix applies the HasPrefix predicate on the "slug" field. +func SlugHasPrefix(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldHasPrefix(FieldSlug, v)) +} + +// SlugHasSuffix applies the HasSuffix predicate on the "slug" field. +func SlugHasSuffix(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldHasSuffix(FieldSlug, v)) +} + +// SlugEqualFold applies the EqualFold predicate on the "slug" field. +func SlugEqualFold(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldEqualFold(FieldSlug, v)) +} + +// SlugContainsFold applies the ContainsFold predicate on the "slug" field. +func SlugContainsFold(v string) predicate.CategoryContent { + return predicate.CategoryContent(sql.FieldContainsFold(FieldSlug, v)) +} + +// HasCategory applies the HasEdge predicate on the "category" edge. 
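+// Example (illustrative sketch; assumes an initialized *ent.Client named client
+// and a context.Context ctx):
+//
+//	orphaned, err := client.CategoryContent.Query().
+//		Where(categorycontent.Not(categorycontent.HasCategory())).
+//		All(ctx)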
+func HasCategory() predicate.CategoryContent { + return predicate.CategoryContent(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, CategoryTable, CategoryColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasCategoryWith applies the HasEdge predicate on the "category" edge with a given conditions (other predicates). +func HasCategoryWith(preds ...predicate.Category) predicate.CategoryContent { + return predicate.CategoryContent(func(s *sql.Selector) { + step := newCategoryStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.CategoryContent) predicate.CategoryContent { + return predicate.CategoryContent(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.CategoryContent) predicate.CategoryContent { + return predicate.CategoryContent(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.CategoryContent) predicate.CategoryContent { + return predicate.CategoryContent(sql.NotPredicates(p)) +} diff --git a/backend/ent/categorycontent_create.go b/backend/ent/categorycontent_create.go new file mode 100644 index 0000000..1cb7c0b --- /dev/null +++ b/backend/ent/categorycontent_create.go @@ -0,0 +1,279 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/categorycontent" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// CategoryContentCreate is the builder for creating a CategoryContent entity. +type CategoryContentCreate struct { + config + mutation *CategoryContentMutation + hooks []Hook +} + +// SetLanguageCode sets the "language_code" field. +func (ccc *CategoryContentCreate) SetLanguageCode(cc categorycontent.LanguageCode) *CategoryContentCreate { + ccc.mutation.SetLanguageCode(cc) + return ccc +} + +// SetName sets the "name" field. +func (ccc *CategoryContentCreate) SetName(s string) *CategoryContentCreate { + ccc.mutation.SetName(s) + return ccc +} + +// SetDescription sets the "description" field. +func (ccc *CategoryContentCreate) SetDescription(s string) *CategoryContentCreate { + ccc.mutation.SetDescription(s) + return ccc +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (ccc *CategoryContentCreate) SetNillableDescription(s *string) *CategoryContentCreate { + if s != nil { + ccc.SetDescription(*s) + } + return ccc +} + +// SetSlug sets the "slug" field. +func (ccc *CategoryContentCreate) SetSlug(s string) *CategoryContentCreate { + ccc.mutation.SetSlug(s) + return ccc +} + +// SetCategoryID sets the "category" edge to the Category entity by ID. +func (ccc *CategoryContentCreate) SetCategoryID(id int) *CategoryContentCreate { + ccc.mutation.SetCategoryID(id) + return ccc +} + +// SetNillableCategoryID sets the "category" edge to the Category entity by ID if the given value is not nil. +func (ccc *CategoryContentCreate) SetNillableCategoryID(id *int) *CategoryContentCreate { + if id != nil { + ccc = ccc.SetCategoryID(*id) + } + return ccc +} + +// SetCategory sets the "category" edge to the Category entity. 
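+// Example (illustrative sketch; assumes an initialized *ent.Client named client,
+// a context.Context ctx, and an existing *ent.Category value cat):
+//
+//	cc, err := client.CategoryContent.Create().
+//		SetLanguageCode(categorycontent.LanguageCodeEN).
+//		SetName("News").
+//		SetSlug("news").
+//		SetCategory(cat).
+//		Save(ctx)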
+func (ccc *CategoryContentCreate) SetCategory(c *Category) *CategoryContentCreate { + return ccc.SetCategoryID(c.ID) +} + +// Mutation returns the CategoryContentMutation object of the builder. +func (ccc *CategoryContentCreate) Mutation() *CategoryContentMutation { + return ccc.mutation +} + +// Save creates the CategoryContent in the database. +func (ccc *CategoryContentCreate) Save(ctx context.Context) (*CategoryContent, error) { + return withHooks(ctx, ccc.sqlSave, ccc.mutation, ccc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (ccc *CategoryContentCreate) SaveX(ctx context.Context) *CategoryContent { + v, err := ccc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ccc *CategoryContentCreate) Exec(ctx context.Context) error { + _, err := ccc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ccc *CategoryContentCreate) ExecX(ctx context.Context) { + if err := ccc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (ccc *CategoryContentCreate) check() error { + if _, ok := ccc.mutation.LanguageCode(); !ok { + return &ValidationError{Name: "language_code", err: errors.New(`ent: missing required field "CategoryContent.language_code"`)} + } + if v, ok := ccc.mutation.LanguageCode(); ok { + if err := categorycontent.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "CategoryContent.language_code": %w`, err)} + } + } + if _, ok := ccc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "CategoryContent.name"`)} + } + if v, ok := ccc.mutation.Name(); ok { + if err := categorycontent.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "CategoryContent.name": %w`, err)} + } + } + if _, ok := ccc.mutation.Slug(); !ok { + return &ValidationError{Name: "slug", err: errors.New(`ent: missing required field "CategoryContent.slug"`)} + } + if v, ok := ccc.mutation.Slug(); ok { + if err := categorycontent.SlugValidator(v); err != nil { + return &ValidationError{Name: "slug", err: fmt.Errorf(`ent: validator failed for field "CategoryContent.slug": %w`, err)} + } + } + return nil +} + +func (ccc *CategoryContentCreate) sqlSave(ctx context.Context) (*CategoryContent, error) { + if err := ccc.check(); err != nil { + return nil, err + } + _node, _spec := ccc.createSpec() + if err := sqlgraph.CreateNode(ctx, ccc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + ccc.mutation.id = &_node.ID + ccc.mutation.done = true + return _node, nil +} + +func (ccc *CategoryContentCreate) createSpec() (*CategoryContent, *sqlgraph.CreateSpec) { + var ( + _node = &CategoryContent{config: ccc.config} + _spec = sqlgraph.NewCreateSpec(categorycontent.Table, sqlgraph.NewFieldSpec(categorycontent.FieldID, field.TypeInt)) + ) + if value, ok := ccc.mutation.LanguageCode(); ok { + _spec.SetField(categorycontent.FieldLanguageCode, field.TypeEnum, value) + _node.LanguageCode = value + } + if value, ok := ccc.mutation.Name(); ok { + _spec.SetField(categorycontent.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := ccc.mutation.Description(); ok { + 
_spec.SetField(categorycontent.FieldDescription, field.TypeString, value) + _node.Description = value + } + if value, ok := ccc.mutation.Slug(); ok { + _spec.SetField(categorycontent.FieldSlug, field.TypeString, value) + _node.Slug = value + } + if nodes := ccc.mutation.CategoryIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: categorycontent.CategoryTable, + Columns: []string{categorycontent.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.category_contents = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// CategoryContentCreateBulk is the builder for creating many CategoryContent entities in bulk. +type CategoryContentCreateBulk struct { + config + err error + builders []*CategoryContentCreate +} + +// Save creates the CategoryContent entities in the database. +func (cccb *CategoryContentCreateBulk) Save(ctx context.Context) ([]*CategoryContent, error) { + if cccb.err != nil { + return nil, cccb.err + } + specs := make([]*sqlgraph.CreateSpec, len(cccb.builders)) + nodes := make([]*CategoryContent, len(cccb.builders)) + mutators := make([]Mutator, len(cccb.builders)) + for i := range cccb.builders { + func(i int, root context.Context) { + builder := cccb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*CategoryContentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, cccb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, cccb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, cccb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (cccb *CategoryContentCreateBulk) SaveX(ctx context.Context) []*CategoryContent { + v, err := cccb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (cccb *CategoryContentCreateBulk) Exec(ctx context.Context) error { + _, err := cccb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cccb *CategoryContentCreateBulk) ExecX(ctx context.Context) { + if err := cccb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/categorycontent_delete.go b/backend/ent/categorycontent_delete.go new file mode 100644 index 0000000..aaf0198 --- /dev/null +++ b/backend/ent/categorycontent_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. 
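+// Example (illustrative sketch; assumes an initialized *ent.Client named client
+// and a context.Context ctx):
+//
+//	n, err := client.CategoryContent.Delete().
+//		Where(categorycontent.SlugEQ("news")).
+//		Exec(ctx)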
+ +package ent + +import ( + "context" + "tss-rocks-be/ent/categorycontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// CategoryContentDelete is the builder for deleting a CategoryContent entity. +type CategoryContentDelete struct { + config + hooks []Hook + mutation *CategoryContentMutation +} + +// Where appends a list predicates to the CategoryContentDelete builder. +func (ccd *CategoryContentDelete) Where(ps ...predicate.CategoryContent) *CategoryContentDelete { + ccd.mutation.Where(ps...) + return ccd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (ccd *CategoryContentDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, ccd.sqlExec, ccd.mutation, ccd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (ccd *CategoryContentDelete) ExecX(ctx context.Context) int { + n, err := ccd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ccd *CategoryContentDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(categorycontent.Table, sqlgraph.NewFieldSpec(categorycontent.FieldID, field.TypeInt)) + if ps := ccd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ccd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ccd.mutation.done = true + return affected, err +} + +// CategoryContentDeleteOne is the builder for deleting a single CategoryContent entity. +type CategoryContentDeleteOne struct { + ccd *CategoryContentDelete +} + +// Where appends a list predicates to the CategoryContentDelete builder. +func (ccdo *CategoryContentDeleteOne) Where(ps ...predicate.CategoryContent) *CategoryContentDeleteOne { + ccdo.ccd.mutation.Where(ps...) + return ccdo +} + +// Exec executes the deletion query. +func (ccdo *CategoryContentDeleteOne) Exec(ctx context.Context) error { + n, err := ccdo.ccd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{categorycontent.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ccdo *CategoryContentDeleteOne) ExecX(ctx context.Context) { + if err := ccdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/categorycontent_query.go b/backend/ent/categorycontent_query.go new file mode 100644 index 0000000..ad5f5f7 --- /dev/null +++ b/backend/ent/categorycontent_query.go @@ -0,0 +1,614 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/categorycontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// CategoryContentQuery is the builder for querying CategoryContent entities. +type CategoryContentQuery struct { + config + ctx *QueryContext + order []categorycontent.OrderOption + inters []Interceptor + predicates []predicate.CategoryContent + withCategory *CategoryQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the CategoryContentQuery builder. 
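+// Example (illustrative sketch; assumes an initialized *ent.Client named client
+// and a context.Context ctx):
+//
+//	items, err := client.CategoryContent.Query().
+//		Where(categorycontent.LanguageCodeEQ(categorycontent.LanguageCodeEN)).
+//		All(ctx)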
+func (ccq *CategoryContentQuery) Where(ps ...predicate.CategoryContent) *CategoryContentQuery { + ccq.predicates = append(ccq.predicates, ps...) + return ccq +} + +// Limit the number of records to be returned by this query. +func (ccq *CategoryContentQuery) Limit(limit int) *CategoryContentQuery { + ccq.ctx.Limit = &limit + return ccq +} + +// Offset to start from. +func (ccq *CategoryContentQuery) Offset(offset int) *CategoryContentQuery { + ccq.ctx.Offset = &offset + return ccq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (ccq *CategoryContentQuery) Unique(unique bool) *CategoryContentQuery { + ccq.ctx.Unique = &unique + return ccq +} + +// Order specifies how the records should be ordered. +func (ccq *CategoryContentQuery) Order(o ...categorycontent.OrderOption) *CategoryContentQuery { + ccq.order = append(ccq.order, o...) + return ccq +} + +// QueryCategory chains the current query on the "category" edge. +func (ccq *CategoryContentQuery) QueryCategory() *CategoryQuery { + query := (&CategoryClient{config: ccq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := ccq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := ccq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(categorycontent.Table, categorycontent.FieldID, selector), + sqlgraph.To(category.Table, category.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, categorycontent.CategoryTable, categorycontent.CategoryColumn), + ) + fromU = sqlgraph.SetNeighbors(ccq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first CategoryContent entity from the query. +// Returns a *NotFoundError when no CategoryContent was found. +func (ccq *CategoryContentQuery) First(ctx context.Context) (*CategoryContent, error) { + nodes, err := ccq.Limit(1).All(setContextOp(ctx, ccq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{categorycontent.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (ccq *CategoryContentQuery) FirstX(ctx context.Context) *CategoryContent { + node, err := ccq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first CategoryContent ID from the query. +// Returns a *NotFoundError when no CategoryContent ID was found. +func (ccq *CategoryContentQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = ccq.Limit(1).IDs(setContextOp(ctx, ccq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{categorycontent.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (ccq *CategoryContentQuery) FirstIDX(ctx context.Context) int { + id, err := ccq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single CategoryContent entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one CategoryContent entity is found. +// Returns a *NotFoundError when no CategoryContent entities are found. 
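+// Example (illustrative sketch; slug values are assumed unique per language here):
+//
+//	cc, err := client.CategoryContent.Query().
+//		Where(categorycontent.SlugEQ("news")).
+//		Only(ctx)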
+func (ccq *CategoryContentQuery) Only(ctx context.Context) (*CategoryContent, error) { + nodes, err := ccq.Limit(2).All(setContextOp(ctx, ccq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{categorycontent.Label} + default: + return nil, &NotSingularError{categorycontent.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (ccq *CategoryContentQuery) OnlyX(ctx context.Context) *CategoryContent { + node, err := ccq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only CategoryContent ID in the query. +// Returns a *NotSingularError when more than one CategoryContent ID is found. +// Returns a *NotFoundError when no entities are found. +func (ccq *CategoryContentQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = ccq.Limit(2).IDs(setContextOp(ctx, ccq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{categorycontent.Label} + default: + err = &NotSingularError{categorycontent.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (ccq *CategoryContentQuery) OnlyIDX(ctx context.Context) int { + id, err := ccq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of CategoryContents. +func (ccq *CategoryContentQuery) All(ctx context.Context) ([]*CategoryContent, error) { + ctx = setContextOp(ctx, ccq.ctx, ent.OpQueryAll) + if err := ccq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*CategoryContent, *CategoryContentQuery]() + return withInterceptors[[]*CategoryContent](ctx, ccq, qr, ccq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (ccq *CategoryContentQuery) AllX(ctx context.Context) []*CategoryContent { + nodes, err := ccq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of CategoryContent IDs. +func (ccq *CategoryContentQuery) IDs(ctx context.Context) (ids []int, err error) { + if ccq.ctx.Unique == nil && ccq.path != nil { + ccq.Unique(true) + } + ctx = setContextOp(ctx, ccq.ctx, ent.OpQueryIDs) + if err = ccq.Select(categorycontent.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (ccq *CategoryContentQuery) IDsX(ctx context.Context) []int { + ids, err := ccq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (ccq *CategoryContentQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, ccq.ctx, ent.OpQueryCount) + if err := ccq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, ccq, querierCount[*CategoryContentQuery](), ccq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (ccq *CategoryContentQuery) CountX(ctx context.Context) int { + count, err := ccq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
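+// Example (illustrative sketch):
+//
+//	ok, err := client.CategoryContent.Query().
+//		Where(categorycontent.HasCategory()).
+//		Exist(ctx)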
+func (ccq *CategoryContentQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, ccq.ctx, ent.OpQueryExist) + switch _, err := ccq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (ccq *CategoryContentQuery) ExistX(ctx context.Context) bool { + exist, err := ccq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the CategoryContentQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (ccq *CategoryContentQuery) Clone() *CategoryContentQuery { + if ccq == nil { + return nil + } + return &CategoryContentQuery{ + config: ccq.config, + ctx: ccq.ctx.Clone(), + order: append([]categorycontent.OrderOption{}, ccq.order...), + inters: append([]Interceptor{}, ccq.inters...), + predicates: append([]predicate.CategoryContent{}, ccq.predicates...), + withCategory: ccq.withCategory.Clone(), + // clone intermediate query. + sql: ccq.sql.Clone(), + path: ccq.path, + } +} + +// WithCategory tells the query-builder to eager-load the nodes that are connected to +// the "category" edge. The optional arguments are used to configure the query builder of the edge. +func (ccq *CategoryContentQuery) WithCategory(opts ...func(*CategoryQuery)) *CategoryContentQuery { + query := (&CategoryClient{config: ccq.config}).Query() + for _, opt := range opts { + opt(query) + } + ccq.withCategory = query + return ccq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// LanguageCode categorycontent.LanguageCode `json:"language_code,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.CategoryContent.Query(). +// GroupBy(categorycontent.FieldLanguageCode). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (ccq *CategoryContentQuery) GroupBy(field string, fields ...string) *CategoryContentGroupBy { + ccq.ctx.Fields = append([]string{field}, fields...) + grbuild := &CategoryContentGroupBy{build: ccq} + grbuild.flds = &ccq.ctx.Fields + grbuild.label = categorycontent.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// LanguageCode categorycontent.LanguageCode `json:"language_code,omitempty"` +// } +// +// client.CategoryContent.Query(). +// Select(categorycontent.FieldLanguageCode). +// Scan(ctx, &v) +func (ccq *CategoryContentQuery) Select(fields ...string) *CategoryContentSelect { + ccq.ctx.Fields = append(ccq.ctx.Fields, fields...) + sbuild := &CategoryContentSelect{CategoryContentQuery: ccq} + sbuild.label = categorycontent.Label + sbuild.flds, sbuild.scan = &ccq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a CategoryContentSelect configured with the given aggregations. +func (ccq *CategoryContentQuery) Aggregate(fns ...AggregateFunc) *CategoryContentSelect { + return ccq.Select().Aggregate(fns...) 
+} + +func (ccq *CategoryContentQuery) prepareQuery(ctx context.Context) error { + for _, inter := range ccq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, ccq); err != nil { + return err + } + } + } + for _, f := range ccq.ctx.Fields { + if !categorycontent.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if ccq.path != nil { + prev, err := ccq.path(ctx) + if err != nil { + return err + } + ccq.sql = prev + } + return nil +} + +func (ccq *CategoryContentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*CategoryContent, error) { + var ( + nodes = []*CategoryContent{} + withFKs = ccq.withFKs + _spec = ccq.querySpec() + loadedTypes = [1]bool{ + ccq.withCategory != nil, + } + ) + if ccq.withCategory != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, categorycontent.ForeignKeys...) + } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*CategoryContent).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &CategoryContent{config: ccq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, ccq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := ccq.withCategory; query != nil { + if err := ccq.loadCategory(ctx, query, nodes, nil, + func(n *CategoryContent, e *Category) { n.Edges.Category = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (ccq *CategoryContentQuery) loadCategory(ctx context.Context, query *CategoryQuery, nodes []*CategoryContent, init func(*CategoryContent), assign func(*CategoryContent, *Category)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*CategoryContent) + for i := range nodes { + if nodes[i].category_contents == nil { + continue + } + fk := *nodes[i].category_contents + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(category.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "category_contents" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (ccq *CategoryContentQuery) sqlCount(ctx context.Context) (int, error) { + _spec := ccq.querySpec() + _spec.Node.Columns = ccq.ctx.Fields + if len(ccq.ctx.Fields) > 0 { + _spec.Unique = ccq.ctx.Unique != nil && *ccq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, ccq.driver, _spec) +} + +func (ccq *CategoryContentQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(categorycontent.Table, categorycontent.Columns, sqlgraph.NewFieldSpec(categorycontent.FieldID, field.TypeInt)) + _spec.From = ccq.sql + if unique := ccq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if ccq.path != nil { + _spec.Unique = true + } + if fields := ccq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, categorycontent.FieldID) + for i := 
range fields { + if fields[i] != categorycontent.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := ccq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := ccq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := ccq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := ccq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (ccq *CategoryContentQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(ccq.driver.Dialect()) + t1 := builder.Table(categorycontent.Table) + columns := ccq.ctx.Fields + if len(columns) == 0 { + columns = categorycontent.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if ccq.sql != nil { + selector = ccq.sql + selector.Select(selector.Columns(columns...)...) + } + if ccq.ctx.Unique != nil && *ccq.ctx.Unique { + selector.Distinct() + } + for _, p := range ccq.predicates { + p(selector) + } + for _, p := range ccq.order { + p(selector) + } + if offset := ccq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := ccq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// CategoryContentGroupBy is the group-by builder for CategoryContent entities. +type CategoryContentGroupBy struct { + selector + build *CategoryContentQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (ccgb *CategoryContentGroupBy) Aggregate(fns ...AggregateFunc) *CategoryContentGroupBy { + ccgb.fns = append(ccgb.fns, fns...) + return ccgb +} + +// Scan applies the selector query and scans the result into the given value. +func (ccgb *CategoryContentGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ccgb.build.ctx, ent.OpQueryGroupBy) + if err := ccgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*CategoryContentQuery, *CategoryContentGroupBy](ctx, ccgb.build, ccgb, ccgb.build.inters, v) +} + +func (ccgb *CategoryContentGroupBy) sqlScan(ctx context.Context, root *CategoryContentQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(ccgb.fns)) + for _, fn := range ccgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*ccgb.flds)+len(ccgb.fns)) + for _, f := range *ccgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*ccgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ccgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// CategoryContentSelect is the builder for selecting fields of CategoryContent entities. +type CategoryContentSelect struct { + *CategoryContentQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ccs *CategoryContentSelect) Aggregate(fns ...AggregateFunc) *CategoryContentSelect { + ccs.fns = append(ccs.fns, fns...) 
+ return ccs +} + +// Scan applies the selector query and scans the result into the given value. +func (ccs *CategoryContentSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ccs.ctx, ent.OpQuerySelect) + if err := ccs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*CategoryContentQuery, *CategoryContentSelect](ctx, ccs.CategoryContentQuery, ccs, ccs.inters, v) +} + +func (ccs *CategoryContentSelect) sqlScan(ctx context.Context, root *CategoryContentQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ccs.fns)) + for _, fn := range ccs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ccs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ccs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/categorycontent_update.go b/backend/ent/categorycontent_update.go new file mode 100644 index 0000000..546c821 --- /dev/null +++ b/backend/ent/categorycontent_update.go @@ -0,0 +1,484 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/categorycontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// CategoryContentUpdate is the builder for updating CategoryContent entities. +type CategoryContentUpdate struct { + config + hooks []Hook + mutation *CategoryContentMutation +} + +// Where appends a list of predicates to the CategoryContentUpdate builder. +func (ccu *CategoryContentUpdate) Where(ps ...predicate.CategoryContent) *CategoryContentUpdate { + ccu.mutation.Where(ps...) + return ccu +} + +// SetLanguageCode sets the "language_code" field. +func (ccu *CategoryContentUpdate) SetLanguageCode(cc categorycontent.LanguageCode) *CategoryContentUpdate { + ccu.mutation.SetLanguageCode(cc) + return ccu +} + +// SetNillableLanguageCode sets the "language_code" field if the given value is not nil. +func (ccu *CategoryContentUpdate) SetNillableLanguageCode(cc *categorycontent.LanguageCode) *CategoryContentUpdate { + if cc != nil { + ccu.SetLanguageCode(*cc) + } + return ccu +} + +// SetName sets the "name" field. +func (ccu *CategoryContentUpdate) SetName(s string) *CategoryContentUpdate { + ccu.mutation.SetName(s) + return ccu +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (ccu *CategoryContentUpdate) SetNillableName(s *string) *CategoryContentUpdate { + if s != nil { + ccu.SetName(*s) + } + return ccu +} + +// SetDescription sets the "description" field. +func (ccu *CategoryContentUpdate) SetDescription(s string) *CategoryContentUpdate { + ccu.mutation.SetDescription(s) + return ccu +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (ccu *CategoryContentUpdate) SetNillableDescription(s *string) *CategoryContentUpdate { + if s != nil { + ccu.SetDescription(*s) + } + return ccu +} + +// ClearDescription clears the value of the "description" field. +func (ccu *CategoryContentUpdate) ClearDescription() *CategoryContentUpdate { + ccu.mutation.ClearDescription() + return ccu +} + +// SetSlug sets the "slug" field.
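+//
+// A usage sketch of a chained bulk update (the predicate and slug values are
+// hypothetical; "ctx" and an initialized client are assumed):
+//
+//	n, err := client.CategoryContent.Update().
+//		Where(categorycontent.SlugEQ("old-slug")).
+//		SetSlug("new-slug").
+//		Save(ctx)
+//
+// Save reports how many rows were updated, so n counts the affected entities.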
+func (ccu *CategoryContentUpdate) SetSlug(s string) *CategoryContentUpdate { + ccu.mutation.SetSlug(s) + return ccu +} + +// SetNillableSlug sets the "slug" field if the given value is not nil. +func (ccu *CategoryContentUpdate) SetNillableSlug(s *string) *CategoryContentUpdate { + if s != nil { + ccu.SetSlug(*s) + } + return ccu +} + +// SetCategoryID sets the "category" edge to the Category entity by ID. +func (ccu *CategoryContentUpdate) SetCategoryID(id int) *CategoryContentUpdate { + ccu.mutation.SetCategoryID(id) + return ccu +} + +// SetNillableCategoryID sets the "category" edge to the Category entity by ID if the given value is not nil. +func (ccu *CategoryContentUpdate) SetNillableCategoryID(id *int) *CategoryContentUpdate { + if id != nil { + ccu = ccu.SetCategoryID(*id) + } + return ccu +} + +// SetCategory sets the "category" edge to the Category entity. +func (ccu *CategoryContentUpdate) SetCategory(c *Category) *CategoryContentUpdate { + return ccu.SetCategoryID(c.ID) +} + +// Mutation returns the CategoryContentMutation object of the builder. +func (ccu *CategoryContentUpdate) Mutation() *CategoryContentMutation { + return ccu.mutation +} + +// ClearCategory clears the "category" edge to the Category entity. +func (ccu *CategoryContentUpdate) ClearCategory() *CategoryContentUpdate { + ccu.mutation.ClearCategory() + return ccu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (ccu *CategoryContentUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, ccu.sqlSave, ccu.mutation, ccu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (ccu *CategoryContentUpdate) SaveX(ctx context.Context) int { + affected, err := ccu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (ccu *CategoryContentUpdate) Exec(ctx context.Context) error { + _, err := ccu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ccu *CategoryContentUpdate) ExecX(ctx context.Context) { + if err := ccu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
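+// Validator failures surface from Save or Exec as a *ValidationError. A hedged
+// sketch (it assumes the schema's slug validator rejects the empty string, and
+// that "log" is imported at the call site):
+//
+//	if _, err := client.CategoryContent.Update().SetSlug("").Save(ctx); err != nil {
+//		var ve *ValidationError
+//		if errors.As(err, &ve) {
+//			log.Printf("invalid field %q: %v", ve.Name, ve)
+//		}
+//	}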
+func (ccu *CategoryContentUpdate) check() error { + if v, ok := ccu.mutation.LanguageCode(); ok { + if err := categorycontent.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "CategoryContent.language_code": %w`, err)} + } + } + if v, ok := ccu.mutation.Name(); ok { + if err := categorycontent.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "CategoryContent.name": %w`, err)} + } + } + if v, ok := ccu.mutation.Slug(); ok { + if err := categorycontent.SlugValidator(v); err != nil { + return &ValidationError{Name: "slug", err: fmt.Errorf(`ent: validator failed for field "CategoryContent.slug": %w`, err)} + } + } + return nil +} + +func (ccu *CategoryContentUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := ccu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(categorycontent.Table, categorycontent.Columns, sqlgraph.NewFieldSpec(categorycontent.FieldID, field.TypeInt)) + if ps := ccu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := ccu.mutation.LanguageCode(); ok { + _spec.SetField(categorycontent.FieldLanguageCode, field.TypeEnum, value) + } + if value, ok := ccu.mutation.Name(); ok { + _spec.SetField(categorycontent.FieldName, field.TypeString, value) + } + if value, ok := ccu.mutation.Description(); ok { + _spec.SetField(categorycontent.FieldDescription, field.TypeString, value) + } + if ccu.mutation.DescriptionCleared() { + _spec.ClearField(categorycontent.FieldDescription, field.TypeString) + } + if value, ok := ccu.mutation.Slug(); ok { + _spec.SetField(categorycontent.FieldSlug, field.TypeString, value) + } + if ccu.mutation.CategoryCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: categorycontent.CategoryTable, + Columns: []string{categorycontent.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ccu.mutation.CategoryIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: categorycontent.CategoryTable, + Columns: []string{categorycontent.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, ccu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{categorycontent.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + ccu.mutation.done = true + return n, nil +} + +// CategoryContentUpdateOne is the builder for updating a single CategoryContent entity. +type CategoryContentUpdateOne struct { + config + fields []string + hooks []Hook + mutation *CategoryContentMutation +} + +// SetLanguageCode sets the "language_code" field. 
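+//
+// Unlike CategoryContentUpdate, which reports an affected-row count, the
+// UpdateOne builder returns the updated entity itself. A usage sketch (the id
+// and name values are hypothetical; "ctx" and the client are assumed):
+//
+//	cc, err := client.CategoryContent.UpdateOneID(1).
+//		SetName("News").
+//		Save(ctx)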
+func (ccuo *CategoryContentUpdateOne) SetLanguageCode(cc categorycontent.LanguageCode) *CategoryContentUpdateOne { + ccuo.mutation.SetLanguageCode(cc) + return ccuo +} + +// SetNillableLanguageCode sets the "language_code" field if the given value is not nil. +func (ccuo *CategoryContentUpdateOne) SetNillableLanguageCode(cc *categorycontent.LanguageCode) *CategoryContentUpdateOne { + if cc != nil { + ccuo.SetLanguageCode(*cc) + } + return ccuo +} + +// SetName sets the "name" field. +func (ccuo *CategoryContentUpdateOne) SetName(s string) *CategoryContentUpdateOne { + ccuo.mutation.SetName(s) + return ccuo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (ccuo *CategoryContentUpdateOne) SetNillableName(s *string) *CategoryContentUpdateOne { + if s != nil { + ccuo.SetName(*s) + } + return ccuo +} + +// SetDescription sets the "description" field. +func (ccuo *CategoryContentUpdateOne) SetDescription(s string) *CategoryContentUpdateOne { + ccuo.mutation.SetDescription(s) + return ccuo +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (ccuo *CategoryContentUpdateOne) SetNillableDescription(s *string) *CategoryContentUpdateOne { + if s != nil { + ccuo.SetDescription(*s) + } + return ccuo +} + +// ClearDescription clears the value of the "description" field. +func (ccuo *CategoryContentUpdateOne) ClearDescription() *CategoryContentUpdateOne { + ccuo.mutation.ClearDescription() + return ccuo +} + +// SetSlug sets the "slug" field. +func (ccuo *CategoryContentUpdateOne) SetSlug(s string) *CategoryContentUpdateOne { + ccuo.mutation.SetSlug(s) + return ccuo +} + +// SetNillableSlug sets the "slug" field if the given value is not nil. +func (ccuo *CategoryContentUpdateOne) SetNillableSlug(s *string) *CategoryContentUpdateOne { + if s != nil { + ccuo.SetSlug(*s) + } + return ccuo +} + +// SetCategoryID sets the "category" edge to the Category entity by ID. +func (ccuo *CategoryContentUpdateOne) SetCategoryID(id int) *CategoryContentUpdateOne { + ccuo.mutation.SetCategoryID(id) + return ccuo +} + +// SetNillableCategoryID sets the "category" edge to the Category entity by ID if the given value is not nil. +func (ccuo *CategoryContentUpdateOne) SetNillableCategoryID(id *int) *CategoryContentUpdateOne { + if id != nil { + ccuo = ccuo.SetCategoryID(*id) + } + return ccuo +} + +// SetCategory sets the "category" edge to the Category entity. +func (ccuo *CategoryContentUpdateOne) SetCategory(c *Category) *CategoryContentUpdateOne { + return ccuo.SetCategoryID(c.ID) +} + +// Mutation returns the CategoryContentMutation object of the builder. +func (ccuo *CategoryContentUpdateOne) Mutation() *CategoryContentMutation { + return ccuo.mutation +} + +// ClearCategory clears the "category" edge to the Category entity. +func (ccuo *CategoryContentUpdateOne) ClearCategory() *CategoryContentUpdateOne { + ccuo.mutation.ClearCategory() + return ccuo +} + +// Where appends a list of predicates to the CategoryContentUpdateOne builder. +func (ccuo *CategoryContentUpdateOne) Where(ps ...predicate.CategoryContent) *CategoryContentUpdateOne { + ccuo.mutation.Where(ps...) + return ccuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (ccuo *CategoryContentUpdateOne) Select(field string, fields ...string) *CategoryContentUpdateOne { + ccuo.fields = append([]string{field}, fields...)
+ return ccuo +} + +// Save executes the query and returns the updated CategoryContent entity. +func (ccuo *CategoryContentUpdateOne) Save(ctx context.Context) (*CategoryContent, error) { + return withHooks(ctx, ccuo.sqlSave, ccuo.mutation, ccuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (ccuo *CategoryContentUpdateOne) SaveX(ctx context.Context) *CategoryContent { + node, err := ccuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (ccuo *CategoryContentUpdateOne) Exec(ctx context.Context) error { + _, err := ccuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ccuo *CategoryContentUpdateOne) ExecX(ctx context.Context) { + if err := ccuo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (ccuo *CategoryContentUpdateOne) check() error { + if v, ok := ccuo.mutation.LanguageCode(); ok { + if err := categorycontent.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "CategoryContent.language_code": %w`, err)} + } + } + if v, ok := ccuo.mutation.Name(); ok { + if err := categorycontent.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "CategoryContent.name": %w`, err)} + } + } + if v, ok := ccuo.mutation.Slug(); ok { + if err := categorycontent.SlugValidator(v); err != nil { + return &ValidationError{Name: "slug", err: fmt.Errorf(`ent: validator failed for field "CategoryContent.slug": %w`, err)} + } + } + return nil +} + +func (ccuo *CategoryContentUpdateOne) sqlSave(ctx context.Context) (_node *CategoryContent, err error) { + if err := ccuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(categorycontent.Table, categorycontent.Columns, sqlgraph.NewFieldSpec(categorycontent.FieldID, field.TypeInt)) + id, ok := ccuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "CategoryContent.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := ccuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, categorycontent.FieldID) + for _, f := range fields { + if !categorycontent.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != categorycontent.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := ccuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := ccuo.mutation.LanguageCode(); ok { + _spec.SetField(categorycontent.FieldLanguageCode, field.TypeEnum, value) + } + if value, ok := ccuo.mutation.Name(); ok { + _spec.SetField(categorycontent.FieldName, field.TypeString, value) + } + if value, ok := ccuo.mutation.Description(); ok { + _spec.SetField(categorycontent.FieldDescription, field.TypeString, value) + } + if ccuo.mutation.DescriptionCleared() { + _spec.ClearField(categorycontent.FieldDescription, field.TypeString) + } + if value, ok := ccuo.mutation.Slug(); ok { + _spec.SetField(categorycontent.FieldSlug, field.TypeString, value) + } + if ccuo.mutation.CategoryCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: 
categorycontent.CategoryTable, + Columns: []string{categorycontent.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ccuo.mutation.CategoryIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: categorycontent.CategoryTable, + Columns: []string{categorycontent.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &CategoryContent{config: ccuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, ccuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{categorycontent.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + ccuo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/client.go b/backend/ent/client.go new file mode 100644 index 0000000..bbcf6f3 --- /dev/null +++ b/backend/ent/client.go @@ -0,0 +1,2939 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + + "tss-rocks-be/ent/migrate" + + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/categorycontent" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/contributorrole" + "tss-rocks-be/ent/contributorsociallink" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/dailycategory" + "tss-rocks-be/ent/dailycategorycontent" + "tss-rocks-be/ent/dailycontent" + "tss-rocks-be/ent/media" + "tss-rocks-be/ent/permission" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/postcontent" + "tss-rocks-be/ent/postcontributor" + "tss-rocks-be/ent/role" + "tss-rocks-be/ent/user" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // Category is the client for interacting with the Category builders. + Category *CategoryClient + // CategoryContent is the client for interacting with the CategoryContent builders. + CategoryContent *CategoryContentClient + // Contributor is the client for interacting with the Contributor builders. + Contributor *ContributorClient + // ContributorRole is the client for interacting with the ContributorRole builders. + ContributorRole *ContributorRoleClient + // ContributorSocialLink is the client for interacting with the ContributorSocialLink builders. + ContributorSocialLink *ContributorSocialLinkClient + // Daily is the client for interacting with the Daily builders. + Daily *DailyClient + // DailyCategory is the client for interacting with the DailyCategory builders. + DailyCategory *DailyCategoryClient + // DailyCategoryContent is the client for interacting with the DailyCategoryContent builders. + DailyCategoryContent *DailyCategoryContentClient + // DailyContent is the client for interacting with the DailyContent builders. + DailyContent *DailyContentClient + // Media is the client for interacting with the Media builders. 
+ Media *MediaClient + // Permission is the client for interacting with the Permission builders. + Permission *PermissionClient + // Post is the client for interacting with the Post builders. + Post *PostClient + // PostContent is the client for interacting with the PostContent builders. + PostContent *PostContentClient + // PostContributor is the client for interacting with the PostContributor builders. + PostContributor *PostContributorClient + // Role is the client for interacting with the Role builders. + Role *RoleClient + // User is the client for interacting with the User builders. + User *UserClient +} + +// NewClient creates a new client configured with the given options. +func NewClient(opts ...Option) *Client { + client := &Client{config: newConfig(opts...)} + client.init() + return client +} + +func (c *Client) init() { + c.Schema = migrate.NewSchema(c.driver) + c.Category = NewCategoryClient(c.config) + c.CategoryContent = NewCategoryContentClient(c.config) + c.Contributor = NewContributorClient(c.config) + c.ContributorRole = NewContributorRoleClient(c.config) + c.ContributorSocialLink = NewContributorSocialLinkClient(c.config) + c.Daily = NewDailyClient(c.config) + c.DailyCategory = NewDailyCategoryClient(c.config) + c.DailyCategoryContent = NewDailyCategoryContentClient(c.config) + c.DailyContent = NewDailyContentClient(c.config) + c.Media = NewMediaClient(c.config) + c.Permission = NewPermissionClient(c.config) + c.Post = NewPostClient(c.config) + c.PostContent = NewPostContentClient(c.config) + c.PostContributor = NewPostContributorClient(c.config) + c.Role = NewRoleClient(c.config) + c.User = NewUserClient(c.config) +} + +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + +// Open opens a database/sql.DB specified by the driver name and +// the data source name, and returns a new client attached to it. +// Optional parameters can be added for configuring the client. 
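+//
+// A minimal sketch, assuming a SQLite driver is registered by the caller and
+// "log" is imported (the DSN is illustrative):
+//
+//	client, err := Open(dialect.SQLite, "file:ent?mode=memory&cache=shared&_fk=1")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer client.Close()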
+func Open(driverName, dataSourceName string, options ...Option) (*Client, error) { + switch driverName { + case dialect.MySQL, dialect.Postgres, dialect.SQLite: + drv, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return NewClient(append(options, Driver(drv))...), nil + default: + return nil, fmt.Errorf("unsupported driver: %q", driverName) + } +} + +// ErrTxStarted is returned when trying to start a new transaction from a transactional client. +var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction") + +// Tx returns a new transactional client. The provided context +// is used until the transaction is committed or rolled back. +func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, ErrTxStarted + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = tx + return &Tx{ + ctx: ctx, + config: cfg, + Category: NewCategoryClient(cfg), + CategoryContent: NewCategoryContentClient(cfg), + Contributor: NewContributorClient(cfg), + ContributorRole: NewContributorRoleClient(cfg), + ContributorSocialLink: NewContributorSocialLinkClient(cfg), + Daily: NewDailyClient(cfg), + DailyCategory: NewDailyCategoryClient(cfg), + DailyCategoryContent: NewDailyCategoryContentClient(cfg), + DailyContent: NewDailyContentClient(cfg), + Media: NewMediaClient(cfg), + Permission: NewPermissionClient(cfg), + Post: NewPostClient(cfg), + PostContent: NewPostContentClient(cfg), + PostContributor: NewPostContributorClient(cfg), + Role: NewRoleClient(cfg), + User: NewUserClient(cfg), + }, nil +} + +// BeginTx returns a transactional client with specified options. +func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, errors.New("ent: cannot start a transaction within a transaction") + } + tx, err := c.driver.(interface { + BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error) + }).BeginTx(ctx, opts) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = &txDriver{tx: tx, drv: c.driver} + return &Tx{ + ctx: ctx, + config: cfg, + Category: NewCategoryClient(cfg), + CategoryContent: NewCategoryContentClient(cfg), + Contributor: NewContributorClient(cfg), + ContributorRole: NewContributorRoleClient(cfg), + ContributorSocialLink: NewContributorSocialLinkClient(cfg), + Daily: NewDailyClient(cfg), + DailyCategory: NewDailyCategoryClient(cfg), + DailyCategoryContent: NewDailyCategoryContentClient(cfg), + DailyContent: NewDailyContentClient(cfg), + Media: NewMediaClient(cfg), + Permission: NewPermissionClient(cfg), + Post: NewPostClient(cfg), + PostContent: NewPostContentClient(cfg), + PostContributor: NewPostContributorClient(cfg), + Role: NewRoleClient(cfg), + User: NewUserClient(cfg), + }, nil +} + +// Debug returns a new debug-client. It's used to get verbose logging on specific operations. +// +// client.Debug(). +// Category. +// Query(). +// Count(ctx) +func (c *Client) Debug() *Client { + if c.debug { + return c + } + cfg := c.config + cfg.driver = dialect.Debug(c.driver, c.log) + client := &Client{config: cfg} + client.init() + return client +} + +// Close closes the database connection and prevents new queries from starting. +func (c *Client) Close() error { + return c.driver.Close() +} + +// Use adds the mutation hooks to all the entity clients. 
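+// A hedged sketch of a logging hook (the printed fields are illustrative, and
+// "log" is assumed to be imported at the call site):
+//
+//	client.Use(func(next Mutator) Mutator {
+//		return MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+//			log.Printf("mutation: type=%s, op=%v", m.Type(), m.Op())
+//			return next.Mutate(ctx, m)
+//		})
+//	})
+//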
+// In order to add hooks to a specific client, call: `client.Node.Use(...)`. +func (c *Client) Use(hooks ...Hook) { + for _, n := range []interface{ Use(...Hook) }{ + c.Category, c.CategoryContent, c.Contributor, c.ContributorRole, + c.ContributorSocialLink, c.Daily, c.DailyCategory, c.DailyCategoryContent, + c.DailyContent, c.Media, c.Permission, c.Post, c.PostContent, + c.PostContributor, c.Role, c.User, + } { + n.Use(hooks...) + } +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. +func (c *Client) Intercept(interceptors ...Interceptor) { + for _, n := range []interface{ Intercept(...Interceptor) }{ + c.Category, c.CategoryContent, c.Contributor, c.ContributorRole, + c.ContributorSocialLink, c.Daily, c.DailyCategory, c.DailyCategoryContent, + c.DailyContent, c.Media, c.Permission, c.Post, c.PostContent, + c.PostContributor, c.Role, c.User, + } { + n.Intercept(interceptors...) + } +} + +// Mutate implements the ent.Mutator interface. +func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *CategoryMutation: + return c.Category.mutate(ctx, m) + case *CategoryContentMutation: + return c.CategoryContent.mutate(ctx, m) + case *ContributorMutation: + return c.Contributor.mutate(ctx, m) + case *ContributorRoleMutation: + return c.ContributorRole.mutate(ctx, m) + case *ContributorSocialLinkMutation: + return c.ContributorSocialLink.mutate(ctx, m) + case *DailyMutation: + return c.Daily.mutate(ctx, m) + case *DailyCategoryMutation: + return c.DailyCategory.mutate(ctx, m) + case *DailyCategoryContentMutation: + return c.DailyCategoryContent.mutate(ctx, m) + case *DailyContentMutation: + return c.DailyContent.mutate(ctx, m) + case *MediaMutation: + return c.Media.mutate(ctx, m) + case *PermissionMutation: + return c.Permission.mutate(ctx, m) + case *PostMutation: + return c.Post.mutate(ctx, m) + case *PostContentMutation: + return c.PostContent.mutate(ctx, m) + case *PostContributorMutation: + return c.PostContributor.mutate(ctx, m) + case *RoleMutation: + return c.Role.mutate(ctx, m) + case *UserMutation: + return c.User.mutate(ctx, m) + default: + return nil, fmt.Errorf("ent: unknown mutation type %T", m) + } +} + +// CategoryClient is a client for the Category schema. +type CategoryClient struct { + config +} + +// NewCategoryClient returns a client for the Category from the given config. +func NewCategoryClient(c config) *CategoryClient { + return &CategoryClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `category.Hooks(f(g(h())))`. +func (c *CategoryClient) Use(hooks ...Hook) { + c.hooks.Category = append(c.hooks.Category, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `category.Intercept(f(g(h())))`. +func (c *CategoryClient) Intercept(interceptors ...Interceptor) { + c.inters.Category = append(c.inters.Category, interceptors...) +} + +// Create returns a builder for creating a Category entity. +func (c *CategoryClient) Create() *CategoryCreate { + mutation := newCategoryMutation(c.config, OpCreate) + return &CategoryCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Category entities. 
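+//
+// A usage sketch ("ctx" and the client are assumed; any fields the Category
+// schema requires must be set on each builder):
+//
+//	categories, err := client.Category.CreateBulk(
+//		client.Category.Create(),
+//		client.Category.Create(),
+//	).Save(ctx)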
+func (c *CategoryClient) CreateBulk(builders ...*CategoryCreate) *CategoryCreateBulk { + return &CategoryCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *CategoryClient) MapCreateBulk(slice any, setFunc func(*CategoryCreate, int)) *CategoryCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &CategoryCreateBulk{err: fmt.Errorf("calling to CategoryClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*CategoryCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &CategoryCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Category. +func (c *CategoryClient) Update() *CategoryUpdate { + mutation := newCategoryMutation(c.config, OpUpdate) + return &CategoryUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *CategoryClient) UpdateOne(ca *Category) *CategoryUpdateOne { + mutation := newCategoryMutation(c.config, OpUpdateOne, withCategory(ca)) + return &CategoryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *CategoryClient) UpdateOneID(id int) *CategoryUpdateOne { + mutation := newCategoryMutation(c.config, OpUpdateOne, withCategoryID(id)) + return &CategoryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Category. +func (c *CategoryClient) Delete() *CategoryDelete { + mutation := newCategoryMutation(c.config, OpDelete) + return &CategoryDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *CategoryClient) DeleteOne(ca *Category) *CategoryDeleteOne { + return c.DeleteOneID(ca.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *CategoryClient) DeleteOneID(id int) *CategoryDeleteOne { + builder := c.Delete().Where(category.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &CategoryDeleteOne{builder} +} + +// Query returns a query builder for Category. +func (c *CategoryClient) Query() *CategoryQuery { + return &CategoryQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeCategory}, + inters: c.Interceptors(), + } +} + +// Get returns a Category entity by its id. +func (c *CategoryClient) Get(ctx context.Context, id int) (*Category, error) { + return c.Query().Where(category.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *CategoryClient) GetX(ctx context.Context, id int) *Category { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryContents queries the contents edge of a Category. 
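+//
+// A usage sketch ("ca" is a previously loaded *Category; "ctx" is assumed):
+//
+//	contents, err := client.Category.QueryContents(ca).All(ctx)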
+func (c *CategoryClient) QueryContents(ca *Category) *CategoryContentQuery { + query := (&CategoryContentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := ca.ID + step := sqlgraph.NewStep( + sqlgraph.From(category.Table, category.FieldID, id), + sqlgraph.To(categorycontent.Table, categorycontent.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, category.ContentsTable, category.ContentsColumn), + ) + fromV = sqlgraph.Neighbors(ca.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryPosts queries the posts edge of a Category. +func (c *CategoryClient) QueryPosts(ca *Category) *PostQuery { + query := (&PostClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := ca.ID + step := sqlgraph.NewStep( + sqlgraph.From(category.Table, category.FieldID, id), + sqlgraph.To(post.Table, post.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, category.PostsTable, category.PostsColumn), + ) + fromV = sqlgraph.Neighbors(ca.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryDailyItems queries the daily_items edge of a Category. +func (c *CategoryClient) QueryDailyItems(ca *Category) *DailyQuery { + query := (&DailyClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := ca.ID + step := sqlgraph.NewStep( + sqlgraph.From(category.Table, category.FieldID, id), + sqlgraph.To(daily.Table, daily.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, category.DailyItemsTable, category.DailyItemsColumn), + ) + fromV = sqlgraph.Neighbors(ca.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *CategoryClient) Hooks() []Hook { + return c.hooks.Category +} + +// Interceptors returns the client interceptors. +func (c *CategoryClient) Interceptors() []Interceptor { + return c.inters.Category +} + +func (c *CategoryClient) mutate(ctx context.Context, m *CategoryMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&CategoryCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&CategoryUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&CategoryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&CategoryDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Category mutation op: %q", m.Op()) + } +} + +// CategoryContentClient is a client for the CategoryContent schema. +type CategoryContentClient struct { + config +} + +// NewCategoryContentClient returns a client for the CategoryContent from the given config. +func NewCategoryContentClient(c config) *CategoryContentClient { + return &CategoryContentClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `categorycontent.Hooks(f(g(h())))`. +func (c *CategoryContentClient) Use(hooks ...Hook) { + c.hooks.CategoryContent = append(c.hooks.CategoryContent, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `categorycontent.Intercept(f(g(h())))`. +func (c *CategoryContentClient) Intercept(interceptors ...Interceptor) { + c.inters.CategoryContent = append(c.inters.CategoryContent, interceptors...) 
+} + +// Create returns a builder for creating a CategoryContent entity. +func (c *CategoryContentClient) Create() *CategoryContentCreate { + mutation := newCategoryContentMutation(c.config, OpCreate) + return &CategoryContentCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of CategoryContent entities. +func (c *CategoryContentClient) CreateBulk(builders ...*CategoryContentCreate) *CategoryContentCreateBulk { + return &CategoryContentCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *CategoryContentClient) MapCreateBulk(slice any, setFunc func(*CategoryContentCreate, int)) *CategoryContentCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &CategoryContentCreateBulk{err: fmt.Errorf("calling to CategoryContentClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*CategoryContentCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &CategoryContentCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for CategoryContent. +func (c *CategoryContentClient) Update() *CategoryContentUpdate { + mutation := newCategoryContentMutation(c.config, OpUpdate) + return &CategoryContentUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *CategoryContentClient) UpdateOne(cc *CategoryContent) *CategoryContentUpdateOne { + mutation := newCategoryContentMutation(c.config, OpUpdateOne, withCategoryContent(cc)) + return &CategoryContentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *CategoryContentClient) UpdateOneID(id int) *CategoryContentUpdateOne { + mutation := newCategoryContentMutation(c.config, OpUpdateOne, withCategoryContentID(id)) + return &CategoryContentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for CategoryContent. +func (c *CategoryContentClient) Delete() *CategoryContentDelete { + mutation := newCategoryContentMutation(c.config, OpDelete) + return &CategoryContentDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *CategoryContentClient) DeleteOne(cc *CategoryContent) *CategoryContentDeleteOne { + return c.DeleteOneID(cc.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *CategoryContentClient) DeleteOneID(id int) *CategoryContentDeleteOne { + builder := c.Delete().Where(categorycontent.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &CategoryContentDeleteOne{builder} +} + +// Query returns a query builder for CategoryContent. +func (c *CategoryContentClient) Query() *CategoryContentQuery { + return &CategoryContentQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeCategoryContent}, + inters: c.Interceptors(), + } +} + +// Get returns a CategoryContent entity by its id. +func (c *CategoryContentClient) Get(ctx context.Context, id int) (*CategoryContent, error) { + return c.Query().Where(categorycontent.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (c *CategoryContentClient) GetX(ctx context.Context, id int) *CategoryContent { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryCategory queries the category edge of a CategoryContent. +func (c *CategoryContentClient) QueryCategory(cc *CategoryContent) *CategoryQuery { + query := (&CategoryClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := cc.ID + step := sqlgraph.NewStep( + sqlgraph.From(categorycontent.Table, categorycontent.FieldID, id), + sqlgraph.To(category.Table, category.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, categorycontent.CategoryTable, categorycontent.CategoryColumn), + ) + fromV = sqlgraph.Neighbors(cc.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *CategoryContentClient) Hooks() []Hook { + return c.hooks.CategoryContent +} + +// Interceptors returns the client interceptors. +func (c *CategoryContentClient) Interceptors() []Interceptor { + return c.inters.CategoryContent +} + +func (c *CategoryContentClient) mutate(ctx context.Context, m *CategoryContentMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&CategoryContentCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&CategoryContentUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&CategoryContentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&CategoryContentDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown CategoryContent mutation op: %q", m.Op()) + } +} + +// ContributorClient is a client for the Contributor schema. +type ContributorClient struct { + config +} + +// NewContributorClient returns a client for the Contributor from the given config. +func NewContributorClient(c config) *ContributorClient { + return &ContributorClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `contributor.Hooks(f(g(h())))`. +func (c *ContributorClient) Use(hooks ...Hook) { + c.hooks.Contributor = append(c.hooks.Contributor, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `contributor.Intercept(f(g(h())))`. +func (c *ContributorClient) Intercept(interceptors ...Interceptor) { + c.inters.Contributor = append(c.inters.Contributor, interceptors...) +} + +// Create returns a builder for creating a Contributor entity. +func (c *ContributorClient) Create() *ContributorCreate { + mutation := newContributorMutation(c.config, OpCreate) + return &ContributorCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Contributor entities. +func (c *ContributorClient) CreateBulk(builders ...*ContributorCreate) *ContributorCreateBulk { + return &ContributorCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
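+//
+// A hedged sketch (the input slice is hypothetical, and SetName is assumed to
+// exist on ContributorCreate per the Contributor schema):
+//
+//	names := []string{"alice", "bob"}
+//	contributors, err := client.Contributor.MapCreateBulk(names, func(c *ContributorCreate, i int) {
+//		c.SetName(names[i])
+//	}).Save(ctx)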
+func (c *ContributorClient) MapCreateBulk(slice any, setFunc func(*ContributorCreate, int)) *ContributorCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ContributorCreateBulk{err: fmt.Errorf("calling to ContributorClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ContributorCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ContributorCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Contributor. +func (c *ContributorClient) Update() *ContributorUpdate { + mutation := newContributorMutation(c.config, OpUpdate) + return &ContributorUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *ContributorClient) UpdateOne(co *Contributor) *ContributorUpdateOne { + mutation := newContributorMutation(c.config, OpUpdateOne, withContributor(co)) + return &ContributorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *ContributorClient) UpdateOneID(id int) *ContributorUpdateOne { + mutation := newContributorMutation(c.config, OpUpdateOne, withContributorID(id)) + return &ContributorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Contributor. +func (c *ContributorClient) Delete() *ContributorDelete { + mutation := newContributorMutation(c.config, OpDelete) + return &ContributorDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *ContributorClient) DeleteOne(co *Contributor) *ContributorDeleteOne { + return c.DeleteOneID(co.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *ContributorClient) DeleteOneID(id int) *ContributorDeleteOne { + builder := c.Delete().Where(contributor.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &ContributorDeleteOne{builder} +} + +// Query returns a query builder for Contributor. +func (c *ContributorClient) Query() *ContributorQuery { + return &ContributorQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeContributor}, + inters: c.Interceptors(), + } +} + +// Get returns a Contributor entity by its id. +func (c *ContributorClient) Get(ctx context.Context, id int) (*Contributor, error) { + return c.Query().Where(contributor.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *ContributorClient) GetX(ctx context.Context, id int) *Contributor { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryUser queries the user edge of a Contributor. +func (c *ContributorClient) QueryUser(co *Contributor) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := co.ID + step := sqlgraph.NewStep( + sqlgraph.From(contributor.Table, contributor.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, contributor.UserTable, contributor.UserColumn), + ) + fromV = sqlgraph.Neighbors(co.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QuerySocialLinks queries the social_links edge of a Contributor. 
+func (c *ContributorClient) QuerySocialLinks(co *Contributor) *ContributorSocialLinkQuery { + query := (&ContributorSocialLinkClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := co.ID + step := sqlgraph.NewStep( + sqlgraph.From(contributor.Table, contributor.FieldID, id), + sqlgraph.To(contributorsociallink.Table, contributorsociallink.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, contributor.SocialLinksTable, contributor.SocialLinksColumn), + ) + fromV = sqlgraph.Neighbors(co.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryPosts queries the posts edge of a Contributor. +func (c *ContributorClient) QueryPosts(co *Contributor) *PostContributorQuery { + query := (&PostContributorClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := co.ID + step := sqlgraph.NewStep( + sqlgraph.From(contributor.Table, contributor.FieldID, id), + sqlgraph.To(postcontributor.Table, postcontributor.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, contributor.PostsTable, contributor.PostsColumn), + ) + fromV = sqlgraph.Neighbors(co.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *ContributorClient) Hooks() []Hook { + return c.hooks.Contributor +} + +// Interceptors returns the client interceptors. +func (c *ContributorClient) Interceptors() []Interceptor { + return c.inters.Contributor +} + +func (c *ContributorClient) mutate(ctx context.Context, m *ContributorMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ContributorCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ContributorUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ContributorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ContributorDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Contributor mutation op: %q", m.Op()) + } +} + +// ContributorRoleClient is a client for the ContributorRole schema. +type ContributorRoleClient struct { + config +} + +// NewContributorRoleClient returns a client for the ContributorRole from the given config. +func NewContributorRoleClient(c config) *ContributorRoleClient { + return &ContributorRoleClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `contributorrole.Hooks(f(g(h())))`. +func (c *ContributorRoleClient) Use(hooks ...Hook) { + c.hooks.ContributorRole = append(c.hooks.ContributorRole, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `contributorrole.Intercept(f(g(h())))`. +func (c *ContributorRoleClient) Intercept(interceptors ...Interceptor) { + c.inters.ContributorRole = append(c.inters.ContributorRole, interceptors...) +} + +// Create returns a builder for creating a ContributorRole entity. +func (c *ContributorRoleClient) Create() *ContributorRoleCreate { + mutation := newContributorRoleMutation(c.config, OpCreate) + return &ContributorRoleCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of ContributorRole entities. 
+func (c *ContributorRoleClient) CreateBulk(builders ...*ContributorRoleCreate) *ContributorRoleCreateBulk { + return &ContributorRoleCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *ContributorRoleClient) MapCreateBulk(slice any, setFunc func(*ContributorRoleCreate, int)) *ContributorRoleCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ContributorRoleCreateBulk{err: fmt.Errorf("calling to ContributorRoleClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ContributorRoleCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ContributorRoleCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for ContributorRole. +func (c *ContributorRoleClient) Update() *ContributorRoleUpdate { + mutation := newContributorRoleMutation(c.config, OpUpdate) + return &ContributorRoleUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *ContributorRoleClient) UpdateOne(cr *ContributorRole) *ContributorRoleUpdateOne { + mutation := newContributorRoleMutation(c.config, OpUpdateOne, withContributorRole(cr)) + return &ContributorRoleUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *ContributorRoleClient) UpdateOneID(id int) *ContributorRoleUpdateOne { + mutation := newContributorRoleMutation(c.config, OpUpdateOne, withContributorRoleID(id)) + return &ContributorRoleUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for ContributorRole. +func (c *ContributorRoleClient) Delete() *ContributorRoleDelete { + mutation := newContributorRoleMutation(c.config, OpDelete) + return &ContributorRoleDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *ContributorRoleClient) DeleteOne(cr *ContributorRole) *ContributorRoleDeleteOne { + return c.DeleteOneID(cr.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *ContributorRoleClient) DeleteOneID(id int) *ContributorRoleDeleteOne { + builder := c.Delete().Where(contributorrole.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &ContributorRoleDeleteOne{builder} +} + +// Query returns a query builder for ContributorRole. +func (c *ContributorRoleClient) Query() *ContributorRoleQuery { + return &ContributorRoleQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeContributorRole}, + inters: c.Interceptors(), + } +} + +// Get returns a ContributorRole entity by its id. +func (c *ContributorRoleClient) Get(ctx context.Context, id int) (*ContributorRole, error) { + return c.Query().Where(contributorrole.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *ContributorRoleClient) GetX(ctx context.Context, id int) *ContributorRole { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryPostContributors queries the post_contributors edge of a ContributorRole. 
+func (c *ContributorRoleClient) QueryPostContributors(cr *ContributorRole) *PostContributorQuery { + query := (&PostContributorClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := cr.ID + step := sqlgraph.NewStep( + sqlgraph.From(contributorrole.Table, contributorrole.FieldID, id), + sqlgraph.To(postcontributor.Table, postcontributor.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, contributorrole.PostContributorsTable, contributorrole.PostContributorsColumn), + ) + fromV = sqlgraph.Neighbors(cr.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *ContributorRoleClient) Hooks() []Hook { + return c.hooks.ContributorRole +} + +// Interceptors returns the client interceptors. +func (c *ContributorRoleClient) Interceptors() []Interceptor { + return c.inters.ContributorRole +} + +func (c *ContributorRoleClient) mutate(ctx context.Context, m *ContributorRoleMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ContributorRoleCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ContributorRoleUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ContributorRoleUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ContributorRoleDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown ContributorRole mutation op: %q", m.Op()) + } +} + +// ContributorSocialLinkClient is a client for the ContributorSocialLink schema. +type ContributorSocialLinkClient struct { + config +} + +// NewContributorSocialLinkClient returns a client for the ContributorSocialLink from the given config. +func NewContributorSocialLinkClient(c config) *ContributorSocialLinkClient { + return &ContributorSocialLinkClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `contributorsociallink.Hooks(f(g(h())))`. +func (c *ContributorSocialLinkClient) Use(hooks ...Hook) { + c.hooks.ContributorSocialLink = append(c.hooks.ContributorSocialLink, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `contributorsociallink.Intercept(f(g(h())))`. +func (c *ContributorSocialLinkClient) Intercept(interceptors ...Interceptor) { + c.inters.ContributorSocialLink = append(c.inters.ContributorSocialLink, interceptors...) +} + +// Create returns a builder for creating a ContributorSocialLink entity. +func (c *ContributorSocialLinkClient) Create() *ContributorSocialLinkCreate { + mutation := newContributorSocialLinkMutation(c.config, OpCreate) + return &ContributorSocialLinkCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of ContributorSocialLink entities. +func (c *ContributorSocialLinkClient) CreateBulk(builders ...*ContributorSocialLinkCreate) *ContributorSocialLinkCreateBulk { + return &ContributorSocialLinkCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *ContributorSocialLinkClient) MapCreateBulk(slice any, setFunc func(*ContributorSocialLinkCreate, int)) *ContributorSocialLinkCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ContributorSocialLinkCreateBulk{err: fmt.Errorf("calling to ContributorSocialLinkClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ContributorSocialLinkCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ContributorSocialLinkCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for ContributorSocialLink. +func (c *ContributorSocialLinkClient) Update() *ContributorSocialLinkUpdate { + mutation := newContributorSocialLinkMutation(c.config, OpUpdate) + return &ContributorSocialLinkUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *ContributorSocialLinkClient) UpdateOne(csl *ContributorSocialLink) *ContributorSocialLinkUpdateOne { + mutation := newContributorSocialLinkMutation(c.config, OpUpdateOne, withContributorSocialLink(csl)) + return &ContributorSocialLinkUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *ContributorSocialLinkClient) UpdateOneID(id int) *ContributorSocialLinkUpdateOne { + mutation := newContributorSocialLinkMutation(c.config, OpUpdateOne, withContributorSocialLinkID(id)) + return &ContributorSocialLinkUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for ContributorSocialLink. +func (c *ContributorSocialLinkClient) Delete() *ContributorSocialLinkDelete { + mutation := newContributorSocialLinkMutation(c.config, OpDelete) + return &ContributorSocialLinkDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *ContributorSocialLinkClient) DeleteOne(csl *ContributorSocialLink) *ContributorSocialLinkDeleteOne { + return c.DeleteOneID(csl.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *ContributorSocialLinkClient) DeleteOneID(id int) *ContributorSocialLinkDeleteOne { + builder := c.Delete().Where(contributorsociallink.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &ContributorSocialLinkDeleteOne{builder} +} + +// Query returns a query builder for ContributorSocialLink. +func (c *ContributorSocialLinkClient) Query() *ContributorSocialLinkQuery { + return &ContributorSocialLinkQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeContributorSocialLink}, + inters: c.Interceptors(), + } +} + +// Get returns a ContributorSocialLink entity by its id. +func (c *ContributorSocialLinkClient) Get(ctx context.Context, id int) (*ContributorSocialLink, error) { + return c.Query().Where(contributorsociallink.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *ContributorSocialLinkClient) GetX(ctx context.Context, id int) *ContributorSocialLink { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryContributor queries the contributor edge of a ContributorSocialLink. 
+// QueryContributor queries the contributor edge of a ContributorSocialLink.
+func (c *ContributorSocialLinkClient) QueryContributor(csl *ContributorSocialLink) *ContributorQuery {
+	query := (&ContributorClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := csl.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(contributorsociallink.Table, contributorsociallink.FieldID, id),
+			sqlgraph.To(contributor.Table, contributor.FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, contributorsociallink.ContributorTable, contributorsociallink.ContributorColumn),
+		)
+		fromV = sqlgraph.Neighbors(csl.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// Hooks returns the client hooks.
+func (c *ContributorSocialLinkClient) Hooks() []Hook {
+	return c.hooks.ContributorSocialLink
+}
+
+// Interceptors returns the client interceptors.
+func (c *ContributorSocialLinkClient) Interceptors() []Interceptor {
+	return c.inters.ContributorSocialLink
+}
+
+func (c *ContributorSocialLinkClient) mutate(ctx context.Context, m *ContributorSocialLinkMutation) (Value, error) {
+	switch m.Op() {
+	case OpCreate:
+		return (&ContributorSocialLinkCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdate:
+		return (&ContributorSocialLinkUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdateOne:
+		return (&ContributorSocialLinkUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpDelete, OpDeleteOne:
+		return (&ContributorSocialLinkDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+	default:
+		return nil, fmt.Errorf("ent: unknown ContributorSocialLink mutation op: %q", m.Op())
+	}
+}
+
+// DailyClient is a client for the Daily schema.
+type DailyClient struct {
+	config
+}
+
+// NewDailyClient returns a client for the Daily from the given config.
+func NewDailyClient(c config) *DailyClient {
+	return &DailyClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `daily.Hooks(f(g(h())))`.
+func (c *DailyClient) Use(hooks ...Hook) {
+	c.hooks.Daily = append(c.hooks.Daily, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `daily.Intercept(f(g(h())))`.
+func (c *DailyClient) Intercept(interceptors ...Interceptor) {
+	c.inters.Daily = append(c.inters.Daily, interceptors...)
+}
+
+// Create returns a builder for creating a Daily entity.
+func (c *DailyClient) Create() *DailyCreate {
+	mutation := newDailyMutation(c.config, OpCreate)
+	return &DailyCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of Daily entities.
+func (c *DailyClient) CreateBulk(builders ...*DailyCreate) *DailyCreateBulk {
+	return &DailyCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *DailyClient) MapCreateBulk(slice any, setFunc func(*DailyCreate, int)) *DailyCreateBulk {
+	rv := reflect.ValueOf(slice)
+	if rv.Kind() != reflect.Slice {
+		return &DailyCreateBulk{err: fmt.Errorf("calling to DailyClient.MapCreateBulk with wrong type %T, need slice", slice)}
+	}
+	builders := make([]*DailyCreate, rv.Len())
+	for i := 0; i < rv.Len(); i++ {
+		builders[i] = c.Create()
+		setFunc(builders[i], i)
+	}
+	return &DailyCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for Daily.
+func (c *DailyClient) Update() *DailyUpdate {
+	mutation := newDailyMutation(c.config, OpUpdate)
+	return &DailyUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *DailyClient) UpdateOne(d *Daily) *DailyUpdateOne {
+	mutation := newDailyMutation(c.config, OpUpdateOne, withDaily(d))
+	return &DailyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *DailyClient) UpdateOneID(id string) *DailyUpdateOne {
+	mutation := newDailyMutation(c.config, OpUpdateOne, withDailyID(id))
+	return &DailyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for Daily.
+func (c *DailyClient) Delete() *DailyDelete {
+	mutation := newDailyMutation(c.config, OpDelete)
+	return &DailyDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *DailyClient) DeleteOne(d *Daily) *DailyDeleteOne {
+	return c.DeleteOneID(d.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *DailyClient) DeleteOneID(id string) *DailyDeleteOne {
+	builder := c.Delete().Where(daily.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &DailyDeleteOne{builder}
+}
+
+// Query returns a query builder for Daily.
+func (c *DailyClient) Query() *DailyQuery {
+	return &DailyQuery{
+		config: c.config,
+		ctx:    &QueryContext{Type: TypeDaily},
+		inters: c.Interceptors(),
+	}
+}
+
+// Get returns a Daily entity by its id.
+func (c *DailyClient) Get(ctx context.Context, id string) (*Daily, error) {
+	return c.Query().Where(daily.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *DailyClient) GetX(ctx context.Context, id string) *Daily {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// QueryCategory queries the category edge of a Daily.
+func (c *DailyClient) QueryCategory(d *Daily) *CategoryQuery {
+	query := (&CategoryClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := d.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(daily.Table, daily.FieldID, id),
+			sqlgraph.To(category.Table, category.FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, daily.CategoryTable, daily.CategoryColumn),
+		)
+		fromV = sqlgraph.Neighbors(d.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
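+// Example (illustrative sketch; the id value is made up, Daily uses string ids):
+//
+//	d, err := client.Daily.Get(ctx, "some-daily-id")
+//	if err != nil {
+//		// handle the lookup error
+//	}
+//	category, err := client.Daily.QueryCategory(d).Only(ctx)
+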
+// QueryContents queries the contents edge of a Daily.
+func (c *DailyClient) QueryContents(d *Daily) *DailyContentQuery {
+	query := (&DailyContentClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := d.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(daily.Table, daily.FieldID, id),
+			sqlgraph.To(dailycontent.Table, dailycontent.FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, daily.ContentsTable, daily.ContentsColumn),
+		)
+		fromV = sqlgraph.Neighbors(d.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// Hooks returns the client hooks.
+func (c *DailyClient) Hooks() []Hook {
+	return c.hooks.Daily
+}
+
+// Interceptors returns the client interceptors.
+func (c *DailyClient) Interceptors() []Interceptor {
+	return c.inters.Daily
+}
+
+func (c *DailyClient) mutate(ctx context.Context, m *DailyMutation) (Value, error) {
+	switch m.Op() {
+	case OpCreate:
+		return (&DailyCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdate:
+		return (&DailyUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdateOne:
+		return (&DailyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpDelete, OpDeleteOne:
+		return (&DailyDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+	default:
+		return nil, fmt.Errorf("ent: unknown Daily mutation op: %q", m.Op())
+	}
+}
+
+// DailyCategoryClient is a client for the DailyCategory schema.
+type DailyCategoryClient struct {
+	config
+}
+
+// NewDailyCategoryClient returns a client for the DailyCategory from the given config.
+func NewDailyCategoryClient(c config) *DailyCategoryClient {
+	return &DailyCategoryClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `dailycategory.Hooks(f(g(h())))`.
+func (c *DailyCategoryClient) Use(hooks ...Hook) {
+	c.hooks.DailyCategory = append(c.hooks.DailyCategory, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `dailycategory.Intercept(f(g(h())))`.
+func (c *DailyCategoryClient) Intercept(interceptors ...Interceptor) {
+	c.inters.DailyCategory = append(c.inters.DailyCategory, interceptors...)
+}
+
+// Create returns a builder for creating a DailyCategory entity.
+func (c *DailyCategoryClient) Create() *DailyCategoryCreate {
+	mutation := newDailyCategoryMutation(c.config, OpCreate)
+	return &DailyCategoryCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of DailyCategory entities.
+func (c *DailyCategoryClient) CreateBulk(builders ...*DailyCategoryCreate) *DailyCategoryCreateBulk {
+	return &DailyCategoryCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *DailyCategoryClient) MapCreateBulk(slice any, setFunc func(*DailyCategoryCreate, int)) *DailyCategoryCreateBulk {
+	rv := reflect.ValueOf(slice)
+	if rv.Kind() != reflect.Slice {
+		return &DailyCategoryCreateBulk{err: fmt.Errorf("calling to DailyCategoryClient.MapCreateBulk with wrong type %T, need slice", slice)}
+	}
+	builders := make([]*DailyCategoryCreate, rv.Len())
+	for i := 0; i < rv.Len(); i++ {
+		builders[i] = c.Create()
+		setFunc(builders[i], i)
+	}
+	return &DailyCategoryCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for DailyCategory.
+func (c *DailyCategoryClient) Update() *DailyCategoryUpdate {
+	mutation := newDailyCategoryMutation(c.config, OpUpdate)
+	return &DailyCategoryUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *DailyCategoryClient) UpdateOne(dc *DailyCategory) *DailyCategoryUpdateOne {
+	mutation := newDailyCategoryMutation(c.config, OpUpdateOne, withDailyCategory(dc))
+	return &DailyCategoryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *DailyCategoryClient) UpdateOneID(id int) *DailyCategoryUpdateOne {
+	mutation := newDailyCategoryMutation(c.config, OpUpdateOne, withDailyCategoryID(id))
+	return &DailyCategoryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for DailyCategory.
+func (c *DailyCategoryClient) Delete() *DailyCategoryDelete {
+	mutation := newDailyCategoryMutation(c.config, OpDelete)
+	return &DailyCategoryDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *DailyCategoryClient) DeleteOne(dc *DailyCategory) *DailyCategoryDeleteOne {
+	return c.DeleteOneID(dc.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *DailyCategoryClient) DeleteOneID(id int) *DailyCategoryDeleteOne {
+	builder := c.Delete().Where(dailycategory.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &DailyCategoryDeleteOne{builder}
+}
+
+// Query returns a query builder for DailyCategory.
+func (c *DailyCategoryClient) Query() *DailyCategoryQuery {
+	return &DailyCategoryQuery{
+		config: c.config,
+		ctx:    &QueryContext{Type: TypeDailyCategory},
+		inters: c.Interceptors(),
+	}
+}
+
+// Get returns a DailyCategory entity by its id.
+func (c *DailyCategoryClient) Get(ctx context.Context, id int) (*DailyCategory, error) {
+	return c.Query().Where(dailycategory.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *DailyCategoryClient) GetX(ctx context.Context, id int) *DailyCategory {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
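+// Example (illustrative sketch): delete a category by id; Exec runs the delete
+// and reports the error, if any.
+//
+//	if err := client.DailyCategory.DeleteOneID(id).Exec(ctx); err != nil {
+//		// handle the delete error
+//	}
+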
+// QueryContents queries the contents edge of a DailyCategory.
+func (c *DailyCategoryClient) QueryContents(dc *DailyCategory) *DailyCategoryContentQuery {
+	query := (&DailyCategoryContentClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := dc.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(dailycategory.Table, dailycategory.FieldID, id),
+			sqlgraph.To(dailycategorycontent.Table, dailycategorycontent.FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, dailycategory.ContentsTable, dailycategory.ContentsColumn),
+		)
+		fromV = sqlgraph.Neighbors(dc.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// QueryDailyItems queries the daily_items edge of a DailyCategory.
+func (c *DailyCategoryClient) QueryDailyItems(dc *DailyCategory) *DailyQuery {
+	query := (&DailyClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := dc.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(dailycategory.Table, dailycategory.FieldID, id),
+			sqlgraph.To(daily.Table, daily.FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, dailycategory.DailyItemsTable, dailycategory.DailyItemsColumn),
+		)
+		fromV = sqlgraph.Neighbors(dc.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// Hooks returns the client hooks.
+func (c *DailyCategoryClient) Hooks() []Hook {
+	return c.hooks.DailyCategory
+}
+
+// Interceptors returns the client interceptors.
+func (c *DailyCategoryClient) Interceptors() []Interceptor {
+	return c.inters.DailyCategory
+}
+
+func (c *DailyCategoryClient) mutate(ctx context.Context, m *DailyCategoryMutation) (Value, error) {
+	switch m.Op() {
+	case OpCreate:
+		return (&DailyCategoryCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdate:
+		return (&DailyCategoryUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdateOne:
+		return (&DailyCategoryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpDelete, OpDeleteOne:
+		return (&DailyCategoryDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+	default:
+		return nil, fmt.Errorf("ent: unknown DailyCategory mutation op: %q", m.Op())
+	}
+}
+
+// DailyCategoryContentClient is a client for the DailyCategoryContent schema.
+type DailyCategoryContentClient struct {
+	config
+}
+
+// NewDailyCategoryContentClient returns a client for the DailyCategoryContent from the given config.
+func NewDailyCategoryContentClient(c config) *DailyCategoryContentClient {
+	return &DailyCategoryContentClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `dailycategorycontent.Hooks(f(g(h())))`.
+func (c *DailyCategoryContentClient) Use(hooks ...Hook) {
+	c.hooks.DailyCategoryContent = append(c.hooks.DailyCategoryContent, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `dailycategorycontent.Intercept(f(g(h())))`.
+func (c *DailyCategoryContentClient) Intercept(interceptors ...Interceptor) {
+	c.inters.DailyCategoryContent = append(c.inters.DailyCategoryContent, interceptors...)
+}
+
+// Create returns a builder for creating a DailyCategoryContent entity.
+func (c *DailyCategoryContentClient) Create() *DailyCategoryContentCreate {
+	mutation := newDailyCategoryContentMutation(c.config, OpCreate)
+	return &DailyCategoryContentCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of DailyCategoryContent entities.
+func (c *DailyCategoryContentClient) CreateBulk(builders ...*DailyCategoryContentCreate) *DailyCategoryContentCreateBulk {
+	return &DailyCategoryContentCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *DailyCategoryContentClient) MapCreateBulk(slice any, setFunc func(*DailyCategoryContentCreate, int)) *DailyCategoryContentCreateBulk {
+	rv := reflect.ValueOf(slice)
+	if rv.Kind() != reflect.Slice {
+		return &DailyCategoryContentCreateBulk{err: fmt.Errorf("calling to DailyCategoryContentClient.MapCreateBulk with wrong type %T, need slice", slice)}
+	}
+	builders := make([]*DailyCategoryContentCreate, rv.Len())
+	for i := 0; i < rv.Len(); i++ {
+		builders[i] = c.Create()
+		setFunc(builders[i], i)
+	}
+	return &DailyCategoryContentCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for DailyCategoryContent.
+func (c *DailyCategoryContentClient) Update() *DailyCategoryContentUpdate {
+	mutation := newDailyCategoryContentMutation(c.config, OpUpdate)
+	return &DailyCategoryContentUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *DailyCategoryContentClient) UpdateOne(dcc *DailyCategoryContent) *DailyCategoryContentUpdateOne {
+	mutation := newDailyCategoryContentMutation(c.config, OpUpdateOne, withDailyCategoryContent(dcc))
+	return &DailyCategoryContentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *DailyCategoryContentClient) UpdateOneID(id int) *DailyCategoryContentUpdateOne {
+	mutation := newDailyCategoryContentMutation(c.config, OpUpdateOne, withDailyCategoryContentID(id))
+	return &DailyCategoryContentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for DailyCategoryContent.
+func (c *DailyCategoryContentClient) Delete() *DailyCategoryContentDelete {
+	mutation := newDailyCategoryContentMutation(c.config, OpDelete)
+	return &DailyCategoryContentDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *DailyCategoryContentClient) DeleteOne(dcc *DailyCategoryContent) *DailyCategoryContentDeleteOne {
+	return c.DeleteOneID(dcc.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *DailyCategoryContentClient) DeleteOneID(id int) *DailyCategoryContentDeleteOne {
+	builder := c.Delete().Where(dailycategorycontent.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &DailyCategoryContentDeleteOne{builder}
+}
+
+// Query returns a query builder for DailyCategoryContent.
+func (c *DailyCategoryContentClient) Query() *DailyCategoryContentQuery {
+	return &DailyCategoryContentQuery{
+		config: c.config,
+		ctx:    &QueryContext{Type: TypeDailyCategoryContent},
+		inters: c.Interceptors(),
+	}
+}
+
+// Get returns a DailyCategoryContent entity by its id.
+func (c *DailyCategoryContentClient) Get(ctx context.Context, id int) (*DailyCategoryContent, error) {
+	return c.Query().Where(dailycategorycontent.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *DailyCategoryContentClient) GetX(ctx context.Context, id int) *DailyCategoryContent {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// QueryCategory queries the category edge of a DailyCategoryContent.
+func (c *DailyCategoryContentClient) QueryCategory(dcc *DailyCategoryContent) *DailyCategoryQuery {
+	query := (&DailyCategoryClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := dcc.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(dailycategorycontent.Table, dailycategorycontent.FieldID, id),
+			sqlgraph.To(dailycategory.Table, dailycategory.FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, dailycategorycontent.CategoryTable, dailycategorycontent.CategoryColumn),
+		)
+		fromV = sqlgraph.Neighbors(dcc.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// Hooks returns the client hooks.
+func (c *DailyCategoryContentClient) Hooks() []Hook {
+	return c.hooks.DailyCategoryContent
+}
+
+// Interceptors returns the client interceptors.
+func (c *DailyCategoryContentClient) Interceptors() []Interceptor {
+	return c.inters.DailyCategoryContent
+}
+
+func (c *DailyCategoryContentClient) mutate(ctx context.Context, m *DailyCategoryContentMutation) (Value, error) {
+	switch m.Op() {
+	case OpCreate:
+		return (&DailyCategoryContentCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdate:
+		return (&DailyCategoryContentUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdateOne:
+		return (&DailyCategoryContentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpDelete, OpDeleteOne:
+		return (&DailyCategoryContentDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+	default:
+		return nil, fmt.Errorf("ent: unknown DailyCategoryContent mutation op: %q", m.Op())
+	}
+}
+
+// DailyContentClient is a client for the DailyContent schema.
+type DailyContentClient struct {
+	config
+}
+
+// NewDailyContentClient returns a client for the DailyContent from the given config.
+func NewDailyContentClient(c config) *DailyContentClient {
+	return &DailyContentClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `dailycontent.Hooks(f(g(h())))`.
+func (c *DailyContentClient) Use(hooks ...Hook) {
+	c.hooks.DailyContent = append(c.hooks.DailyContent, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `dailycontent.Intercept(f(g(h())))`.
+func (c *DailyContentClient) Intercept(interceptors ...Interceptor) {
+	c.inters.DailyContent = append(c.inters.DailyContent, interceptors...)
+}
+
+// Create returns a builder for creating a DailyContent entity.
+func (c *DailyContentClient) Create() *DailyContentCreate {
+	mutation := newDailyContentMutation(c.config, OpCreate)
+	return &DailyContentCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
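+// Example (illustrative sketch): register a mutation hook on this client using
+// the generated ent/hook adapter; the body here only forwards the mutation.
+//
+//	client.DailyContent.Use(func(next ent.Mutator) ent.Mutator {
+//		return hook.DailyContentFunc(func(ctx context.Context, m *ent.DailyContentMutation) (ent.Value, error) {
+//			// inspect or validate m before it is applied
+//			return next.Mutate(ctx, m)
+//		})
+//	})
+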
+// CreateBulk returns a builder for creating a bulk of DailyContent entities.
+func (c *DailyContentClient) CreateBulk(builders ...*DailyContentCreate) *DailyContentCreateBulk {
+	return &DailyContentCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *DailyContentClient) MapCreateBulk(slice any, setFunc func(*DailyContentCreate, int)) *DailyContentCreateBulk {
+	rv := reflect.ValueOf(slice)
+	if rv.Kind() != reflect.Slice {
+		return &DailyContentCreateBulk{err: fmt.Errorf("calling to DailyContentClient.MapCreateBulk with wrong type %T, need slice", slice)}
+	}
+	builders := make([]*DailyContentCreate, rv.Len())
+	for i := 0; i < rv.Len(); i++ {
+		builders[i] = c.Create()
+		setFunc(builders[i], i)
+	}
+	return &DailyContentCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for DailyContent.
+func (c *DailyContentClient) Update() *DailyContentUpdate {
+	mutation := newDailyContentMutation(c.config, OpUpdate)
+	return &DailyContentUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *DailyContentClient) UpdateOne(dc *DailyContent) *DailyContentUpdateOne {
+	mutation := newDailyContentMutation(c.config, OpUpdateOne, withDailyContent(dc))
+	return &DailyContentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *DailyContentClient) UpdateOneID(id int) *DailyContentUpdateOne {
+	mutation := newDailyContentMutation(c.config, OpUpdateOne, withDailyContentID(id))
+	return &DailyContentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for DailyContent.
+func (c *DailyContentClient) Delete() *DailyContentDelete {
+	mutation := newDailyContentMutation(c.config, OpDelete)
+	return &DailyContentDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *DailyContentClient) DeleteOne(dc *DailyContent) *DailyContentDeleteOne {
+	return c.DeleteOneID(dc.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *DailyContentClient) DeleteOneID(id int) *DailyContentDeleteOne {
+	builder := c.Delete().Where(dailycontent.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &DailyContentDeleteOne{builder}
+}
+
+// Query returns a query builder for DailyContent.
+func (c *DailyContentClient) Query() *DailyContentQuery {
+	return &DailyContentQuery{
+		config: c.config,
+		ctx:    &QueryContext{Type: TypeDailyContent},
+		inters: c.Interceptors(),
+	}
+}
+
+// Get returns a DailyContent entity by its id.
+func (c *DailyContentClient) Get(ctx context.Context, id int) (*DailyContent, error) {
+	return c.Query().Where(dailycontent.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *DailyContentClient) GetX(ctx context.Context, id int) *DailyContent {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// QueryDaily queries the daily edge of a DailyContent.
+func (c *DailyContentClient) QueryDaily(dc *DailyContent) *DailyQuery {
+	query := (&DailyClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := dc.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(dailycontent.Table, dailycontent.FieldID, id),
+			sqlgraph.To(daily.Table, daily.FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, dailycontent.DailyTable, dailycontent.DailyColumn),
+		)
+		fromV = sqlgraph.Neighbors(dc.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// Hooks returns the client hooks.
+func (c *DailyContentClient) Hooks() []Hook {
+	return c.hooks.DailyContent
+}
+
+// Interceptors returns the client interceptors.
+func (c *DailyContentClient) Interceptors() []Interceptor {
+	return c.inters.DailyContent
+}
+
+func (c *DailyContentClient) mutate(ctx context.Context, m *DailyContentMutation) (Value, error) {
+	switch m.Op() {
+	case OpCreate:
+		return (&DailyContentCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdate:
+		return (&DailyContentUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdateOne:
+		return (&DailyContentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpDelete, OpDeleteOne:
+		return (&DailyContentDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+	default:
+		return nil, fmt.Errorf("ent: unknown DailyContent mutation op: %q", m.Op())
+	}
+}
+
+// MediaClient is a client for the Media schema.
+type MediaClient struct {
+	config
+}
+
+// NewMediaClient returns a client for the Media from the given config.
+func NewMediaClient(c config) *MediaClient {
+	return &MediaClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `media.Hooks(f(g(h())))`.
+func (c *MediaClient) Use(hooks ...Hook) {
+	c.hooks.Media = append(c.hooks.Media, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `media.Intercept(f(g(h())))`.
+func (c *MediaClient) Intercept(interceptors ...Interceptor) {
+	c.inters.Media = append(c.inters.Media, interceptors...)
+}
+
+// Create returns a builder for creating a Media entity.
+func (c *MediaClient) Create() *MediaCreate {
+	mutation := newMediaMutation(c.config, OpCreate)
+	return &MediaCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of Media entities.
+func (c *MediaClient) CreateBulk(builders ...*MediaCreate) *MediaCreateBulk {
+	return &MediaCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *MediaClient) MapCreateBulk(slice any, setFunc func(*MediaCreate, int)) *MediaCreateBulk {
+	rv := reflect.ValueOf(slice)
+	if rv.Kind() != reflect.Slice {
+		return &MediaCreateBulk{err: fmt.Errorf("calling to MediaClient.MapCreateBulk with wrong type %T, need slice", slice)}
+	}
+	builders := make([]*MediaCreate, rv.Len())
+	for i := 0; i < rv.Len(); i++ {
+		builders[i] = c.Create()
+		setFunc(builders[i], i)
+	}
+	return &MediaCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for Media.
+func (c *MediaClient) Update() *MediaUpdate {
+	mutation := newMediaMutation(c.config, OpUpdate)
+	return &MediaUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *MediaClient) UpdateOne(m *Media) *MediaUpdateOne {
+	mutation := newMediaMutation(c.config, OpUpdateOne, withMedia(m))
+	return &MediaUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *MediaClient) UpdateOneID(id int) *MediaUpdateOne {
+	mutation := newMediaMutation(c.config, OpUpdateOne, withMediaID(id))
+	return &MediaUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for Media.
+func (c *MediaClient) Delete() *MediaDelete {
+	mutation := newMediaMutation(c.config, OpDelete)
+	return &MediaDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *MediaClient) DeleteOne(m *Media) *MediaDeleteOne {
+	return c.DeleteOneID(m.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *MediaClient) DeleteOneID(id int) *MediaDeleteOne {
+	builder := c.Delete().Where(media.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &MediaDeleteOne{builder}
+}
+
+// Query returns a query builder for Media.
+func (c *MediaClient) Query() *MediaQuery {
+	return &MediaQuery{
+		config: c.config,
+		ctx:    &QueryContext{Type: TypeMedia},
+		inters: c.Interceptors(),
+	}
+}
+
+// Get returns a Media entity by its id.
+func (c *MediaClient) Get(ctx context.Context, id int) (*Media, error) {
+	return c.Query().Where(media.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *MediaClient) GetX(ctx context.Context, id int) *Media {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// QueryOwner queries the owner edge of a Media.
+func (c *MediaClient) QueryOwner(m *Media) *UserQuery {
+	query := (&UserClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := m.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(media.Table, media.FieldID, id),
+			sqlgraph.To(user.Table, user.FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, media.OwnerTable, media.OwnerColumn),
+		)
+		fromV = sqlgraph.Neighbors(m.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// Hooks returns the client hooks.
+func (c *MediaClient) Hooks() []Hook {
+	return c.hooks.Media
+}
+
+// Interceptors returns the client interceptors.
+func (c *MediaClient) Interceptors() []Interceptor {
+	return c.inters.Media
+}
+
+func (c *MediaClient) mutate(ctx context.Context, m *MediaMutation) (Value, error) {
+	switch m.Op() {
+	case OpCreate:
+		return (&MediaCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdate:
+		return (&MediaUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdateOne:
+		return (&MediaUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpDelete, OpDeleteOne:
+		return (&MediaDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+	default:
+		return nil, fmt.Errorf("ent: unknown Media mutation op: %q", m.Op())
+	}
+}
+
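+// Example (illustrative sketch): walk the M2O owner edge from a fetched Media
+// row to the user that owns it.
+//
+//	owner, err := client.Media.QueryOwner(m).Only(ctx)
+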
+// PermissionClient is a client for the Permission schema.
+type PermissionClient struct {
+	config
+}
+
+// NewPermissionClient returns a client for the Permission from the given config.
+func NewPermissionClient(c config) *PermissionClient {
+	return &PermissionClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `permission.Hooks(f(g(h())))`.
+func (c *PermissionClient) Use(hooks ...Hook) {
+	c.hooks.Permission = append(c.hooks.Permission, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `permission.Intercept(f(g(h())))`.
+func (c *PermissionClient) Intercept(interceptors ...Interceptor) {
+	c.inters.Permission = append(c.inters.Permission, interceptors...)
+}
+
+// Create returns a builder for creating a Permission entity.
+func (c *PermissionClient) Create() *PermissionCreate {
+	mutation := newPermissionMutation(c.config, OpCreate)
+	return &PermissionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of Permission entities.
+func (c *PermissionClient) CreateBulk(builders ...*PermissionCreate) *PermissionCreateBulk {
+	return &PermissionCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *PermissionClient) MapCreateBulk(slice any, setFunc func(*PermissionCreate, int)) *PermissionCreateBulk {
+	rv := reflect.ValueOf(slice)
+	if rv.Kind() != reflect.Slice {
+		return &PermissionCreateBulk{err: fmt.Errorf("calling to PermissionClient.MapCreateBulk with wrong type %T, need slice", slice)}
+	}
+	builders := make([]*PermissionCreate, rv.Len())
+	for i := 0; i < rv.Len(); i++ {
+		builders[i] = c.Create()
+		setFunc(builders[i], i)
+	}
+	return &PermissionCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for Permission.
+func (c *PermissionClient) Update() *PermissionUpdate {
+	mutation := newPermissionMutation(c.config, OpUpdate)
+	return &PermissionUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *PermissionClient) UpdateOne(pe *Permission) *PermissionUpdateOne {
+	mutation := newPermissionMutation(c.config, OpUpdateOne, withPermission(pe))
+	return &PermissionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *PermissionClient) UpdateOneID(id int) *PermissionUpdateOne {
+	mutation := newPermissionMutation(c.config, OpUpdateOne, withPermissionID(id))
+	return &PermissionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for Permission.
+func (c *PermissionClient) Delete() *PermissionDelete {
+	mutation := newPermissionMutation(c.config, OpDelete)
+	return &PermissionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *PermissionClient) DeleteOne(pe *Permission) *PermissionDeleteOne {
+	return c.DeleteOneID(pe.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *PermissionClient) DeleteOneID(id int) *PermissionDeleteOne {
+	builder := c.Delete().Where(permission.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &PermissionDeleteOne{builder}
+}
+
+// Query returns a query builder for Permission.
+func (c *PermissionClient) Query() *PermissionQuery {
+	return &PermissionQuery{
+		config: c.config,
+		ctx:    &QueryContext{Type: TypePermission},
+		inters: c.Interceptors(),
+	}
+}
+
+// Get returns a Permission entity by its id.
+func (c *PermissionClient) Get(ctx context.Context, id int) (*Permission, error) {
+	return c.Query().Where(permission.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *PermissionClient) GetX(ctx context.Context, id int) *Permission {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// QueryRoles queries the roles edge of a Permission.
+func (c *PermissionClient) QueryRoles(pe *Permission) *RoleQuery {
+	query := (&RoleClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := pe.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(permission.Table, permission.FieldID, id),
+			sqlgraph.To(role.Table, role.FieldID),
+			sqlgraph.Edge(sqlgraph.M2M, true, permission.RolesTable, permission.RolesPrimaryKey...),
+		)
+		fromV = sqlgraph.Neighbors(pe.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// Hooks returns the client hooks.
+func (c *PermissionClient) Hooks() []Hook {
+	return c.hooks.Permission
+}
+
+// Interceptors returns the client interceptors.
+func (c *PermissionClient) Interceptors() []Interceptor {
+	return c.inters.Permission
+}
+
+func (c *PermissionClient) mutate(ctx context.Context, m *PermissionMutation) (Value, error) {
+	switch m.Op() {
+	case OpCreate:
+		return (&PermissionCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdate:
+		return (&PermissionUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdateOne:
+		return (&PermissionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpDelete, OpDeleteOne:
+		return (&PermissionDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+	default:
+		return nil, fmt.Errorf("ent: unknown Permission mutation op: %q", m.Op())
+	}
+}
+
+// PostClient is a client for the Post schema.
+type PostClient struct {
+	config
+}
+
+// NewPostClient returns a client for the Post from the given config.
+func NewPostClient(c config) *PostClient {
+	return &PostClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `post.Hooks(f(g(h())))`.
+func (c *PostClient) Use(hooks ...Hook) {
+	c.hooks.Post = append(c.hooks.Post, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `post.Intercept(f(g(h())))`.
+func (c *PostClient) Intercept(interceptors ...Interceptor) {
+	c.inters.Post = append(c.inters.Post, interceptors...)
+}
+
+// Create returns a builder for creating a Post entity.
+func (c *PostClient) Create() *PostCreate {
+	mutation := newPostMutation(c.config, OpCreate)
+	return &PostCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
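+// Example (illustrative sketch): QueryRoles follows the role/permission M2M
+// edge through its join table, so this lists every role holding pe.
+//
+//	roles, err := client.Permission.QueryRoles(pe).All(ctx)
+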
+// CreateBulk returns a builder for creating a bulk of Post entities.
+func (c *PostClient) CreateBulk(builders ...*PostCreate) *PostCreateBulk {
+	return &PostCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *PostClient) MapCreateBulk(slice any, setFunc func(*PostCreate, int)) *PostCreateBulk {
+	rv := reflect.ValueOf(slice)
+	if rv.Kind() != reflect.Slice {
+		return &PostCreateBulk{err: fmt.Errorf("calling to PostClient.MapCreateBulk with wrong type %T, need slice", slice)}
+	}
+	builders := make([]*PostCreate, rv.Len())
+	for i := 0; i < rv.Len(); i++ {
+		builders[i] = c.Create()
+		setFunc(builders[i], i)
+	}
+	return &PostCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for Post.
+func (c *PostClient) Update() *PostUpdate {
+	mutation := newPostMutation(c.config, OpUpdate)
+	return &PostUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *PostClient) UpdateOne(po *Post) *PostUpdateOne {
+	mutation := newPostMutation(c.config, OpUpdateOne, withPost(po))
+	return &PostUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *PostClient) UpdateOneID(id int) *PostUpdateOne {
+	mutation := newPostMutation(c.config, OpUpdateOne, withPostID(id))
+	return &PostUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for Post.
+func (c *PostClient) Delete() *PostDelete {
+	mutation := newPostMutation(c.config, OpDelete)
+	return &PostDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *PostClient) DeleteOne(po *Post) *PostDeleteOne {
+	return c.DeleteOneID(po.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *PostClient) DeleteOneID(id int) *PostDeleteOne {
+	builder := c.Delete().Where(post.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &PostDeleteOne{builder}
+}
+
+// Query returns a query builder for Post.
+func (c *PostClient) Query() *PostQuery {
+	return &PostQuery{
+		config: c.config,
+		ctx:    &QueryContext{Type: TypePost},
+		inters: c.Interceptors(),
+	}
+}
+
+// Get returns a Post entity by its id.
+func (c *PostClient) Get(ctx context.Context, id int) (*Post, error) {
+	return c.Query().Where(post.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *PostClient) GetX(ctx context.Context, id int) *Post {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// QueryContents queries the contents edge of a Post.
+func (c *PostClient) QueryContents(po *Post) *PostContentQuery {
+	query := (&PostContentClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := po.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(post.Table, post.FieldID, id),
+			sqlgraph.To(postcontent.Table, postcontent.FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, post.ContentsTable, post.ContentsColumn),
+		)
+		fromV = sqlgraph.Neighbors(po.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// QueryContributors queries the contributors edge of a Post.
+func (c *PostClient) QueryContributors(po *Post) *PostContributorQuery {
+	query := (&PostContributorClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := po.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(post.Table, post.FieldID, id),
+			sqlgraph.To(postcontributor.Table, postcontributor.FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, post.ContributorsTable, post.ContributorsColumn),
+		)
+		fromV = sqlgraph.Neighbors(po.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// QueryCategory queries the category edge of a Post.
+func (c *PostClient) QueryCategory(po *Post) *CategoryQuery {
+	query := (&CategoryClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := po.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(post.Table, post.FieldID, id),
+			sqlgraph.To(category.Table, category.FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, post.CategoryTable, post.CategoryColumn),
+		)
+		fromV = sqlgraph.Neighbors(po.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// Hooks returns the client hooks.
+func (c *PostClient) Hooks() []Hook {
+	return c.hooks.Post
+}
+
+// Interceptors returns the client interceptors.
+func (c *PostClient) Interceptors() []Interceptor {
+	return c.inters.Post
+}
+
+func (c *PostClient) mutate(ctx context.Context, m *PostMutation) (Value, error) {
+	switch m.Op() {
+	case OpCreate:
+		return (&PostCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdate:
+		return (&PostUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdateOne:
+		return (&PostUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpDelete, OpDeleteOne:
+		return (&PostDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+	default:
+		return nil, fmt.Errorf("ent: unknown Post mutation op: %q", m.Op())
+	}
+}
+
+// PostContentClient is a client for the PostContent schema.
+type PostContentClient struct {
+	config
+}
+
+// NewPostContentClient returns a client for the PostContent from the given config.
+func NewPostContentClient(c config) *PostContentClient {
+	return &PostContentClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `postcontent.Hooks(f(g(h())))`.
+func (c *PostContentClient) Use(hooks ...Hook) {
+	c.hooks.PostContent = append(c.hooks.PostContent, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `postcontent.Intercept(f(g(h())))`.
+func (c *PostContentClient) Intercept(interceptors ...Interceptor) {
+	c.inters.PostContent = append(c.inters.PostContent, interceptors...)
+}
+
+// Create returns a builder for creating a PostContent entity.
+func (c *PostContentClient) Create() *PostContentCreate {
+	mutation := newPostContentMutation(c.config, OpCreate)
+	return &PostContentCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of PostContent entities.
+func (c *PostContentClient) CreateBulk(builders ...*PostContentCreate) *PostContentCreateBulk {
+	return &PostContentCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *PostContentClient) MapCreateBulk(slice any, setFunc func(*PostContentCreate, int)) *PostContentCreateBulk {
+	rv := reflect.ValueOf(slice)
+	if rv.Kind() != reflect.Slice {
+		return &PostContentCreateBulk{err: fmt.Errorf("calling to PostContentClient.MapCreateBulk with wrong type %T, need slice", slice)}
+	}
+	builders := make([]*PostContentCreate, rv.Len())
+	for i := 0; i < rv.Len(); i++ {
+		builders[i] = c.Create()
+		setFunc(builders[i], i)
+	}
+	return &PostContentCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for PostContent.
+func (c *PostContentClient) Update() *PostContentUpdate {
+	mutation := newPostContentMutation(c.config, OpUpdate)
+	return &PostContentUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *PostContentClient) UpdateOne(pc *PostContent) *PostContentUpdateOne {
+	mutation := newPostContentMutation(c.config, OpUpdateOne, withPostContent(pc))
+	return &PostContentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *PostContentClient) UpdateOneID(id int) *PostContentUpdateOne {
+	mutation := newPostContentMutation(c.config, OpUpdateOne, withPostContentID(id))
+	return &PostContentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for PostContent.
+func (c *PostContentClient) Delete() *PostContentDelete {
+	mutation := newPostContentMutation(c.config, OpDelete)
+	return &PostContentDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *PostContentClient) DeleteOne(pc *PostContent) *PostContentDeleteOne {
+	return c.DeleteOneID(pc.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *PostContentClient) DeleteOneID(id int) *PostContentDeleteOne {
+	builder := c.Delete().Where(postcontent.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &PostContentDeleteOne{builder}
+}
+
+// Query returns a query builder for PostContent.
+func (c *PostContentClient) Query() *PostContentQuery {
+	return &PostContentQuery{
+		config: c.config,
+		ctx:    &QueryContext{Type: TypePostContent},
+		inters: c.Interceptors(),
+	}
+}
+
+// Get returns a PostContent entity by its id.
+func (c *PostContentClient) Get(ctx context.Context, id int) (*PostContent, error) {
+	return c.Query().Where(postcontent.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *PostContentClient) GetX(ctx context.Context, id int) *PostContent {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// QueryPost queries the post edge of a PostContent.
+func (c *PostContentClient) QueryPost(pc *PostContent) *PostQuery {
+	query := (&PostClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := pc.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(postcontent.Table, postcontent.FieldID, id),
+			sqlgraph.To(post.Table, post.FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, postcontent.PostTable, postcontent.PostColumn),
+		)
+		fromV = sqlgraph.Neighbors(pc.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// Hooks returns the client hooks.
+func (c *PostContentClient) Hooks() []Hook {
+	return c.hooks.PostContent
+}
+
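+// Example (illustrative sketch): register a query interceptor via the standard
+// ent adapters; the body here only forwards the query unchanged.
+//
+//	client.PostContent.Intercept(ent.InterceptFunc(func(next ent.Querier) ent.Querier {
+//		return ent.QuerierFunc(func(ctx context.Context, q ent.Query) (ent.Value, error) {
+//			// observe or rewrite q before it executes
+//			return next.Query(ctx, q)
+//		})
+//	}))
+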
+// Interceptors returns the client interceptors.
+func (c *PostContentClient) Interceptors() []Interceptor {
+	return c.inters.PostContent
+}
+
+func (c *PostContentClient) mutate(ctx context.Context, m *PostContentMutation) (Value, error) {
+	switch m.Op() {
+	case OpCreate:
+		return (&PostContentCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdate:
+		return (&PostContentUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdateOne:
+		return (&PostContentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpDelete, OpDeleteOne:
+		return (&PostContentDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+	default:
+		return nil, fmt.Errorf("ent: unknown PostContent mutation op: %q", m.Op())
+	}
+}
+
+// PostContributorClient is a client for the PostContributor schema.
+type PostContributorClient struct {
+	config
+}
+
+// NewPostContributorClient returns a client for the PostContributor from the given config.
+func NewPostContributorClient(c config) *PostContributorClient {
+	return &PostContributorClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `postcontributor.Hooks(f(g(h())))`.
+func (c *PostContributorClient) Use(hooks ...Hook) {
+	c.hooks.PostContributor = append(c.hooks.PostContributor, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `postcontributor.Intercept(f(g(h())))`.
+func (c *PostContributorClient) Intercept(interceptors ...Interceptor) {
+	c.inters.PostContributor = append(c.inters.PostContributor, interceptors...)
+}
+
+// Create returns a builder for creating a PostContributor entity.
+func (c *PostContributorClient) Create() *PostContributorCreate {
+	mutation := newPostContributorMutation(c.config, OpCreate)
+	return &PostContributorCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of PostContributor entities.
+func (c *PostContributorClient) CreateBulk(builders ...*PostContributorCreate) *PostContributorCreateBulk {
+	return &PostContributorCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *PostContributorClient) MapCreateBulk(slice any, setFunc func(*PostContributorCreate, int)) *PostContributorCreateBulk {
+	rv := reflect.ValueOf(slice)
+	if rv.Kind() != reflect.Slice {
+		return &PostContributorCreateBulk{err: fmt.Errorf("calling to PostContributorClient.MapCreateBulk with wrong type %T, need slice", slice)}
+	}
+	builders := make([]*PostContributorCreate, rv.Len())
+	for i := 0; i < rv.Len(); i++ {
+		builders[i] = c.Create()
+		setFunc(builders[i], i)
+	}
+	return &PostContributorCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for PostContributor.
+func (c *PostContributorClient) Update() *PostContributorUpdate {
+	mutation := newPostContributorMutation(c.config, OpUpdate)
+	return &PostContributorUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *PostContributorClient) UpdateOne(pc *PostContributor) *PostContributorUpdateOne {
+	mutation := newPostContributorMutation(c.config, OpUpdateOne, withPostContributor(pc))
+	return &PostContributorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *PostContributorClient) UpdateOneID(id int) *PostContributorUpdateOne {
+	mutation := newPostContributorMutation(c.config, OpUpdateOne, withPostContributorID(id))
+	return &PostContributorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for PostContributor.
+func (c *PostContributorClient) Delete() *PostContributorDelete {
+	mutation := newPostContributorMutation(c.config, OpDelete)
+	return &PostContributorDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *PostContributorClient) DeleteOne(pc *PostContributor) *PostContributorDeleteOne {
+	return c.DeleteOneID(pc.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *PostContributorClient) DeleteOneID(id int) *PostContributorDeleteOne {
+	builder := c.Delete().Where(postcontributor.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &PostContributorDeleteOne{builder}
+}
+
+// Query returns a query builder for PostContributor.
+func (c *PostContributorClient) Query() *PostContributorQuery {
+	return &PostContributorQuery{
+		config: c.config,
+		ctx:    &QueryContext{Type: TypePostContributor},
+		inters: c.Interceptors(),
+	}
+}
+
+// Get returns a PostContributor entity by its id.
+func (c *PostContributorClient) Get(ctx context.Context, id int) (*PostContributor, error) {
+	return c.Query().Where(postcontributor.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *PostContributorClient) GetX(ctx context.Context, id int) *PostContributor {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// QueryPost queries the post edge of a PostContributor.
+func (c *PostContributorClient) QueryPost(pc *PostContributor) *PostQuery {
+	query := (&PostClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := pc.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(postcontributor.Table, postcontributor.FieldID, id),
+			sqlgraph.To(post.Table, post.FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, postcontributor.PostTable, postcontributor.PostColumn),
+		)
+		fromV = sqlgraph.Neighbors(pc.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// QueryContributor queries the contributor edge of a PostContributor.
+func (c *PostContributorClient) QueryContributor(pc *PostContributor) *ContributorQuery {
+	query := (&ContributorClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := pc.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(postcontributor.Table, postcontributor.FieldID, id),
+			sqlgraph.To(contributor.Table, contributor.FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, postcontributor.ContributorTable, postcontributor.ContributorColumn),
+		)
+		fromV = sqlgraph.Neighbors(pc.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// QueryRole queries the role edge of a PostContributor.
+func (c *PostContributorClient) QueryRole(pc *PostContributor) *ContributorRoleQuery {
+	query := (&ContributorRoleClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := pc.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(postcontributor.Table, postcontributor.FieldID, id),
+			sqlgraph.To(contributorrole.Table, contributorrole.FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, postcontributor.RoleTable, postcontributor.RoleColumn),
+		)
+		fromV = sqlgraph.Neighbors(pc.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// Hooks returns the client hooks.
+func (c *PostContributorClient) Hooks() []Hook {
+	return c.hooks.PostContributor
+}
+
+// Interceptors returns the client interceptors.
+func (c *PostContributorClient) Interceptors() []Interceptor {
+	return c.inters.PostContributor
+}
+
+func (c *PostContributorClient) mutate(ctx context.Context, m *PostContributorMutation) (Value, error) {
+	switch m.Op() {
+	case OpCreate:
+		return (&PostContributorCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdate:
+		return (&PostContributorUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdateOne:
+		return (&PostContributorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpDelete, OpDeleteOne:
+		return (&PostContributorDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+	default:
+		return nil, fmt.Errorf("ent: unknown PostContributor mutation op: %q", m.Op())
+	}
+}
+
+// RoleClient is a client for the Role schema.
+type RoleClient struct {
+	config
+}
+
+// NewRoleClient returns a client for the Role from the given config.
+func NewRoleClient(c config) *RoleClient {
+	return &RoleClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `role.Hooks(f(g(h())))`.
+func (c *RoleClient) Use(hooks ...Hook) {
+	c.hooks.Role = append(c.hooks.Role, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `role.Intercept(f(g(h())))`.
+func (c *RoleClient) Intercept(interceptors ...Interceptor) {
+	c.inters.Role = append(c.inters.Role, interceptors...)
+}
+
+// Create returns a builder for creating a Role entity.
+func (c *RoleClient) Create() *RoleCreate {
+	mutation := newRoleMutation(c.config, OpCreate)
+	return &RoleCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of Role entities.
+func (c *RoleClient) CreateBulk(builders ...*RoleCreate) *RoleCreateBulk {
+	return &RoleCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *RoleClient) MapCreateBulk(slice any, setFunc func(*RoleCreate, int)) *RoleCreateBulk {
+	rv := reflect.ValueOf(slice)
+	if rv.Kind() != reflect.Slice {
+		return &RoleCreateBulk{err: fmt.Errorf("calling to RoleClient.MapCreateBulk with wrong type %T, need slice", slice)}
+	}
+	builders := make([]*RoleCreate, rv.Len())
+	for i := 0; i < rv.Len(); i++ {
+		builders[i] = c.Create()
+		setFunc(builders[i], i)
+	}
+	return &RoleCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for Role.
+
+// Update returns an update builder for Role.
+func (c *RoleClient) Update() *RoleUpdate {
+	mutation := newRoleMutation(c.config, OpUpdate)
+	return &RoleUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *RoleClient) UpdateOne(r *Role) *RoleUpdateOne {
+	mutation := newRoleMutation(c.config, OpUpdateOne, withRole(r))
+	return &RoleUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *RoleClient) UpdateOneID(id int) *RoleUpdateOne {
+	mutation := newRoleMutation(c.config, OpUpdateOne, withRoleID(id))
+	return &RoleUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for Role.
+func (c *RoleClient) Delete() *RoleDelete {
+	mutation := newRoleMutation(c.config, OpDelete)
+	return &RoleDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *RoleClient) DeleteOne(r *Role) *RoleDeleteOne {
+	return c.DeleteOneID(r.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *RoleClient) DeleteOneID(id int) *RoleDeleteOne {
+	builder := c.Delete().Where(role.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &RoleDeleteOne{builder}
+}
+
+// Query returns a query builder for Role.
+func (c *RoleClient) Query() *RoleQuery {
+	return &RoleQuery{
+		config: c.config,
+		ctx:    &QueryContext{Type: TypeRole},
+		inters: c.Interceptors(),
+	}
+}
+
+// Get returns a Role entity by its id.
+func (c *RoleClient) Get(ctx context.Context, id int) (*Role, error) {
+	return c.Query().Where(role.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *RoleClient) GetX(ctx context.Context, id int) *Role {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// QueryPermissions queries the permissions edge of a Role.
+func (c *RoleClient) QueryPermissions(r *Role) *PermissionQuery {
+	query := (&PermissionClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := r.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(role.Table, role.FieldID, id),
+			sqlgraph.To(permission.Table, permission.FieldID),
+			sqlgraph.Edge(sqlgraph.M2M, false, role.PermissionsTable, role.PermissionsPrimaryKey...),
+		)
+		fromV = sqlgraph.Neighbors(r.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// QueryUsers queries the users edge of a Role.
+func (c *RoleClient) QueryUsers(r *Role) *UserQuery {
+	query := (&UserClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := r.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(role.Table, role.FieldID, id),
+			sqlgraph.To(user.Table, user.FieldID),
+			sqlgraph.Edge(sqlgraph.M2M, true, role.UsersTable, role.UsersPrimaryKey...),
+		)
+		fromV = sqlgraph.Neighbors(r.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// Hooks returns the client hooks.
+func (c *RoleClient) Hooks() []Hook {
+	return c.hooks.Role
+}
+
+// Interceptors returns the client interceptors.
+func (c *RoleClient) Interceptors() []Interceptor {
+	return c.inters.Role
+}
+
+func (c *RoleClient) mutate(ctx context.Context, m *RoleMutation) (Value, error) {
+	switch m.Op() {
+	case OpCreate:
+		return (&RoleCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdate:
+		return (&RoleUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdateOne:
+		return (&RoleUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpDelete, OpDeleteOne:
+		return (&RoleDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+	default:
+		return nil, fmt.Errorf("ent: unknown Role mutation op: %q", m.Op())
+	}
+}
+
+// UserClient is a client for the User schema.
+type UserClient struct {
+	config
+}
+
+// NewUserClient returns a client for the User from the given config.
+func NewUserClient(c config) *UserClient {
+	return &UserClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` is equivalent to `user.Hooks(f(g(h())))`.
+func (c *UserClient) Use(hooks ...Hook) {
+	c.hooks.User = append(c.hooks.User, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` is equivalent to `user.Intercept(f(g(h())))`.
+func (c *UserClient) Intercept(interceptors ...Interceptor) {
+	c.inters.User = append(c.inters.User, interceptors...)
+}
+
+// Create returns a builder for creating a User entity.
+func (c *UserClient) Create() *UserCreate {
+	mutation := newUserMutation(c.config, OpCreate)
+	return &UserCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of User entities.
+func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk {
+	return &UserCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *UserClient) MapCreateBulk(slice any, setFunc func(*UserCreate, int)) *UserCreateBulk {
+	rv := reflect.ValueOf(slice)
+	if rv.Kind() != reflect.Slice {
+		return &UserCreateBulk{err: fmt.Errorf("calling to UserClient.MapCreateBulk with wrong type %T, need slice", slice)}
+	}
+	builders := make([]*UserCreate, rv.Len())
+	for i := 0; i < rv.Len(); i++ {
+		builders[i] = c.Create()
+		setFunc(builders[i], i)
+	}
+	return &UserCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for User.
+func (c *UserClient) Update() *UserUpdate {
+	mutation := newUserMutation(c.config, OpUpdate)
+	return &UserUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *UserClient) UpdateOne(u *User) *UserUpdateOne {
+	mutation := newUserMutation(c.config, OpUpdateOne, withUser(u))
+	return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *UserClient) UpdateOneID(id int) *UserUpdateOne {
+	mutation := newUserMutation(c.config, OpUpdateOne, withUserID(id))
+	return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for User.
+func (c *UserClient) Delete() *UserDelete {
+	mutation := newUserMutation(c.config, OpDelete)
+	return &UserDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *UserClient) DeleteOne(u *User) *UserDeleteOne {
+	return c.DeleteOneID(u.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *UserClient) DeleteOneID(id int) *UserDeleteOne {
+	builder := c.Delete().Where(user.ID(id))
+	builder.mutation.id = &id
+	builder.mutation.op = OpDeleteOne
+	return &UserDeleteOne{builder}
+}
+
+// Query returns a query builder for User.
+func (c *UserClient) Query() *UserQuery {
+	return &UserQuery{
+		config: c.config,
+		ctx:    &QueryContext{Type: TypeUser},
+		inters: c.Interceptors(),
+	}
+}
+
+// Get returns a User entity by its id.
+func (c *UserClient) Get(ctx context.Context, id int) (*User, error) {
+	return c.Query().Where(user.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *UserClient) GetX(ctx context.Context, id int) *User {
+	obj, err := c.Get(ctx, id)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// QueryRoles queries the roles edge of a User.
+func (c *UserClient) QueryRoles(u *User) *RoleQuery {
+	query := (&RoleClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := u.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(user.Table, user.FieldID, id),
+			sqlgraph.To(role.Table, role.FieldID),
+			sqlgraph.Edge(sqlgraph.M2M, false, user.RolesTable, user.RolesPrimaryKey...),
+		)
+		fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// QueryContributors queries the contributors edge of a User.
+func (c *UserClient) QueryContributors(u *User) *ContributorQuery {
+	query := (&ContributorClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := u.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(user.Table, user.FieldID, id),
+			sqlgraph.To(contributor.Table, contributor.FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, user.ContributorsTable, user.ContributorsColumn),
+		)
+		fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// QueryMedia queries the media edge of a User.
+func (c *UserClient) QueryMedia(u *User) *MediaQuery {
+	query := (&MediaClient{config: c.config}).Query()
+	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+		id := u.ID
+		step := sqlgraph.NewStep(
+			sqlgraph.From(user.Table, user.FieldID, id),
+			sqlgraph.To(media.Table, media.FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, user.MediaTable, user.MediaColumn),
+		)
+		fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
+		return fromV, nil
+	}
+	return query
+}
+
+// Hooks returns the client hooks.
+func (c *UserClient) Hooks() []Hook {
+	return c.hooks.User
+}
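+
+// Illustrative sketch (not part of the generated code): traversing the M2M
+// "roles" edge from a previously loaded *ent.User "u"; "client" and "ctx"
+// are assumed:
+//
+//	roles, err := client.User.QueryRoles(u).All(ctx)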
+
+// Interceptors returns the client interceptors.
+func (c *UserClient) Interceptors() []Interceptor {
+	return c.inters.User
+}
+
+func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error) {
+	switch m.Op() {
+	case OpCreate:
+		return (&UserCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdate:
+		return (&UserUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpUpdateOne:
+		return (&UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+	case OpDelete, OpDeleteOne:
+		return (&UserDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+	default:
+		return nil, fmt.Errorf("ent: unknown User mutation op: %q", m.Op())
+	}
+}
+
+// hooks and interceptors per client, for fast access.
+type (
+	hooks struct {
+		Category, CategoryContent, Contributor, ContributorRole, ContributorSocialLink,
+		Daily, DailyCategory, DailyCategoryContent, DailyContent, Media, Permission,
+		Post, PostContent, PostContributor, Role, User []ent.Hook
+	}
+	inters struct {
+		Category, CategoryContent, Contributor, ContributorRole, ContributorSocialLink,
+		Daily, DailyCategory, DailyCategoryContent, DailyContent, Media, Permission,
+		Post, PostContent, PostContributor, Role, User []ent.Interceptor
+	}
+)
diff --git a/backend/ent/contributor.go b/backend/ent/contributor.go
new file mode 100644
index 0000000..036bd1c
--- /dev/null
+++ b/backend/ent/contributor.go
@@ -0,0 +1,221 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"fmt"
+	"strings"
+	"time"
+	"tss-rocks-be/ent/contributor"
+	"tss-rocks-be/ent/user"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+)
+
+// Contributor is the model entity for the Contributor schema.
+type Contributor struct {
+	config `json:"-"`
+	// ID of the ent.
+	ID int `json:"id,omitempty"`
+	// Name holds the value of the "name" field.
+	Name string `json:"name,omitempty"`
+	// AvatarURL holds the value of the "avatar_url" field.
+	AvatarURL string `json:"avatar_url,omitempty"`
+	// Bio holds the value of the "bio" field.
+	Bio string `json:"bio,omitempty"`
+	// CreatedAt holds the value of the "created_at" field.
+	CreatedAt time.Time `json:"created_at,omitempty"`
+	// UpdatedAt holds the value of the "updated_at" field.
+	UpdatedAt time.Time `json:"updated_at,omitempty"`
+	// Edges holds the relations/edges for other nodes in the graph.
+	// The values are being populated by the ContributorQuery when eager-loading is set.
+	Edges             ContributorEdges `json:"edges"`
+	user_contributors *int
+	selectValues      sql.SelectValues
+}
+
+// ContributorEdges holds the relations/edges for other nodes in the graph.
+type ContributorEdges struct {
+	// User holds the value of the user edge.
+	User *User `json:"user,omitempty"`
+	// SocialLinks holds the value of the social_links edge.
+	SocialLinks []*ContributorSocialLink `json:"social_links,omitempty"`
+	// Posts holds the value of the posts edge.
+	Posts []*PostContributor `json:"posts,omitempty"`
+	// loadedTypes holds the information for reporting if a
+	// type was loaded (or requested) in eager-loading or not.
+	loadedTypes [3]bool
+}
+
+// UserOrErr returns the User value or an error if the edge
+// was not loaded in eager-loading, or loaded but was not found.
+func (e ContributorEdges) UserOrErr() (*User, error) {
+	if e.User != nil {
+		return e.User, nil
+	} else if e.loadedTypes[0] {
+		return nil, &NotFoundError{label: user.Label}
+	}
+	return nil, &NotLoadedError{edge: "user"}
+}
+
+// SocialLinksOrErr returns the SocialLinks value or an error if the edge
+// was not loaded in eager-loading.
+func (e ContributorEdges) SocialLinksOrErr() ([]*ContributorSocialLink, error) {
+	if e.loadedTypes[1] {
+		return e.SocialLinks, nil
+	}
+	return nil, &NotLoadedError{edge: "social_links"}
+}
+
+// PostsOrErr returns the Posts value or an error if the edge
+// was not loaded in eager-loading.
+func (e ContributorEdges) PostsOrErr() ([]*PostContributor, error) {
+	if e.loadedTypes[2] {
+		return e.Posts, nil
+	}
+	return nil, &NotLoadedError{edge: "posts"}
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*Contributor) scanValues(columns []string) ([]any, error) {
+	values := make([]any, len(columns))
+	for i := range columns {
+		switch columns[i] {
+		case contributor.FieldID:
+			values[i] = new(sql.NullInt64)
+		case contributor.FieldName, contributor.FieldAvatarURL, contributor.FieldBio:
+			values[i] = new(sql.NullString)
+		case contributor.FieldCreatedAt, contributor.FieldUpdatedAt:
+			values[i] = new(sql.NullTime)
+		case contributor.ForeignKeys[0]: // user_contributors
+			values[i] = new(sql.NullInt64)
+		default:
+			values[i] = new(sql.UnknownType)
+		}
+	}
+	return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the Contributor fields.
+func (c *Contributor) assignValues(columns []string, values []any) error {
+	if m, n := len(values), len(columns); m < n {
+		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+	}
+	for i := range columns {
+		switch columns[i] {
+		case contributor.FieldID:
+			value, ok := values[i].(*sql.NullInt64)
+			if !ok {
+				return fmt.Errorf("unexpected type %T for field id", value)
+			}
+			c.ID = int(value.Int64)
+		case contributor.FieldName:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field name", values[i])
+			} else if value.Valid {
+				c.Name = value.String
+			}
+		case contributor.FieldAvatarURL:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field avatar_url", values[i])
+			} else if value.Valid {
+				c.AvatarURL = value.String
+			}
+		case contributor.FieldBio:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field bio", values[i])
+			} else if value.Valid {
+				c.Bio = value.String
+			}
+		case contributor.FieldCreatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field created_at", values[i])
+			} else if value.Valid {
+				c.CreatedAt = value.Time
+			}
+		case contributor.FieldUpdatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
+			} else if value.Valid {
+				c.UpdatedAt = value.Time
+			}
+		case contributor.ForeignKeys[0]:
+			if value, ok := values[i].(*sql.NullInt64); !ok {
+				return fmt.Errorf("unexpected type %T for edge-field user_contributors", value)
+			} else if value.Valid {
+				c.user_contributors = new(int)
+				*c.user_contributors = int(value.Int64)
+			}
+		default:
+			c.selectValues.Set(columns[i], values[i])
+		}
+	}
+	return nil
+}
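+
+// Illustrative sketch (not part of the generated code): the *OrErr accessors
+// above pair with eager loading. WithUser is assumed to be the eager-loading
+// modifier ent generates for this edge; "client" and "ctx" are assumed too:
+//
+//	c, err := client.Contributor.Query().WithUser().First(ctx)
+//	if err == nil {
+//		if u, uerr := c.Edges.UserOrErr(); uerr == nil {
+//			fmt.Println(u.ID)
+//		}
+//	}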
+
+// Value returns the ent.Value that was dynamically selected and assigned to the Contributor.
+// This includes values selected through modifiers, order, etc.
+func (c *Contributor) Value(name string) (ent.Value, error) {
+	return c.selectValues.Get(name)
+}
+
+// QueryUser queries the "user" edge of the Contributor entity.
+func (c *Contributor) QueryUser() *UserQuery {
+	return NewContributorClient(c.config).QueryUser(c)
+}
+
+// QuerySocialLinks queries the "social_links" edge of the Contributor entity.
+func (c *Contributor) QuerySocialLinks() *ContributorSocialLinkQuery {
+	return NewContributorClient(c.config).QuerySocialLinks(c)
+}
+
+// QueryPosts queries the "posts" edge of the Contributor entity.
+func (c *Contributor) QueryPosts() *PostContributorQuery {
+	return NewContributorClient(c.config).QueryPosts(c)
+}
+
+// Update returns a builder for updating this Contributor.
+// Note that you need to call Contributor.Unwrap() before calling this method if this Contributor
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (c *Contributor) Update() *ContributorUpdateOne {
+	return NewContributorClient(c.config).UpdateOne(c)
+}
+
+// Unwrap unwraps the Contributor entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (c *Contributor) Unwrap() *Contributor {
+	_tx, ok := c.config.driver.(*txDriver)
+	if !ok {
+		panic("ent: Contributor is not a transactional entity")
+	}
+	c.config.driver = _tx.drv
+	return c
+}
+
+// String implements the fmt.Stringer.
+func (c *Contributor) String() string {
+	var builder strings.Builder
+	builder.WriteString("Contributor(")
+	builder.WriteString(fmt.Sprintf("id=%v, ", c.ID))
+	builder.WriteString("name=")
+	builder.WriteString(c.Name)
+	builder.WriteString(", ")
+	builder.WriteString("avatar_url=")
+	builder.WriteString(c.AvatarURL)
+	builder.WriteString(", ")
+	builder.WriteString("bio=")
+	builder.WriteString(c.Bio)
+	builder.WriteString(", ")
+	builder.WriteString("created_at=")
+	builder.WriteString(c.CreatedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("updated_at=")
+	builder.WriteString(c.UpdatedAt.Format(time.ANSIC))
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// Contributors is a parsable slice of Contributor.
+type Contributors []*Contributor
diff --git a/backend/ent/contributor/contributor.go b/backend/ent/contributor/contributor.go
new file mode 100644
index 0000000..54441ed
--- /dev/null
+++ b/backend/ent/contributor/contributor.go
@@ -0,0 +1,187 @@
+// Code generated by ent, DO NOT EDIT.
+
+package contributor
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+)
+
+const (
+	// Label holds the string label denoting the contributor type in the database.
+	Label = "contributor"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldName holds the string denoting the name field in the database.
+	FieldName = "name"
+	// FieldAvatarURL holds the string denoting the avatar_url field in the database.
+	FieldAvatarURL = "avatar_url"
+	// FieldBio holds the string denoting the bio field in the database.
+	FieldBio = "bio"
+	// FieldCreatedAt holds the string denoting the created_at field in the database.
+	FieldCreatedAt = "created_at"
+	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
+	FieldUpdatedAt = "updated_at"
+	// EdgeUser holds the string denoting the user edge name in mutations.
+	EdgeUser = "user"
+	// EdgeSocialLinks holds the string denoting the social_links edge name in mutations.
+	EdgeSocialLinks = "social_links"
+	// EdgePosts holds the string denoting the posts edge name in mutations.
+	EdgePosts = "posts"
+	// Table holds the table name of the contributor in the database.
+	Table = "contributors"
+	// UserTable is the table that holds the user relation/edge.
+	UserTable = "contributors"
+	// UserInverseTable is the table name for the User entity.
+	// It exists in this package in order to avoid circular dependency with the "user" package.
+	UserInverseTable = "users"
+	// UserColumn is the table column denoting the user relation/edge.
+	UserColumn = "user_contributors"
+	// SocialLinksTable is the table that holds the social_links relation/edge.
+	SocialLinksTable = "contributor_social_links"
+	// SocialLinksInverseTable is the table name for the ContributorSocialLink entity.
+	// It exists in this package in order to avoid circular dependency with the "contributorsociallink" package.
+	SocialLinksInverseTable = "contributor_social_links"
+	// SocialLinksColumn is the table column denoting the social_links relation/edge.
+	SocialLinksColumn = "contributor_social_links"
+	// PostsTable is the table that holds the posts relation/edge.
+	PostsTable = "post_contributors"
+	// PostsInverseTable is the table name for the PostContributor entity.
+	// It exists in this package in order to avoid circular dependency with the "postcontributor" package.
+	PostsInverseTable = "post_contributors"
+	// PostsColumn is the table column denoting the posts relation/edge.
+	PostsColumn = "contributor_posts"
+)
+
+// Columns holds all SQL columns for contributor fields.
+var Columns = []string{
+	FieldID,
+	FieldName,
+	FieldAvatarURL,
+	FieldBio,
+	FieldCreatedAt,
+	FieldUpdatedAt,
+}
+
+// ForeignKeys holds the SQL foreign-keys that are owned by the "contributors"
+// table and are not defined as standalone fields in the schema.
+var ForeignKeys = []string{
+	"user_contributors",
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+	for i := range Columns {
+		if column == Columns[i] {
+			return true
+		}
+	}
+	for i := range ForeignKeys {
+		if column == ForeignKeys[i] {
+			return true
+		}
+	}
+	return false
+}
+
+var (
+	// NameValidator is a validator for the "name" field. It is called by the builders before save.
+	NameValidator func(string) error
+	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
+	DefaultCreatedAt func() time.Time
+	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
+	DefaultUpdatedAt func() time.Time
+	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
+	UpdateDefaultUpdatedAt func() time.Time
+)
+
+// OrderOption defines the ordering options for the Contributor queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByName orders the results by the name field.
+func ByName(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldName, opts...).ToFunc()
+}
+
+// ByAvatarURL orders the results by the avatar_url field.
+func ByAvatarURL(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldAvatarURL, opts...).ToFunc()
+}
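+
+// Illustrative sketch (not part of the generated code): how calling code can
+// combine these OrderOption helpers. sql.OrderDesc comes from
+// entgo.io/ent/dialect/sql; "client" and "ctx" are assumed:
+//
+//	cs, err := client.Contributor.Query().
+//		Order(contributor.ByName(sql.OrderDesc()), contributor.ByPostsCount()).
+//		All(ctx)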
+
+// ByBio orders the results by the bio field.
+func ByBio(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldBio, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByUserField orders the results by user field.
+func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
+	}
+}
+
+// BySocialLinksCount orders the results by social_links count.
+func BySocialLinksCount(opts ...sql.OrderTermOption) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborsCount(s, newSocialLinksStep(), opts...)
+	}
+}
+
+// BySocialLinks orders the results by social_links terms.
+func BySocialLinks(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborTerms(s, newSocialLinksStep(), append([]sql.OrderTerm{term}, terms...)...)
+	}
+}
+
+// ByPostsCount orders the results by posts count.
+func ByPostsCount(opts ...sql.OrderTermOption) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborsCount(s, newPostsStep(), opts...)
+	}
+}
+
+// ByPosts orders the results by posts terms.
+func ByPosts(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborTerms(s, newPostsStep(), append([]sql.OrderTerm{term}, terms...)...)
+	}
+}
+func newUserStep() *sqlgraph.Step {
+	return sqlgraph.NewStep(
+		sqlgraph.From(Table, FieldID),
+		sqlgraph.To(UserInverseTable, FieldID),
+		sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
+	)
+}
+func newSocialLinksStep() *sqlgraph.Step {
+	return sqlgraph.NewStep(
+		sqlgraph.From(Table, FieldID),
+		sqlgraph.To(SocialLinksInverseTable, FieldID),
+		sqlgraph.Edge(sqlgraph.O2M, false, SocialLinksTable, SocialLinksColumn),
+	)
+}
+func newPostsStep() *sqlgraph.Step {
+	return sqlgraph.NewStep(
+		sqlgraph.From(Table, FieldID),
+		sqlgraph.To(PostsInverseTable, FieldID),
+		sqlgraph.Edge(sqlgraph.O2M, false, PostsTable, PostsColumn),
+	)
+}
diff --git a/backend/ent/contributor/where.go b/backend/ent/contributor/where.go
new file mode 100644
index 0000000..34a4477
--- /dev/null
+++ b/backend/ent/contributor/where.go
@@ -0,0 +1,460 @@
+// Code generated by ent, DO NOT EDIT.
+
+package contributor
+
+import (
+	"time"
+	"tss-rocks-be/ent/predicate"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id int) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEQ(FieldID, id))
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id int) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEQ(FieldID, id))
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id int) predicate.Contributor {
+	return predicate.Contributor(sql.FieldNEQ(FieldID, id))
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...int) predicate.Contributor {
+	return predicate.Contributor(sql.FieldIn(FieldID, ids...))
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...int) predicate.Contributor {
+	return predicate.Contributor(sql.FieldNotIn(FieldID, ids...))
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id int) predicate.Contributor {
+	return predicate.Contributor(sql.FieldGT(FieldID, id))
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id int) predicate.Contributor {
+	return predicate.Contributor(sql.FieldGTE(FieldID, id))
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id int) predicate.Contributor {
+	return predicate.Contributor(sql.FieldLT(FieldID, id))
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id int) predicate.Contributor {
+	return predicate.Contributor(sql.FieldLTE(FieldID, id))
+}
+
+// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
+func Name(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEQ(FieldName, v))
+}
+
+// AvatarURL applies equality check predicate on the "avatar_url" field. It's identical to AvatarURLEQ.
+func AvatarURL(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEQ(FieldAvatarURL, v))
+}
+
+// Bio applies equality check predicate on the "bio" field. It's identical to BioEQ.
+func Bio(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEQ(FieldBio, v))
+}
+
+// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
+func CreatedAt(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
+func UpdatedAt(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// NameEQ applies the EQ predicate on the "name" field.
+func NameEQ(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEQ(FieldName, v))
+}
+
+// NameNEQ applies the NEQ predicate on the "name" field.
+func NameNEQ(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldNEQ(FieldName, v))
+}
+
+// NameIn applies the In predicate on the "name" field.
+func NameIn(vs ...string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldIn(FieldName, vs...))
+}
+
+// NameNotIn applies the NotIn predicate on the "name" field.
+func NameNotIn(vs ...string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldNotIn(FieldName, vs...))
+}
+
+// NameGT applies the GT predicate on the "name" field.
+func NameGT(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldGT(FieldName, v))
+}
+
+// NameGTE applies the GTE predicate on the "name" field.
+func NameGTE(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldGTE(FieldName, v))
+}
+
+// NameLT applies the LT predicate on the "name" field.
+func NameLT(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldLT(FieldName, v))
+}
+
+// NameLTE applies the LTE predicate on the "name" field.
+func NameLTE(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldLTE(FieldName, v))
+}
+
+// NameContains applies the Contains predicate on the "name" field.
+func NameContains(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldContains(FieldName, v))
+}
+
+// NameHasPrefix applies the HasPrefix predicate on the "name" field.
+func NameHasPrefix(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldHasPrefix(FieldName, v))
+}
+
+// NameHasSuffix applies the HasSuffix predicate on the "name" field.
+func NameHasSuffix(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldHasSuffix(FieldName, v))
+}
+
+// NameEqualFold applies the EqualFold predicate on the "name" field.
+func NameEqualFold(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEqualFold(FieldName, v))
+}
+
+// NameContainsFold applies the ContainsFold predicate on the "name" field.
+func NameContainsFold(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldContainsFold(FieldName, v))
+}
+
+// AvatarURLEQ applies the EQ predicate on the "avatar_url" field.
+func AvatarURLEQ(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEQ(FieldAvatarURL, v))
+}
+
+// AvatarURLNEQ applies the NEQ predicate on the "avatar_url" field.
+func AvatarURLNEQ(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldNEQ(FieldAvatarURL, v))
+}
+
+// AvatarURLIn applies the In predicate on the "avatar_url" field.
+func AvatarURLIn(vs ...string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldIn(FieldAvatarURL, vs...))
+}
+
+// AvatarURLNotIn applies the NotIn predicate on the "avatar_url" field.
+func AvatarURLNotIn(vs ...string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldNotIn(FieldAvatarURL, vs...))
+}
+
+// AvatarURLGT applies the GT predicate on the "avatar_url" field.
+func AvatarURLGT(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldGT(FieldAvatarURL, v))
+}
+
+// AvatarURLGTE applies the GTE predicate on the "avatar_url" field.
+func AvatarURLGTE(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldGTE(FieldAvatarURL, v))
+}
+
+// AvatarURLLT applies the LT predicate on the "avatar_url" field.
+func AvatarURLLT(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldLT(FieldAvatarURL, v))
+}
+
+// AvatarURLLTE applies the LTE predicate on the "avatar_url" field.
+func AvatarURLLTE(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldLTE(FieldAvatarURL, v))
+}
+
+// AvatarURLContains applies the Contains predicate on the "avatar_url" field.
+func AvatarURLContains(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldContains(FieldAvatarURL, v))
+}
+
+// AvatarURLHasPrefix applies the HasPrefix predicate on the "avatar_url" field.
+func AvatarURLHasPrefix(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldHasPrefix(FieldAvatarURL, v))
+}
+
+// AvatarURLHasSuffix applies the HasSuffix predicate on the "avatar_url" field.
+func AvatarURLHasSuffix(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldHasSuffix(FieldAvatarURL, v))
+}
+
+// AvatarURLIsNil applies the IsNil predicate on the "avatar_url" field.
+func AvatarURLIsNil() predicate.Contributor {
+	return predicate.Contributor(sql.FieldIsNull(FieldAvatarURL))
+}
+
+// AvatarURLNotNil applies the NotNil predicate on the "avatar_url" field.
+func AvatarURLNotNil() predicate.Contributor {
+	return predicate.Contributor(sql.FieldNotNull(FieldAvatarURL))
+}
+
+// AvatarURLEqualFold applies the EqualFold predicate on the "avatar_url" field.
+func AvatarURLEqualFold(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEqualFold(FieldAvatarURL, v))
+}
+
+// AvatarURLContainsFold applies the ContainsFold predicate on the "avatar_url" field.
+func AvatarURLContainsFold(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldContainsFold(FieldAvatarURL, v))
+}
+
+// BioEQ applies the EQ predicate on the "bio" field.
+func BioEQ(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEQ(FieldBio, v))
+}
+
+// BioNEQ applies the NEQ predicate on the "bio" field.
+func BioNEQ(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldNEQ(FieldBio, v))
+}
+
+// BioIn applies the In predicate on the "bio" field.
+func BioIn(vs ...string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldIn(FieldBio, vs...))
+}
+
+// BioNotIn applies the NotIn predicate on the "bio" field.
+func BioNotIn(vs ...string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldNotIn(FieldBio, vs...))
+}
+
+// BioGT applies the GT predicate on the "bio" field.
+func BioGT(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldGT(FieldBio, v))
+}
+
+// BioGTE applies the GTE predicate on the "bio" field.
+func BioGTE(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldGTE(FieldBio, v))
+}
+
+// BioLT applies the LT predicate on the "bio" field.
+func BioLT(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldLT(FieldBio, v))
+}
+
+// BioLTE applies the LTE predicate on the "bio" field.
+func BioLTE(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldLTE(FieldBio, v))
+}
+
+// BioContains applies the Contains predicate on the "bio" field.
+func BioContains(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldContains(FieldBio, v))
+}
+
+// BioHasPrefix applies the HasPrefix predicate on the "bio" field.
+func BioHasPrefix(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldHasPrefix(FieldBio, v))
+}
+
+// BioHasSuffix applies the HasSuffix predicate on the "bio" field.
+func BioHasSuffix(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldHasSuffix(FieldBio, v))
+}
+
+// BioIsNil applies the IsNil predicate on the "bio" field.
+func BioIsNil() predicate.Contributor {
+	return predicate.Contributor(sql.FieldIsNull(FieldBio))
+}
+
+// BioNotNil applies the NotNil predicate on the "bio" field.
+func BioNotNil() predicate.Contributor {
+	return predicate.Contributor(sql.FieldNotNull(FieldBio))
+}
+
+// BioEqualFold applies the EqualFold predicate on the "bio" field.
+func BioEqualFold(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEqualFold(FieldBio, v))
+}
+
+// BioContainsFold applies the ContainsFold predicate on the "bio" field.
+func BioContainsFold(v string) predicate.Contributor {
+	return predicate.Contributor(sql.FieldContainsFold(FieldBio, v))
+}
+
+// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldNEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldNotIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldGT(FieldCreatedAt, v))
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldGTE(FieldCreatedAt, v))
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldLT(FieldCreatedAt, v))
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldLTE(FieldCreatedAt, v))
+}
+
+// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
+func UpdatedAtEQ(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
+func UpdatedAtNEQ(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldNEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtIn applies the In predicate on the "updated_at" field.
+func UpdatedAtIn(vs ...time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
+func UpdatedAtNotIn(vs ...time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldNotIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtGT applies the GT predicate on the "updated_at" field.
+func UpdatedAtGT(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldGT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
+func UpdatedAtGTE(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldGTE(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLT applies the LT predicate on the "updated_at" field.
+func UpdatedAtLT(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldLT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
+func UpdatedAtLTE(v time.Time) predicate.Contributor {
+	return predicate.Contributor(sql.FieldLTE(FieldUpdatedAt, v))
+}
+
+// HasUser applies the HasEdge predicate on the "user" edge.
+func HasUser() predicate.Contributor {
+	return predicate.Contributor(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasUserWith applies the HasEdge predicate on the "user" edge with given conditions (other predicates).
+func HasUserWith(preds ...predicate.User) predicate.Contributor {
+	return predicate.Contributor(func(s *sql.Selector) {
+		step := newUserStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
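+
+// Illustrative sketch (not part of the generated code): composing the
+// predicates above from calling code; "client" and "ctx" are assumed:
+//
+//	orphans, err := client.Contributor.Query().
+//		Where(contributor.Or(
+//			contributor.Not(contributor.HasUser()),
+//			contributor.BioIsNil(),
+//		)).
+//		All(ctx)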
+
+// HasSocialLinks applies the HasEdge predicate on the "social_links" edge.
+func HasSocialLinks() predicate.Contributor {
+	return predicate.Contributor(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, SocialLinksTable, SocialLinksColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasSocialLinksWith applies the HasEdge predicate on the "social_links" edge with given conditions (other predicates).
+func HasSocialLinksWith(preds ...predicate.ContributorSocialLink) predicate.Contributor {
+	return predicate.Contributor(func(s *sql.Selector) {
+		step := newSocialLinksStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// HasPosts applies the HasEdge predicate on the "posts" edge.
+func HasPosts() predicate.Contributor {
+	return predicate.Contributor(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, PostsTable, PostsColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasPostsWith applies the HasEdge predicate on the "posts" edge with given conditions (other predicates).
+func HasPostsWith(preds ...predicate.PostContributor) predicate.Contributor {
+	return predicate.Contributor(func(s *sql.Selector) {
+		step := newPostsStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.Contributor) predicate.Contributor {
+	return predicate.Contributor(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.Contributor) predicate.Contributor {
+	return predicate.Contributor(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.Contributor) predicate.Contributor {
+	return predicate.Contributor(sql.NotPredicates(p))
+}
diff --git a/backend/ent/contributor_create.go b/backend/ent/contributor_create.go
new file mode 100644
index 0000000..9313fa2
--- /dev/null
+++ b/backend/ent/contributor_create.go
@@ -0,0 +1,382 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+	"tss-rocks-be/ent/contributor"
+	"tss-rocks-be/ent/contributorsociallink"
+	"tss-rocks-be/ent/postcontributor"
+	"tss-rocks-be/ent/user"
+
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+)
+
+// ContributorCreate is the builder for creating a Contributor entity.
+type ContributorCreate struct {
+	config
+	mutation *ContributorMutation
+	hooks    []Hook
+}
+
+// SetName sets the "name" field.
+func (cc *ContributorCreate) SetName(s string) *ContributorCreate {
+	cc.mutation.SetName(s)
+	return cc
+}
+
+// SetAvatarURL sets the "avatar_url" field.
+func (cc *ContributorCreate) SetAvatarURL(s string) *ContributorCreate {
+	cc.mutation.SetAvatarURL(s)
+	return cc
+}
+
+// SetNillableAvatarURL sets the "avatar_url" field if the given value is not nil.
+func (cc *ContributorCreate) SetNillableAvatarURL(s *string) *ContributorCreate {
+	if s != nil {
+		cc.SetAvatarURL(*s)
+	}
+	return cc
+}
+
+// SetBio sets the "bio" field.
+func (cc *ContributorCreate) SetBio(s string) *ContributorCreate {
+	cc.mutation.SetBio(s)
+	return cc
+}
+
+// SetNillableBio sets the "bio" field if the given value is not nil.
+func (cc *ContributorCreate) SetNillableBio(s *string) *ContributorCreate {
+	if s != nil {
+		cc.SetBio(*s)
+	}
+	return cc
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (cc *ContributorCreate) SetCreatedAt(t time.Time) *ContributorCreate {
+	cc.mutation.SetCreatedAt(t)
+	return cc
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (cc *ContributorCreate) SetNillableCreatedAt(t *time.Time) *ContributorCreate {
+	if t != nil {
+		cc.SetCreatedAt(*t)
+	}
+	return cc
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (cc *ContributorCreate) SetUpdatedAt(t time.Time) *ContributorCreate {
+	cc.mutation.SetUpdatedAt(t)
+	return cc
+}
+
+// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
+func (cc *ContributorCreate) SetNillableUpdatedAt(t *time.Time) *ContributorCreate {
+	if t != nil {
+		cc.SetUpdatedAt(*t)
+	}
+	return cc
+}
+
+// SetUserID sets the "user" edge to the User entity by ID.
+func (cc *ContributorCreate) SetUserID(id int) *ContributorCreate {
+	cc.mutation.SetUserID(id)
+	return cc
+}
+
+// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil.
+func (cc *ContributorCreate) SetNillableUserID(id *int) *ContributorCreate {
+	if id != nil {
+		cc = cc.SetUserID(*id)
+	}
+	return cc
+}
+
+// SetUser sets the "user" edge to the User entity.
+func (cc *ContributorCreate) SetUser(u *User) *ContributorCreate {
+	return cc.SetUserID(u.ID)
+}
+
+// AddSocialLinkIDs adds the "social_links" edge to the ContributorSocialLink entity by IDs.
+func (cc *ContributorCreate) AddSocialLinkIDs(ids ...int) *ContributorCreate {
+	cc.mutation.AddSocialLinkIDs(ids...)
+	return cc
+}
+
+// AddSocialLinks adds the "social_links" edges to the ContributorSocialLink entity.
+func (cc *ContributorCreate) AddSocialLinks(c ...*ContributorSocialLink) *ContributorCreate {
+	ids := make([]int, len(c))
+	for i := range c {
+		ids[i] = c[i].ID
+	}
+	return cc.AddSocialLinkIDs(ids...)
+}
+
+// AddPostIDs adds the "posts" edge to the PostContributor entity by IDs.
+func (cc *ContributorCreate) AddPostIDs(ids ...int) *ContributorCreate {
+	cc.mutation.AddPostIDs(ids...)
+	return cc
+}
+
+// AddPosts adds the "posts" edges to the PostContributor entity.
+func (cc *ContributorCreate) AddPosts(p ...*PostContributor) *ContributorCreate {
+	ids := make([]int, len(p))
+	for i := range p {
+		ids[i] = p[i].ID
+	}
+	return cc.AddPostIDs(ids...)
+}
+
+// Mutation returns the ContributorMutation object of the builder.
+func (cc *ContributorCreate) Mutation() *ContributorMutation {
+	return cc.mutation
+}
+
+// Save creates the Contributor in the database.
+func (cc *ContributorCreate) Save(ctx context.Context) (*Contributor, error) {
+	cc.defaults()
+	return withHooks(ctx, cc.sqlSave, cc.mutation, cc.hooks)
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (cc *ContributorCreate) SaveX(ctx context.Context) *Contributor {
+	v, err := cc.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (cc *ContributorCreate) Exec(ctx context.Context) error {
+	_, err := cc.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (cc *ContributorCreate) ExecX(ctx context.Context) {
+	if err := cc.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
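+
+// Illustrative sketch (not part of the generated code): typical usage of the
+// create builder above; "client" and "ctx" are assumed:
+//
+//	bio := "Maintainer"
+//	c, err := client.Contributor.Create().
+//		SetName("Jane Doe").
+//		SetNillableBio(&bio).
+//		Save(ctx)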
+
+// defaults sets the default values of the builder before save.
+func (cc *ContributorCreate) defaults() {
+	if _, ok := cc.mutation.CreatedAt(); !ok {
+		v := contributor.DefaultCreatedAt()
+		cc.mutation.SetCreatedAt(v)
+	}
+	if _, ok := cc.mutation.UpdatedAt(); !ok {
+		v := contributor.DefaultUpdatedAt()
+		cc.mutation.SetUpdatedAt(v)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (cc *ContributorCreate) check() error {
+	if _, ok := cc.mutation.Name(); !ok {
+		return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Contributor.name"`)}
+	}
+	if v, ok := cc.mutation.Name(); ok {
+		if err := contributor.NameValidator(v); err != nil {
+			return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Contributor.name": %w`, err)}
+		}
+	}
+	if _, ok := cc.mutation.CreatedAt(); !ok {
+		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Contributor.created_at"`)}
+	}
+	if _, ok := cc.mutation.UpdatedAt(); !ok {
+		return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Contributor.updated_at"`)}
+	}
+	return nil
+}
+
+func (cc *ContributorCreate) sqlSave(ctx context.Context) (*Contributor, error) {
+	if err := cc.check(); err != nil {
+		return nil, err
+	}
+	_node, _spec := cc.createSpec()
+	if err := sqlgraph.CreateNode(ctx, cc.driver, _spec); err != nil {
+		if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	id := _spec.ID.Value.(int64)
+	_node.ID = int(id)
+	cc.mutation.id = &_node.ID
+	cc.mutation.done = true
+	return _node, nil
+}
+
+func (cc *ContributorCreate) createSpec() (*Contributor, *sqlgraph.CreateSpec) {
+	var (
+		_node = &Contributor{config: cc.config}
+		_spec = sqlgraph.NewCreateSpec(contributor.Table, sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt))
+	)
+	if value, ok := cc.mutation.Name(); ok {
+		_spec.SetField(contributor.FieldName, field.TypeString, value)
+		_node.Name = value
+	}
+	if value, ok := cc.mutation.AvatarURL(); ok {
+		_spec.SetField(contributor.FieldAvatarURL, field.TypeString, value)
+		_node.AvatarURL = value
+	}
+	if value, ok := cc.mutation.Bio(); ok {
+		_spec.SetField(contributor.FieldBio, field.TypeString, value)
+		_node.Bio = value
+	}
+	if value, ok := cc.mutation.CreatedAt(); ok {
+		_spec.SetField(contributor.FieldCreatedAt, field.TypeTime, value)
+		_node.CreatedAt = value
+	}
+	if value, ok := cc.mutation.UpdatedAt(); ok {
+		_spec.SetField(contributor.FieldUpdatedAt, field.TypeTime, value)
+		_node.UpdatedAt = value
+	}
+	if nodes := cc.mutation.UserIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   contributor.UserTable,
+			Columns: []string{contributor.UserColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_node.user_contributors = &nodes[0]
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	if nodes := cc.mutation.SocialLinksIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   contributor.SocialLinksTable,
+			Columns: []string{contributor.SocialLinksColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(contributorsociallink.FieldID, field.TypeInt),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	if nodes := cc.mutation.PostsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   contributor.PostsTable,
+			Columns: []string{contributor.PostsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// ContributorCreateBulk is the builder for creating many Contributor entities in bulk.
+type ContributorCreateBulk struct {
+	config
+	err      error
+	builders []*ContributorCreate
+}
+
+// Save creates the Contributor entities in the database.
+func (ccb *ContributorCreateBulk) Save(ctx context.Context) ([]*Contributor, error) {
+	if ccb.err != nil {
+		return nil, ccb.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(ccb.builders))
+	nodes := make([]*Contributor, len(ccb.builders))
+	mutators := make([]Mutator, len(ccb.builders))
+	for i := range ccb.builders {
+		func(i int, root context.Context) {
+			builder := ccb.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*ContributorMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, ccb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, ccb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, ccb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (ccb *ContributorCreateBulk) SaveX(ctx context.Context) []*Contributor {
+	v, err := ccb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (ccb *ContributorCreateBulk) Exec(ctx context.Context) error {
+	_, err := ccb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (ccb *ContributorCreateBulk) ExecX(ctx context.Context) {
+	if err := ccb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/contributor_delete.go b/backend/ent/contributor_delete.go
new file mode 100644
index 0000000..9ae19df
--- /dev/null
+++ b/backend/ent/contributor_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"tss-rocks-be/ent/contributor"
+	"tss-rocks-be/ent/predicate"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+)
+
+// ContributorDelete is the builder for deleting a Contributor entity.
+type ContributorDelete struct {
+	config
+	hooks    []Hook
+	mutation *ContributorMutation
+}
+
+// Where appends a list of predicates to the ContributorDelete builder.
+func (cd *ContributorDelete) Where(ps ...predicate.Contributor) *ContributorDelete {
+	cd.mutation.Where(ps...)
+	return cd
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (cd *ContributorDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, cd.sqlExec, cd.mutation, cd.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (cd *ContributorDelete) ExecX(ctx context.Context) int {
+	n, err := cd.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (cd *ContributorDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(contributor.Table, sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt))
+	if ps := cd.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, cd.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	cd.mutation.done = true
+	return affected, err
+}
+
+// ContributorDeleteOne is the builder for deleting a single Contributor entity.
+type ContributorDeleteOne struct {
+	cd *ContributorDelete
+}
+
+// Where appends a list of predicates to the ContributorDelete builder.
+func (cdo *ContributorDeleteOne) Where(ps ...predicate.Contributor) *ContributorDeleteOne {
+	cdo.cd.mutation.Where(ps...)
+	return cdo
+}
+
+// Exec executes the deletion query.
+func (cdo *ContributorDeleteOne) Exec(ctx context.Context) error {
+	n, err := cdo.cd.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{contributor.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (cdo *ContributorDeleteOne) ExecX(ctx context.Context) {
+	if err := cdo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/contributor_query.go b/backend/ent/contributor_query.go
new file mode 100644
index 0000000..894fd42
--- /dev/null
+++ b/backend/ent/contributor_query.go
@@ -0,0 +1,765 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"database/sql/driver"
+	"fmt"
+	"math"
+	"tss-rocks-be/ent/contributor"
+	"tss-rocks-be/ent/contributorsociallink"
+	"tss-rocks-be/ent/postcontributor"
+	"tss-rocks-be/ent/predicate"
+	"tss-rocks-be/ent/user"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+)
+
+// ContributorQuery is the builder for querying Contributor entities.
+type ContributorQuery struct {
+	config
+	ctx             *QueryContext
+	order           []contributor.OrderOption
+	inters          []Interceptor
+	predicates      []predicate.Contributor
+	withUser        *UserQuery
+	withSocialLinks *ContributorSocialLinkQuery
+	withPosts       *PostContributorQuery
+	withFKs         bool
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the ContributorQuery builder.
+func (cq *ContributorQuery) Where(ps ...predicate.Contributor) *ContributorQuery {
+	cq.predicates = append(cq.predicates, ps...)
+	return cq
+}
+
+// Limit the number of records to be returned by this query.
+func (cq *ContributorQuery) Limit(limit int) *ContributorQuery { + cq.ctx.Limit = &limit + return cq +} + +// Offset to start from. +func (cq *ContributorQuery) Offset(offset int) *ContributorQuery { + cq.ctx.Offset = &offset + return cq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (cq *ContributorQuery) Unique(unique bool) *ContributorQuery { + cq.ctx.Unique = &unique + return cq +} + +// Order specifies how the records should be ordered. +func (cq *ContributorQuery) Order(o ...contributor.OrderOption) *ContributorQuery { + cq.order = append(cq.order, o...) + return cq +} + +// QueryUser chains the current query on the "user" edge. +func (cq *ContributorQuery) QueryUser() *UserQuery { + query := (&UserClient{config: cq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := cq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := cq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(contributor.Table, contributor.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, contributor.UserTable, contributor.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(cq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QuerySocialLinks chains the current query on the "social_links" edge. +func (cq *ContributorQuery) QuerySocialLinks() *ContributorSocialLinkQuery { + query := (&ContributorSocialLinkClient{config: cq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := cq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := cq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(contributor.Table, contributor.FieldID, selector), + sqlgraph.To(contributorsociallink.Table, contributorsociallink.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, contributor.SocialLinksTable, contributor.SocialLinksColumn), + ) + fromU = sqlgraph.SetNeighbors(cq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryPosts chains the current query on the "posts" edge. +func (cq *ContributorQuery) QueryPosts() *PostContributorQuery { + query := (&PostContributorClient{config: cq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := cq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := cq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(contributor.Table, contributor.FieldID, selector), + sqlgraph.To(postcontributor.Table, postcontributor.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, contributor.PostsTable, contributor.PostsColumn), + ) + fromU = sqlgraph.SetNeighbors(cq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Contributor entity from the query. +// Returns a *NotFoundError when no Contributor was found. +func (cq *ContributorQuery) First(ctx context.Context) (*Contributor, error) { + nodes, err := cq.Limit(1).All(setContextOp(ctx, cq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{contributor.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (cq *ContributorQuery) FirstX(ctx context.Context) *Contributor { + node, err := cq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Contributor ID from the query. +// Returns a *NotFoundError when no Contributor ID was found. +func (cq *ContributorQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = cq.Limit(1).IDs(setContextOp(ctx, cq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{contributor.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (cq *ContributorQuery) FirstIDX(ctx context.Context) int { + id, err := cq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Contributor entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Contributor entity is found. +// Returns a *NotFoundError when no Contributor entities are found. +func (cq *ContributorQuery) Only(ctx context.Context) (*Contributor, error) { + nodes, err := cq.Limit(2).All(setContextOp(ctx, cq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{contributor.Label} + default: + return nil, &NotSingularError{contributor.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (cq *ContributorQuery) OnlyX(ctx context.Context) *Contributor { + node, err := cq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Contributor ID in the query. +// Returns a *NotSingularError when more than one Contributor ID is found. +// Returns a *NotFoundError when no entities are found. +func (cq *ContributorQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = cq.Limit(2).IDs(setContextOp(ctx, cq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{contributor.Label} + default: + err = &NotSingularError{contributor.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (cq *ContributorQuery) OnlyIDX(ctx context.Context) int { + id, err := cq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Contributors. +func (cq *ContributorQuery) All(ctx context.Context) ([]*Contributor, error) { + ctx = setContextOp(ctx, cq.ctx, ent.OpQueryAll) + if err := cq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Contributor, *ContributorQuery]() + return withInterceptors[[]*Contributor](ctx, cq, qr, cq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (cq *ContributorQuery) AllX(ctx context.Context) []*Contributor { + nodes, err := cq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Contributor IDs. +func (cq *ContributorQuery) IDs(ctx context.Context) (ids []int, err error) { + if cq.ctx.Unique == nil && cq.path != nil { + cq.Unique(true) + } + ctx = setContextOp(ctx, cq.ctx, ent.OpQueryIDs) + if err = cq.Select(contributor.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. 
+func (cq *ContributorQuery) IDsX(ctx context.Context) []int { + ids, err := cq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (cq *ContributorQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, cq.ctx, ent.OpQueryCount) + if err := cq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, cq, querierCount[*ContributorQuery](), cq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (cq *ContributorQuery) CountX(ctx context.Context) int { + count, err := cq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (cq *ContributorQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, cq.ctx, ent.OpQueryExist) + switch _, err := cq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (cq *ContributorQuery) ExistX(ctx context.Context) bool { + exist, err := cq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the ContributorQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (cq *ContributorQuery) Clone() *ContributorQuery { + if cq == nil { + return nil + } + return &ContributorQuery{ + config: cq.config, + ctx: cq.ctx.Clone(), + order: append([]contributor.OrderOption{}, cq.order...), + inters: append([]Interceptor{}, cq.inters...), + predicates: append([]predicate.Contributor{}, cq.predicates...), + withUser: cq.withUser.Clone(), + withSocialLinks: cq.withSocialLinks.Clone(), + withPosts: cq.withPosts.Clone(), + // clone intermediate query. + sql: cq.sql.Clone(), + path: cq.path, + } +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. +func (cq *ContributorQuery) WithUser(opts ...func(*UserQuery)) *ContributorQuery { + query := (&UserClient{config: cq.config}).Query() + for _, opt := range opts { + opt(query) + } + cq.withUser = query + return cq +} + +// WithSocialLinks tells the query-builder to eager-load the nodes that are connected to +// the "social_links" edge. The optional arguments are used to configure the query builder of the edge. +func (cq *ContributorQuery) WithSocialLinks(opts ...func(*ContributorSocialLinkQuery)) *ContributorQuery { + query := (&ContributorSocialLinkClient{config: cq.config}).Query() + for _, opt := range opts { + opt(query) + } + cq.withSocialLinks = query + return cq +} + +// WithPosts tells the query-builder to eager-load the nodes that are connected to +// the "posts" edge. The optional arguments are used to configure the query builder of the edge. +func (cq *ContributorQuery) WithPosts(opts ...func(*PostContributorQuery)) *ContributorQuery { + query := (&PostContributorClient{config: cq.config}).Query() + for _, opt := range opts { + opt(query) + } + cq.withPosts = query + return cq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. 
+// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Contributor.Query(). +// GroupBy(contributor.FieldName). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (cq *ContributorQuery) GroupBy(field string, fields ...string) *ContributorGroupBy { + cq.ctx.Fields = append([]string{field}, fields...) + grbuild := &ContributorGroupBy{build: cq} + grbuild.flds = &cq.ctx.Fields + grbuild.label = contributor.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection of one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// } +// +// client.Contributor.Query(). +// Select(contributor.FieldName). +// Scan(ctx, &v) +func (cq *ContributorQuery) Select(fields ...string) *ContributorSelect { + cq.ctx.Fields = append(cq.ctx.Fields, fields...) + sbuild := &ContributorSelect{ContributorQuery: cq} + sbuild.label = contributor.Label + sbuild.flds, sbuild.scan = &cq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ContributorSelect configured with the given aggregations. +func (cq *ContributorQuery) Aggregate(fns ...AggregateFunc) *ContributorSelect { + return cq.Select().Aggregate(fns...) +} + +func (cq *ContributorQuery) prepareQuery(ctx context.Context) error { + for _, inter := range cq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, cq); err != nil { + return err + } + } + } + for _, f := range cq.ctx.Fields { + if !contributor.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if cq.path != nil { + prev, err := cq.path(ctx) + if err != nil { + return err + } + cq.sql = prev + } + return nil +} + +func (cq *ContributorQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Contributor, error) { + var ( + nodes = []*Contributor{} + withFKs = cq.withFKs + _spec = cq.querySpec() + loadedTypes = [3]bool{ + cq.withUser != nil, + cq.withSocialLinks != nil, + cq.withPosts != nil, + } + ) + if cq.withUser != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, contributor.ForeignKeys...)
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Contributor).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Contributor{config: cq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, cq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := cq.withUser; query != nil { + if err := cq.loadUser(ctx, query, nodes, nil, + func(n *Contributor, e *User) { n.Edges.User = e }); err != nil { + return nil, err + } + } + if query := cq.withSocialLinks; query != nil { + if err := cq.loadSocialLinks(ctx, query, nodes, + func(n *Contributor) { n.Edges.SocialLinks = []*ContributorSocialLink{} }, + func(n *Contributor, e *ContributorSocialLink) { n.Edges.SocialLinks = append(n.Edges.SocialLinks, e) }); err != nil { + return nil, err + } + } + if query := cq.withPosts; query != nil { + if err := cq.loadPosts(ctx, query, nodes, + func(n *Contributor) { n.Edges.Posts = []*PostContributor{} }, + func(n *Contributor, e *PostContributor) { n.Edges.Posts = append(n.Edges.Posts, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (cq *ContributorQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*Contributor, init func(*Contributor), assign func(*Contributor, *User)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*Contributor) + for i := range nodes { + if nodes[i].user_contributors == nil { + continue + } + fk := *nodes[i].user_contributors + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_contributors" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (cq *ContributorQuery) loadSocialLinks(ctx context.Context, query *ContributorSocialLinkQuery, nodes []*Contributor, init func(*Contributor), assign func(*Contributor, *ContributorSocialLink)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Contributor) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.ContributorSocialLink(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(contributor.SocialLinksColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.contributor_social_links + if fk == nil { + return fmt.Errorf(`foreign-key "contributor_social_links" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "contributor_social_links" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (cq *ContributorQuery) loadPosts(ctx context.Context, query *PostContributorQuery, nodes []*Contributor, init func(*Contributor), assign func(*Contributor, *PostContributor)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Contributor) + for i := range nodes { + fks = append(fks, 
nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.PostContributor(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(contributor.PostsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.contributor_posts + if fk == nil { + return fmt.Errorf(`foreign-key "contributor_posts" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "contributor_posts" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (cq *ContributorQuery) sqlCount(ctx context.Context) (int, error) { + _spec := cq.querySpec() + _spec.Node.Columns = cq.ctx.Fields + if len(cq.ctx.Fields) > 0 { + _spec.Unique = cq.ctx.Unique != nil && *cq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, cq.driver, _spec) +} + +func (cq *ContributorQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(contributor.Table, contributor.Columns, sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt)) + _spec.From = cq.sql + if unique := cq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if cq.path != nil { + _spec.Unique = true + } + if fields := cq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, contributor.FieldID) + for i := range fields { + if fields[i] != contributor.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := cq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := cq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := cq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := cq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (cq *ContributorQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(cq.driver.Dialect()) + t1 := builder.Table(contributor.Table) + columns := cq.ctx.Fields + if len(columns) == 0 { + columns = contributor.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if cq.sql != nil { + selector = cq.sql + selector.Select(selector.Columns(columns...)...) + } + if cq.ctx.Unique != nil && *cq.ctx.Unique { + selector.Distinct() + } + for _, p := range cq.predicates { + p(selector) + } + for _, p := range cq.order { + p(selector) + } + if offset := cq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := cq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ContributorGroupBy is the group-by builder for Contributor entities. +type ContributorGroupBy struct { + selector + build *ContributorQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (cgb *ContributorGroupBy) Aggregate(fns ...AggregateFunc) *ContributorGroupBy { + cgb.fns = append(cgb.fns, fns...) + return cgb +} + +// Scan applies the selector query and scans the result into the given value. 
+func (cgb *ContributorGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cgb.build.ctx, ent.OpQueryGroupBy) + if err := cgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ContributorQuery, *ContributorGroupBy](ctx, cgb.build, cgb, cgb.build.inters, v) +} + +func (cgb *ContributorGroupBy) sqlScan(ctx context.Context, root *ContributorQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(cgb.fns)) + for _, fn := range cgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*cgb.flds)+len(cgb.fns)) + for _, f := range *cgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*cgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := cgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// ContributorSelect is the builder for selecting fields of Contributor entities. +type ContributorSelect struct { + *ContributorQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (cs *ContributorSelect) Aggregate(fns ...AggregateFunc) *ContributorSelect { + cs.fns = append(cs.fns, fns...) + return cs +} + +// Scan applies the selector query and scans the result into the given value. +func (cs *ContributorSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cs.ctx, ent.OpQuerySelect) + if err := cs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ContributorQuery, *ContributorSelect](ctx, cs.ContributorQuery, cs, cs.inters, v) +} + +func (cs *ContributorSelect) sqlScan(ctx context.Context, root *ContributorQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(cs.fns)) + for _, fn := range cs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*cs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := cs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/contributor_update.go b/backend/ent/contributor_update.go new file mode 100644 index 0000000..241a2bb --- /dev/null +++ b/backend/ent/contributor_update.go @@ -0,0 +1,845 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/contributorsociallink" + "tss-rocks-be/ent/postcontributor" + "tss-rocks-be/ent/predicate" + "tss-rocks-be/ent/user" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// ContributorUpdate is the builder for updating Contributor entities. +type ContributorUpdate struct { + config + hooks []Hook + mutation *ContributorMutation +} + +// Where appends a list of predicates to the ContributorUpdate builder. +func (cu *ContributorUpdate) Where(ps ...predicate.Contributor) *ContributorUpdate { + cu.mutation.Where(ps...) + return cu +} + +// SetName sets the "name" field.
+func (cu *ContributorUpdate) SetName(s string) *ContributorUpdate { + cu.mutation.SetName(s) + return cu +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (cu *ContributorUpdate) SetNillableName(s *string) *ContributorUpdate { + if s != nil { + cu.SetName(*s) + } + return cu +} + +// SetAvatarURL sets the "avatar_url" field. +func (cu *ContributorUpdate) SetAvatarURL(s string) *ContributorUpdate { + cu.mutation.SetAvatarURL(s) + return cu +} + +// SetNillableAvatarURL sets the "avatar_url" field if the given value is not nil. +func (cu *ContributorUpdate) SetNillableAvatarURL(s *string) *ContributorUpdate { + if s != nil { + cu.SetAvatarURL(*s) + } + return cu +} + +// ClearAvatarURL clears the value of the "avatar_url" field. +func (cu *ContributorUpdate) ClearAvatarURL() *ContributorUpdate { + cu.mutation.ClearAvatarURL() + return cu +} + +// SetBio sets the "bio" field. +func (cu *ContributorUpdate) SetBio(s string) *ContributorUpdate { + cu.mutation.SetBio(s) + return cu +} + +// SetNillableBio sets the "bio" field if the given value is not nil. +func (cu *ContributorUpdate) SetNillableBio(s *string) *ContributorUpdate { + if s != nil { + cu.SetBio(*s) + } + return cu +} + +// ClearBio clears the value of the "bio" field. +func (cu *ContributorUpdate) ClearBio() *ContributorUpdate { + cu.mutation.ClearBio() + return cu +} + +// SetCreatedAt sets the "created_at" field. +func (cu *ContributorUpdate) SetCreatedAt(t time.Time) *ContributorUpdate { + cu.mutation.SetCreatedAt(t) + return cu +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (cu *ContributorUpdate) SetNillableCreatedAt(t *time.Time) *ContributorUpdate { + if t != nil { + cu.SetCreatedAt(*t) + } + return cu +} + +// SetUpdatedAt sets the "updated_at" field. +func (cu *ContributorUpdate) SetUpdatedAt(t time.Time) *ContributorUpdate { + cu.mutation.SetUpdatedAt(t) + return cu +} + +// SetUserID sets the "user" edge to the User entity by ID. +func (cu *ContributorUpdate) SetUserID(id int) *ContributorUpdate { + cu.mutation.SetUserID(id) + return cu +} + +// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil. +func (cu *ContributorUpdate) SetNillableUserID(id *int) *ContributorUpdate { + if id != nil { + cu = cu.SetUserID(*id) + } + return cu +} + +// SetUser sets the "user" edge to the User entity. +func (cu *ContributorUpdate) SetUser(u *User) *ContributorUpdate { + return cu.SetUserID(u.ID) +} + +// AddSocialLinkIDs adds the "social_links" edge to the ContributorSocialLink entity by IDs. +func (cu *ContributorUpdate) AddSocialLinkIDs(ids ...int) *ContributorUpdate { + cu.mutation.AddSocialLinkIDs(ids...) + return cu +} + +// AddSocialLinks adds the "social_links" edges to the ContributorSocialLink entity. +func (cu *ContributorUpdate) AddSocialLinks(c ...*ContributorSocialLink) *ContributorUpdate { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return cu.AddSocialLinkIDs(ids...) +} + +// AddPostIDs adds the "posts" edge to the PostContributor entity by IDs. +func (cu *ContributorUpdate) AddPostIDs(ids ...int) *ContributorUpdate { + cu.mutation.AddPostIDs(ids...) + return cu +} + +// AddPosts adds the "posts" edges to the PostContributor entity. +func (cu *ContributorUpdate) AddPosts(p ...*PostContributor) *ContributorUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return cu.AddPostIDs(ids...) 
+} + +// Mutation returns the ContributorMutation object of the builder. +func (cu *ContributorUpdate) Mutation() *ContributorMutation { + return cu.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (cu *ContributorUpdate) ClearUser() *ContributorUpdate { + cu.mutation.ClearUser() + return cu +} + +// ClearSocialLinks clears all "social_links" edges to the ContributorSocialLink entity. +func (cu *ContributorUpdate) ClearSocialLinks() *ContributorUpdate { + cu.mutation.ClearSocialLinks() + return cu +} + +// RemoveSocialLinkIDs removes the "social_links" edge to ContributorSocialLink entities by IDs. +func (cu *ContributorUpdate) RemoveSocialLinkIDs(ids ...int) *ContributorUpdate { + cu.mutation.RemoveSocialLinkIDs(ids...) + return cu +} + +// RemoveSocialLinks removes "social_links" edges to ContributorSocialLink entities. +func (cu *ContributorUpdate) RemoveSocialLinks(c ...*ContributorSocialLink) *ContributorUpdate { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return cu.RemoveSocialLinkIDs(ids...) +} + +// ClearPosts clears all "posts" edges to the PostContributor entity. +func (cu *ContributorUpdate) ClearPosts() *ContributorUpdate { + cu.mutation.ClearPosts() + return cu +} + +// RemovePostIDs removes the "posts" edge to PostContributor entities by IDs. +func (cu *ContributorUpdate) RemovePostIDs(ids ...int) *ContributorUpdate { + cu.mutation.RemovePostIDs(ids...) + return cu +} + +// RemovePosts removes "posts" edges to PostContributor entities. +func (cu *ContributorUpdate) RemovePosts(p ...*PostContributor) *ContributorUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return cu.RemovePostIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (cu *ContributorUpdate) Save(ctx context.Context) (int, error) { + cu.defaults() + return withHooks(ctx, cu.sqlSave, cu.mutation, cu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (cu *ContributorUpdate) SaveX(ctx context.Context) int { + affected, err := cu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (cu *ContributorUpdate) Exec(ctx context.Context) error { + _, err := cu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cu *ContributorUpdate) ExecX(ctx context.Context) { + if err := cu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (cu *ContributorUpdate) defaults() { + if _, ok := cu.mutation.UpdatedAt(); !ok { + v := contributor.UpdateDefaultUpdatedAt() + cu.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (cu *ContributorUpdate) check() error { + if v, ok := cu.mutation.Name(); ok { + if err := contributor.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Contributor.name": %w`, err)} + } + } + return nil +} + +func (cu *ContributorUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := cu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(contributor.Table, contributor.Columns, sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt)) + if ps := cu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := cu.mutation.Name(); ok { + _spec.SetField(contributor.FieldName, field.TypeString, value) + } + if value, ok := cu.mutation.AvatarURL(); ok { + _spec.SetField(contributor.FieldAvatarURL, field.TypeString, value) + } + if cu.mutation.AvatarURLCleared() { + _spec.ClearField(contributor.FieldAvatarURL, field.TypeString) + } + if value, ok := cu.mutation.Bio(); ok { + _spec.SetField(contributor.FieldBio, field.TypeString, value) + } + if cu.mutation.BioCleared() { + _spec.ClearField(contributor.FieldBio, field.TypeString) + } + if value, ok := cu.mutation.CreatedAt(); ok { + _spec.SetField(contributor.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := cu.mutation.UpdatedAt(); ok { + _spec.SetField(contributor.FieldUpdatedAt, field.TypeTime, value) + } + if cu.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: contributor.UserTable, + Columns: []string{contributor.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cu.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: contributor.UserTable, + Columns: []string{contributor.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if cu.mutation.SocialLinksCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributor.SocialLinksTable, + Columns: []string{contributor.SocialLinksColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributorsociallink.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cu.mutation.RemovedSocialLinksIDs(); len(nodes) > 0 && !cu.mutation.SocialLinksCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributor.SocialLinksTable, + Columns: []string{contributor.SocialLinksColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributorsociallink.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cu.mutation.SocialLinksIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributor.SocialLinksTable, + Columns: []string{contributor.SocialLinksColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: 
sqlgraph.NewFieldSpec(contributorsociallink.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if cu.mutation.PostsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributor.PostsTable, + Columns: []string{contributor.PostsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cu.mutation.RemovedPostsIDs(); len(nodes) > 0 && !cu.mutation.PostsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributor.PostsTable, + Columns: []string{contributor.PostsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cu.mutation.PostsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributor.PostsTable, + Columns: []string{contributor.PostsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, cu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{contributor.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + cu.mutation.done = true + return n, nil +} + +// ContributorUpdateOne is the builder for updating a single Contributor entity. +type ContributorUpdateOne struct { + config + fields []string + hooks []Hook + mutation *ContributorMutation +} + +// SetName sets the "name" field. +func (cuo *ContributorUpdateOne) SetName(s string) *ContributorUpdateOne { + cuo.mutation.SetName(s) + return cuo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (cuo *ContributorUpdateOne) SetNillableName(s *string) *ContributorUpdateOne { + if s != nil { + cuo.SetName(*s) + } + return cuo +} + +// SetAvatarURL sets the "avatar_url" field. +func (cuo *ContributorUpdateOne) SetAvatarURL(s string) *ContributorUpdateOne { + cuo.mutation.SetAvatarURL(s) + return cuo +} + +// SetNillableAvatarURL sets the "avatar_url" field if the given value is not nil. +func (cuo *ContributorUpdateOne) SetNillableAvatarURL(s *string) *ContributorUpdateOne { + if s != nil { + cuo.SetAvatarURL(*s) + } + return cuo +} + +// ClearAvatarURL clears the value of the "avatar_url" field. +func (cuo *ContributorUpdateOne) ClearAvatarURL() *ContributorUpdateOne { + cuo.mutation.ClearAvatarURL() + return cuo +} + +// SetBio sets the "bio" field. +func (cuo *ContributorUpdateOne) SetBio(s string) *ContributorUpdateOne { + cuo.mutation.SetBio(s) + return cuo +} + +// SetNillableBio sets the "bio" field if the given value is not nil. +func (cuo *ContributorUpdateOne) SetNillableBio(s *string) *ContributorUpdateOne { + if s != nil { + cuo.SetBio(*s) + } + return cuo +} + +// ClearBio clears the value of the "bio" field. 
+func (cuo *ContributorUpdateOne) ClearBio() *ContributorUpdateOne { + cuo.mutation.ClearBio() + return cuo +} + +// SetCreatedAt sets the "created_at" field. +func (cuo *ContributorUpdateOne) SetCreatedAt(t time.Time) *ContributorUpdateOne { + cuo.mutation.SetCreatedAt(t) + return cuo +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (cuo *ContributorUpdateOne) SetNillableCreatedAt(t *time.Time) *ContributorUpdateOne { + if t != nil { + cuo.SetCreatedAt(*t) + } + return cuo +} + +// SetUpdatedAt sets the "updated_at" field. +func (cuo *ContributorUpdateOne) SetUpdatedAt(t time.Time) *ContributorUpdateOne { + cuo.mutation.SetUpdatedAt(t) + return cuo +} + +// SetUserID sets the "user" edge to the User entity by ID. +func (cuo *ContributorUpdateOne) SetUserID(id int) *ContributorUpdateOne { + cuo.mutation.SetUserID(id) + return cuo +} + +// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil. +func (cuo *ContributorUpdateOne) SetNillableUserID(id *int) *ContributorUpdateOne { + if id != nil { + cuo = cuo.SetUserID(*id) + } + return cuo +} + +// SetUser sets the "user" edge to the User entity. +func (cuo *ContributorUpdateOne) SetUser(u *User) *ContributorUpdateOne { + return cuo.SetUserID(u.ID) +} + +// AddSocialLinkIDs adds the "social_links" edge to the ContributorSocialLink entity by IDs. +func (cuo *ContributorUpdateOne) AddSocialLinkIDs(ids ...int) *ContributorUpdateOne { + cuo.mutation.AddSocialLinkIDs(ids...) + return cuo +} + +// AddSocialLinks adds the "social_links" edges to the ContributorSocialLink entity. +func (cuo *ContributorUpdateOne) AddSocialLinks(c ...*ContributorSocialLink) *ContributorUpdateOne { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return cuo.AddSocialLinkIDs(ids...) +} + +// AddPostIDs adds the "posts" edge to the PostContributor entity by IDs. +func (cuo *ContributorUpdateOne) AddPostIDs(ids ...int) *ContributorUpdateOne { + cuo.mutation.AddPostIDs(ids...) + return cuo +} + +// AddPosts adds the "posts" edges to the PostContributor entity. +func (cuo *ContributorUpdateOne) AddPosts(p ...*PostContributor) *ContributorUpdateOne { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return cuo.AddPostIDs(ids...) +} + +// Mutation returns the ContributorMutation object of the builder. +func (cuo *ContributorUpdateOne) Mutation() *ContributorMutation { + return cuo.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (cuo *ContributorUpdateOne) ClearUser() *ContributorUpdateOne { + cuo.mutation.ClearUser() + return cuo +} + +// ClearSocialLinks clears all "social_links" edges to the ContributorSocialLink entity. +func (cuo *ContributorUpdateOne) ClearSocialLinks() *ContributorUpdateOne { + cuo.mutation.ClearSocialLinks() + return cuo +} + +// RemoveSocialLinkIDs removes the "social_links" edge to ContributorSocialLink entities by IDs. +func (cuo *ContributorUpdateOne) RemoveSocialLinkIDs(ids ...int) *ContributorUpdateOne { + cuo.mutation.RemoveSocialLinkIDs(ids...) + return cuo +} + +// RemoveSocialLinks removes "social_links" edges to ContributorSocialLink entities. +func (cuo *ContributorUpdateOne) RemoveSocialLinks(c ...*ContributorSocialLink) *ContributorUpdateOne { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return cuo.RemoveSocialLinkIDs(ids...) +} + +// ClearPosts clears all "posts" edges to the PostContributor entity. 
+func (cuo *ContributorUpdateOne) ClearPosts() *ContributorUpdateOne { + cuo.mutation.ClearPosts() + return cuo +} + +// RemovePostIDs removes the "posts" edge to PostContributor entities by IDs. +func (cuo *ContributorUpdateOne) RemovePostIDs(ids ...int) *ContributorUpdateOne { + cuo.mutation.RemovePostIDs(ids...) + return cuo +} + +// RemovePosts removes "posts" edges to PostContributor entities. +func (cuo *ContributorUpdateOne) RemovePosts(p ...*PostContributor) *ContributorUpdateOne { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return cuo.RemovePostIDs(ids...) +} + +// Where appends a list of predicates to the ContributorUpdate builder. +func (cuo *ContributorUpdateOne) Where(ps ...predicate.Contributor) *ContributorUpdateOne { + cuo.mutation.Where(ps...) + return cuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (cuo *ContributorUpdateOne) Select(field string, fields ...string) *ContributorUpdateOne { + cuo.fields = append([]string{field}, fields...) + return cuo +} + +// Save executes the query and returns the updated Contributor entity. +func (cuo *ContributorUpdateOne) Save(ctx context.Context) (*Contributor, error) { + cuo.defaults() + return withHooks(ctx, cuo.sqlSave, cuo.mutation, cuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (cuo *ContributorUpdateOne) SaveX(ctx context.Context) *Contributor { + node, err := cuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (cuo *ContributorUpdateOne) Exec(ctx context.Context) error { + _, err := cuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cuo *ContributorUpdateOne) ExecX(ctx context.Context) { + if err := cuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (cuo *ContributorUpdateOne) defaults() { + if _, ok := cuo.mutation.UpdatedAt(); !ok { + v := contributor.UpdateDefaultUpdatedAt() + cuo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder.
+func (cuo *ContributorUpdateOne) check() error { + if v, ok := cuo.mutation.Name(); ok { + if err := contributor.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Contributor.name": %w`, err)} + } + } + return nil +} + +func (cuo *ContributorUpdateOne) sqlSave(ctx context.Context) (_node *Contributor, err error) { + if err := cuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(contributor.Table, contributor.Columns, sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt)) + id, ok := cuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Contributor.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := cuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, contributor.FieldID) + for _, f := range fields { + if !contributor.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != contributor.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := cuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := cuo.mutation.Name(); ok { + _spec.SetField(contributor.FieldName, field.TypeString, value) + } + if value, ok := cuo.mutation.AvatarURL(); ok { + _spec.SetField(contributor.FieldAvatarURL, field.TypeString, value) + } + if cuo.mutation.AvatarURLCleared() { + _spec.ClearField(contributor.FieldAvatarURL, field.TypeString) + } + if value, ok := cuo.mutation.Bio(); ok { + _spec.SetField(contributor.FieldBio, field.TypeString, value) + } + if cuo.mutation.BioCleared() { + _spec.ClearField(contributor.FieldBio, field.TypeString) + } + if value, ok := cuo.mutation.CreatedAt(); ok { + _spec.SetField(contributor.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := cuo.mutation.UpdatedAt(); ok { + _spec.SetField(contributor.FieldUpdatedAt, field.TypeTime, value) + } + if cuo.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: contributor.UserTable, + Columns: []string{contributor.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cuo.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: contributor.UserTable, + Columns: []string{contributor.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if cuo.mutation.SocialLinksCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributor.SocialLinksTable, + Columns: []string{contributor.SocialLinksColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributorsociallink.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cuo.mutation.RemovedSocialLinksIDs(); len(nodes) > 0 && !cuo.mutation.SocialLinksCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributor.SocialLinksTable, 
+ Columns: []string{contributor.SocialLinksColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributorsociallink.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cuo.mutation.SocialLinksIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributor.SocialLinksTable, + Columns: []string{contributor.SocialLinksColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributorsociallink.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if cuo.mutation.PostsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributor.PostsTable, + Columns: []string{contributor.PostsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cuo.mutation.RemovedPostsIDs(); len(nodes) > 0 && !cuo.mutation.PostsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributor.PostsTable, + Columns: []string{contributor.PostsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cuo.mutation.PostsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributor.PostsTable, + Columns: []string{contributor.PostsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Contributor{config: cuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, cuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{contributor.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + cuo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/contributorrole.go b/backend/ent/contributorrole.go new file mode 100644 index 0000000..b0db560 --- /dev/null +++ b/backend/ent/contributorrole.go @@ -0,0 +1,129 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "tss-rocks-be/ent/contributorrole" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// ContributorRole is the model entity for the ContributorRole schema. +type ContributorRole struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the ContributorRoleQuery when eager-loading is set. 
+ Edges ContributorRoleEdges `json:"edges"` + selectValues sql.SelectValues +} + +// ContributorRoleEdges holds the relations/edges for other nodes in the graph. +type ContributorRoleEdges struct { + // PostContributors holds the value of the post_contributors edge. + PostContributors []*PostContributor `json:"post_contributors,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// PostContributorsOrErr returns the PostContributors value or an error if the edge +// was not loaded in eager-loading. +func (e ContributorRoleEdges) PostContributorsOrErr() ([]*PostContributor, error) { + if e.loadedTypes[0] { + return e.PostContributors, nil + } + return nil, &NotLoadedError{edge: "post_contributors"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*ContributorRole) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case contributorrole.FieldID: + values[i] = new(sql.NullInt64) + case contributorrole.FieldName: + values[i] = new(sql.NullString) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the ContributorRole fields. +func (cr *ContributorRole) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case contributorrole.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + cr.ID = int(value.Int64) + case contributorrole.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + cr.Name = value.String + } + default: + cr.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the ContributorRole. +// This includes values selected through modifiers, order, etc. +func (cr *ContributorRole) Value(name string) (ent.Value, error) { + return cr.selectValues.Get(name) +} + +// QueryPostContributors queries the "post_contributors" edge of the ContributorRole entity. +func (cr *ContributorRole) QueryPostContributors() *PostContributorQuery { + return NewContributorRoleClient(cr.config).QueryPostContributors(cr) +} + +// Update returns a builder for updating this ContributorRole. +// Note that you need to call ContributorRole.Unwrap() before calling this method if this ContributorRole +// was returned from a transaction, and the transaction was committed or rolled back. +func (cr *ContributorRole) Update() *ContributorRoleUpdateOne { + return NewContributorRoleClient(cr.config).UpdateOne(cr) +} + +// Unwrap unwraps the ContributorRole entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (cr *ContributorRole) Unwrap() *ContributorRole { + _tx, ok := cr.config.driver.(*txDriver) + if !ok { + panic("ent: ContributorRole is not a transactional entity") + } + cr.config.driver = _tx.drv + return cr +} + +// String implements the fmt.Stringer. 
+func (cr *ContributorRole) String() string { + var builder strings.Builder + builder.WriteString("ContributorRole(") + builder.WriteString(fmt.Sprintf("id=%v, ", cr.ID)) + builder.WriteString("name=") + builder.WriteString(cr.Name) + builder.WriteByte(')') + return builder.String() +} + +// ContributorRoles is a parsable slice of ContributorRole. +type ContributorRoles []*ContributorRole diff --git a/backend/ent/contributorrole/contributorrole.go b/backend/ent/contributorrole/contributorrole.go new file mode 100644 index 0000000..a5e0d15 --- /dev/null +++ b/backend/ent/contributorrole/contributorrole.go @@ -0,0 +1,83 @@ +// Code generated by ent, DO NOT EDIT. + +package contributorrole + +import ( + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the contributorrole type in the database. + Label = "contributor_role" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // EdgePostContributors holds the string denoting the post_contributors edge name in mutations. + EdgePostContributors = "post_contributors" + // Table holds the table name of the contributorrole in the database. + Table = "contributor_roles" + // PostContributorsTable is the table that holds the post_contributors relation/edge. + PostContributorsTable = "post_contributors" + // PostContributorsInverseTable is the table name for the PostContributor entity. + // It exists in this package in order to avoid circular dependency with the "postcontributor" package. + PostContributorsInverseTable = "post_contributors" + // PostContributorsColumn is the table column denoting the post_contributors relation/edge. + PostContributorsColumn = "contributor_role_post_contributors" +) + +// Columns holds all SQL columns for contributorrole fields. +var Columns = []string{ + FieldID, + FieldName, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error +) + +// OrderOption defines the ordering options for the ContributorRole queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByPostContributorsCount orders the results by post_contributors count. +func ByPostContributorsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newPostContributorsStep(), opts...) + } +} + +// ByPostContributors orders the results by post_contributors terms. +func ByPostContributors(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPostContributorsStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newPostContributorsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PostContributorsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, PostContributorsTable, PostContributorsColumn), + ) +} diff --git a/backend/ent/contributorrole/where.go b/backend/ent/contributorrole/where.go new file mode 100644 index 0000000..3a16706 --- /dev/null +++ b/backend/ent/contributorrole/where.go @@ -0,0 +1,163 @@ +// Code generated by ent, DO NOT EDIT. + +package contributorrole + +import ( + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldLTE(FieldID, id)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldEQ(FieldName, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. 
+func NameLT(v string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.ContributorRole { + return predicate.ContributorRole(sql.FieldContainsFold(FieldName, v)) +} + +// HasPostContributors applies the HasEdge predicate on the "post_contributors" edge. +func HasPostContributors() predicate.ContributorRole { + return predicate.ContributorRole(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, PostContributorsTable, PostContributorsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasPostContributorsWith applies the HasEdge predicate on the "post_contributors" edge with given conditions (other predicates). +func HasPostContributorsWith(preds ...predicate.PostContributor) predicate.ContributorRole { + return predicate.ContributorRole(func(s *sql.Selector) { + step := newPostContributorsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.ContributorRole) predicate.ContributorRole { + return predicate.ContributorRole(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.ContributorRole) predicate.ContributorRole { + return predicate.ContributorRole(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.ContributorRole) predicate.ContributorRole { + return predicate.ContributorRole(sql.NotPredicates(p)) +} diff --git a/backend/ent/contributorrole_create.go b/backend/ent/contributorrole_create.go new file mode 100644 index 0000000..277de08 --- /dev/null +++ b/backend/ent/contributorrole_create.go @@ -0,0 +1,220 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "tss-rocks-be/ent/contributorrole" + "tss-rocks-be/ent/postcontributor" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// ContributorRoleCreate is the builder for creating a ContributorRole entity. +type ContributorRoleCreate struct { + config + mutation *ContributorRoleMutation + hooks []Hook +} + +// SetName sets the "name" field.
+func (crc *ContributorRoleCreate) SetName(s string) *ContributorRoleCreate { + crc.mutation.SetName(s) + return crc +} + +// AddPostContributorIDs adds the "post_contributors" edge to the PostContributor entity by IDs. +func (crc *ContributorRoleCreate) AddPostContributorIDs(ids ...int) *ContributorRoleCreate { + crc.mutation.AddPostContributorIDs(ids...) + return crc +} + +// AddPostContributors adds the "post_contributors" edges to the PostContributor entity. +func (crc *ContributorRoleCreate) AddPostContributors(p ...*PostContributor) *ContributorRoleCreate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return crc.AddPostContributorIDs(ids...) +} + +// Mutation returns the ContributorRoleMutation object of the builder. +func (crc *ContributorRoleCreate) Mutation() *ContributorRoleMutation { + return crc.mutation +} + +// Save creates the ContributorRole in the database. +func (crc *ContributorRoleCreate) Save(ctx context.Context) (*ContributorRole, error) { + return withHooks(ctx, crc.sqlSave, crc.mutation, crc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (crc *ContributorRoleCreate) SaveX(ctx context.Context) *ContributorRole { + v, err := crc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (crc *ContributorRoleCreate) Exec(ctx context.Context) error { + _, err := crc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (crc *ContributorRoleCreate) ExecX(ctx context.Context) { + if err := crc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (crc *ContributorRoleCreate) check() error { + if _, ok := crc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "ContributorRole.name"`)} + } + if v, ok := crc.mutation.Name(); ok { + if err := contributorrole.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ContributorRole.name": %w`, err)} + } + } + return nil +} + +func (crc *ContributorRoleCreate) sqlSave(ctx context.Context) (*ContributorRole, error) { + if err := crc.check(); err != nil { + return nil, err + } + _node, _spec := crc.createSpec() + if err := sqlgraph.CreateNode(ctx, crc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + crc.mutation.id = &_node.ID + crc.mutation.done = true + return _node, nil +} + +func (crc *ContributorRoleCreate) createSpec() (*ContributorRole, *sqlgraph.CreateSpec) { + var ( + _node = &ContributorRole{config: crc.config} + _spec = sqlgraph.NewCreateSpec(contributorrole.Table, sqlgraph.NewFieldSpec(contributorrole.FieldID, field.TypeInt)) + ) + if value, ok := crc.mutation.Name(); ok { + _spec.SetField(contributorrole.FieldName, field.TypeString, value) + _node.Name = value + } + if nodes := crc.mutation.PostContributorsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributorrole.PostContributorsTable, + Columns: []string{contributorrole.PostContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) 
+ } + return _node, _spec +} + +// ContributorRoleCreateBulk is the builder for creating many ContributorRole entities in bulk. +type ContributorRoleCreateBulk struct { + config + err error + builders []*ContributorRoleCreate +} + +// Save creates the ContributorRole entities in the database. +func (crcb *ContributorRoleCreateBulk) Save(ctx context.Context) ([]*ContributorRole, error) { + if crcb.err != nil { + return nil, crcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(crcb.builders)) + nodes := make([]*ContributorRole, len(crcb.builders)) + mutators := make([]Mutator, len(crcb.builders)) + for i := range crcb.builders { + func(i int, root context.Context) { + builder := crcb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*ContributorRoleMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, crcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, crcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, crcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (crcb *ContributorRoleCreateBulk) SaveX(ctx context.Context) []*ContributorRole { + v, err := crcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (crcb *ContributorRoleCreateBulk) Exec(ctx context.Context) error { + _, err := crcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (crcb *ContributorRoleCreateBulk) ExecX(ctx context.Context) { + if err := crcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/contributorrole_delete.go b/backend/ent/contributorrole_delete.go new file mode 100644 index 0000000..ac9bbbd --- /dev/null +++ b/backend/ent/contributorrole_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "tss-rocks-be/ent/contributorrole" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// ContributorRoleDelete is the builder for deleting a ContributorRole entity. +type ContributorRoleDelete struct { + config + hooks []Hook + mutation *ContributorRoleMutation +} + +// Where appends a list of predicates to the ContributorRoleDelete builder. +func (crd *ContributorRoleDelete) Where(ps ...predicate.ContributorRole) *ContributorRoleDelete { + crd.mutation.Where(ps...) + return crd +} + +// Exec executes the deletion query and returns how many vertices were deleted.
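[Editor's aside — illustrative only, not part of the patch. A minimal sketch of how the create builders above might be used, assuming the standard ent-generated *ent.Client from backend/ent/client.go in this diff; the package name `sketch`, `client`, and `ctx` are placeholders supplied by the caller.]

package sketch

import (
	"context"

	"tss-rocks-be/ent"
)

// createRole sketches ContributorRoleCreate: SetName is required (check()
// rejects a missing name via contributorrole.NameValidator), and Save runs
// the mutation through any registered hooks before the INSERT.
func createRole(ctx context.Context, client *ent.Client) (*ent.ContributorRole, error) {
	// CreateBulk would batch several of these builders into one statement:
	// client.ContributorRole.CreateBulk(builders...).Save(ctx)
	return client.ContributorRole.Create().
		SetName("editor").
		Save(ctx)
}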
+func (crd *ContributorRoleDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, crd.sqlExec, crd.mutation, crd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (crd *ContributorRoleDelete) ExecX(ctx context.Context) int { + n, err := crd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (crd *ContributorRoleDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(contributorrole.Table, sqlgraph.NewFieldSpec(contributorrole.FieldID, field.TypeInt)) + if ps := crd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, crd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + crd.mutation.done = true + return affected, err +} + +// ContributorRoleDeleteOne is the builder for deleting a single ContributorRole entity. +type ContributorRoleDeleteOne struct { + crd *ContributorRoleDelete +} + +// Where appends a list of predicates to the ContributorRoleDelete builder. +func (crdo *ContributorRoleDeleteOne) Where(ps ...predicate.ContributorRole) *ContributorRoleDeleteOne { + crdo.crd.mutation.Where(ps...) + return crdo +} + +// Exec executes the deletion query. +func (crdo *ContributorRoleDeleteOne) Exec(ctx context.Context) error { + n, err := crdo.crd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{contributorrole.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (crdo *ContributorRoleDeleteOne) ExecX(ctx context.Context) { + if err := crdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/contributorrole_query.go b/backend/ent/contributorrole_query.go new file mode 100644 index 0000000..4d2a0bc --- /dev/null +++ b/backend/ent/contributorrole_query.go @@ -0,0 +1,609 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + "tss-rocks-be/ent/contributorrole" + "tss-rocks-be/ent/postcontributor" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// ContributorRoleQuery is the builder for querying ContributorRole entities. +type ContributorRoleQuery struct { + config + ctx *QueryContext + order []contributorrole.OrderOption + inters []Interceptor + predicates []predicate.ContributorRole + withPostContributors *PostContributorQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the ContributorRoleQuery builder. +func (crq *ContributorRoleQuery) Where(ps ...predicate.ContributorRole) *ContributorRoleQuery { + crq.predicates = append(crq.predicates, ps...) + return crq +} + +// Limit the number of records to be returned by this query. +func (crq *ContributorRoleQuery) Limit(limit int) *ContributorRoleQuery { + crq.ctx.Limit = &limit + return crq +} + +// Offset to start from. +func (crq *ContributorRoleQuery) Offset(offset int) *ContributorRoleQuery { + crq.ctx.Offset = &offset + return crq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method.
+func (crq *ContributorRoleQuery) Unique(unique bool) *ContributorRoleQuery { + crq.ctx.Unique = &unique + return crq +} + +// Order specifies how the records should be ordered. +func (crq *ContributorRoleQuery) Order(o ...contributorrole.OrderOption) *ContributorRoleQuery { + crq.order = append(crq.order, o...) + return crq +} + +// QueryPostContributors chains the current query on the "post_contributors" edge. +func (crq *ContributorRoleQuery) QueryPostContributors() *PostContributorQuery { + query := (&PostContributorClient{config: crq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := crq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := crq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(contributorrole.Table, contributorrole.FieldID, selector), + sqlgraph.To(postcontributor.Table, postcontributor.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, contributorrole.PostContributorsTable, contributorrole.PostContributorsColumn), + ) + fromU = sqlgraph.SetNeighbors(crq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first ContributorRole entity from the query. +// Returns a *NotFoundError when no ContributorRole was found. +func (crq *ContributorRoleQuery) First(ctx context.Context) (*ContributorRole, error) { + nodes, err := crq.Limit(1).All(setContextOp(ctx, crq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{contributorrole.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (crq *ContributorRoleQuery) FirstX(ctx context.Context) *ContributorRole { + node, err := crq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first ContributorRole ID from the query. +// Returns a *NotFoundError when no ContributorRole ID was found. +func (crq *ContributorRoleQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = crq.Limit(1).IDs(setContextOp(ctx, crq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{contributorrole.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (crq *ContributorRoleQuery) FirstIDX(ctx context.Context) int { + id, err := crq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single ContributorRole entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one ContributorRole entity is found. +// Returns a *NotFoundError when no ContributorRole entities are found. +func (crq *ContributorRoleQuery) Only(ctx context.Context) (*ContributorRole, error) { + nodes, err := crq.Limit(2).All(setContextOp(ctx, crq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{contributorrole.Label} + default: + return nil, &NotSingularError{contributorrole.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (crq *ContributorRoleQuery) OnlyX(ctx context.Context) *ContributorRole { + node, err := crq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only ContributorRole ID in the query. 
+// Returns a *NotSingularError when more than one ContributorRole ID is found. +// Returns a *NotFoundError when no entities are found. +func (crq *ContributorRoleQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = crq.Limit(2).IDs(setContextOp(ctx, crq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{contributorrole.Label} + default: + err = &NotSingularError{contributorrole.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (crq *ContributorRoleQuery) OnlyIDX(ctx context.Context) int { + id, err := crq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of ContributorRoles. +func (crq *ContributorRoleQuery) All(ctx context.Context) ([]*ContributorRole, error) { + ctx = setContextOp(ctx, crq.ctx, ent.OpQueryAll) + if err := crq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*ContributorRole, *ContributorRoleQuery]() + return withInterceptors[[]*ContributorRole](ctx, crq, qr, crq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (crq *ContributorRoleQuery) AllX(ctx context.Context) []*ContributorRole { + nodes, err := crq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of ContributorRole IDs. +func (crq *ContributorRoleQuery) IDs(ctx context.Context) (ids []int, err error) { + if crq.ctx.Unique == nil && crq.path != nil { + crq.Unique(true) + } + ctx = setContextOp(ctx, crq.ctx, ent.OpQueryIDs) + if err = crq.Select(contributorrole.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (crq *ContributorRoleQuery) IDsX(ctx context.Context) []int { + ids, err := crq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (crq *ContributorRoleQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, crq.ctx, ent.OpQueryCount) + if err := crq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, crq, querierCount[*ContributorRoleQuery](), crq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (crq *ContributorRoleQuery) CountX(ctx context.Context) int { + count, err := crq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (crq *ContributorRoleQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, crq.ctx, ent.OpQueryExist) + switch _, err := crq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (crq *ContributorRoleQuery) ExistX(ctx context.Context) bool { + exist, err := crq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the ContributorRoleQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. 
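[Editor's aside — illustrative only, not part of the patch. A sketch of how the fetch helpers above compose, under the same assumptions as the earlier aside (placeholder `sketch` package, `client`, `ctx`); NameContainsFold comes from this diff's where.go and WithPostContributors from the eager-loading section just below.]

package sketch

import (
	"context"

	"tss-rocks-be/ent"
	"tss-rocks-be/ent/contributorrole"
)

// editorRoles sketches the query path: Where appends predicates,
// WithPostContributors eager-loads the O2M edge, and All materializes the
// result set (First, Only, Count, and Exist follow the same pattern).
func editorRoles(ctx context.Context, client *ent.Client) ([]*ent.ContributorRole, error) {
	return client.ContributorRole.Query().
		Where(contributorrole.NameContainsFold("edit")).
		WithPostContributors().
		All(ctx)
}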
+func (crq *ContributorRoleQuery) Clone() *ContributorRoleQuery { + if crq == nil { + return nil + } + return &ContributorRoleQuery{ + config: crq.config, + ctx: crq.ctx.Clone(), + order: append([]contributorrole.OrderOption{}, crq.order...), + inters: append([]Interceptor{}, crq.inters...), + predicates: append([]predicate.ContributorRole{}, crq.predicates...), + withPostContributors: crq.withPostContributors.Clone(), + // clone intermediate query. + sql: crq.sql.Clone(), + path: crq.path, + } +} + +// WithPostContributors tells the query-builder to eager-load the nodes that are connected to +// the "post_contributors" edge. The optional arguments are used to configure the query builder of the edge. +func (crq *ContributorRoleQuery) WithPostContributors(opts ...func(*PostContributorQuery)) *ContributorRoleQuery { + query := (&PostContributorClient{config: crq.config}).Query() + for _, opt := range opts { + opt(query) + } + crq.withPostContributors = query + return crq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.ContributorRole.Query(). +// GroupBy(contributorrole.FieldName). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (crq *ContributorRoleQuery) GroupBy(field string, fields ...string) *ContributorRoleGroupBy { + crq.ctx.Fields = append([]string{field}, fields...) + grbuild := &ContributorRoleGroupBy{build: crq} + grbuild.flds = &crq.ctx.Fields + grbuild.label = contributorrole.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection of one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// } +// +// client.ContributorRole.Query(). +// Select(contributorrole.FieldName). +// Scan(ctx, &v) +func (crq *ContributorRoleQuery) Select(fields ...string) *ContributorRoleSelect { + crq.ctx.Fields = append(crq.ctx.Fields, fields...) + sbuild := &ContributorRoleSelect{ContributorRoleQuery: crq} + sbuild.label = contributorrole.Label + sbuild.flds, sbuild.scan = &crq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ContributorRoleSelect configured with the given aggregations. +func (crq *ContributorRoleQuery) Aggregate(fns ...AggregateFunc) *ContributorRoleSelect { + return crq.Select().Aggregate(fns...)
+} + +func (crq *ContributorRoleQuery) prepareQuery(ctx context.Context) error { + for _, inter := range crq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, crq); err != nil { + return err + } + } + } + for _, f := range crq.ctx.Fields { + if !contributorrole.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if crq.path != nil { + prev, err := crq.path(ctx) + if err != nil { + return err + } + crq.sql = prev + } + return nil +} + +func (crq *ContributorRoleQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ContributorRole, error) { + var ( + nodes = []*ContributorRole{} + _spec = crq.querySpec() + loadedTypes = [1]bool{ + crq.withPostContributors != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*ContributorRole).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &ContributorRole{config: crq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, crq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := crq.withPostContributors; query != nil { + if err := crq.loadPostContributors(ctx, query, nodes, + func(n *ContributorRole) { n.Edges.PostContributors = []*PostContributor{} }, + func(n *ContributorRole, e *PostContributor) { + n.Edges.PostContributors = append(n.Edges.PostContributors, e) + }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (crq *ContributorRoleQuery) loadPostContributors(ctx context.Context, query *PostContributorQuery, nodes []*ContributorRole, init func(*ContributorRole), assign func(*ContributorRole, *PostContributor)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*ContributorRole) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.PostContributor(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(contributorrole.PostContributorsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.contributor_role_post_contributors + if fk == nil { + return fmt.Errorf(`foreign-key "contributor_role_post_contributors" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "contributor_role_post_contributors" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (crq *ContributorRoleQuery) sqlCount(ctx context.Context) (int, error) { + _spec := crq.querySpec() + _spec.Node.Columns = crq.ctx.Fields + if len(crq.ctx.Fields) > 0 { + _spec.Unique = crq.ctx.Unique != nil && *crq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, crq.driver, _spec) +} + +func (crq *ContributorRoleQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(contributorrole.Table, contributorrole.Columns, sqlgraph.NewFieldSpec(contributorrole.FieldID, field.TypeInt)) + _spec.From = crq.sql + if unique := crq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if crq.path != nil { + _spec.Unique = true + } + if fields 
:= crq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, contributorrole.FieldID) + for i := range fields { + if fields[i] != contributorrole.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := crq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := crq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := crq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := crq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (crq *ContributorRoleQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(crq.driver.Dialect()) + t1 := builder.Table(contributorrole.Table) + columns := crq.ctx.Fields + if len(columns) == 0 { + columns = contributorrole.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if crq.sql != nil { + selector = crq.sql + selector.Select(selector.Columns(columns...)...) + } + if crq.ctx.Unique != nil && *crq.ctx.Unique { + selector.Distinct() + } + for _, p := range crq.predicates { + p(selector) + } + for _, p := range crq.order { + p(selector) + } + if offset := crq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := crq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ContributorRoleGroupBy is the group-by builder for ContributorRole entities. +type ContributorRoleGroupBy struct { + selector + build *ContributorRoleQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (crgb *ContributorRoleGroupBy) Aggregate(fns ...AggregateFunc) *ContributorRoleGroupBy { + crgb.fns = append(crgb.fns, fns...) + return crgb +} + +// Scan applies the selector query and scans the result into the given value. +func (crgb *ContributorRoleGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, crgb.build.ctx, ent.OpQueryGroupBy) + if err := crgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ContributorRoleQuery, *ContributorRoleGroupBy](ctx, crgb.build, crgb, crgb.build.inters, v) +} + +func (crgb *ContributorRoleGroupBy) sqlScan(ctx context.Context, root *ContributorRoleQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(crgb.fns)) + for _, fn := range crgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*crgb.flds)+len(crgb.fns)) + for _, f := range *crgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*crgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := crgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// ContributorRoleSelect is the builder for selecting fields of ContributorRole entities. 
+type ContributorRoleSelect struct { + *ContributorRoleQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (crs *ContributorRoleSelect) Aggregate(fns ...AggregateFunc) *ContributorRoleSelect { + crs.fns = append(crs.fns, fns...) + return crs +} + +// Scan applies the selector query and scans the result into the given value. +func (crs *ContributorRoleSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, crs.ctx, ent.OpQuerySelect) + if err := crs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ContributorRoleQuery, *ContributorRoleSelect](ctx, crs.ContributorRoleQuery, crs, crs.inters, v) +} + +func (crs *ContributorRoleSelect) sqlScan(ctx context.Context, root *ContributorRoleQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(crs.fns)) + for _, fn := range crs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*crs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := crs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/contributorrole_update.go b/backend/ent/contributorrole_update.go new file mode 100644 index 0000000..4819a90 --- /dev/null +++ b/backend/ent/contributorrole_update.go @@ -0,0 +1,398 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "tss-rocks-be/ent/contributorrole" + "tss-rocks-be/ent/postcontributor" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// ContributorRoleUpdate is the builder for updating ContributorRole entities. +type ContributorRoleUpdate struct { + config + hooks []Hook + mutation *ContributorRoleMutation +} + +// Where appends a list of predicates to the ContributorRoleUpdate builder. +func (cru *ContributorRoleUpdate) Where(ps ...predicate.ContributorRole) *ContributorRoleUpdate { + cru.mutation.Where(ps...) + return cru +} + +// SetName sets the "name" field. +func (cru *ContributorRoleUpdate) SetName(s string) *ContributorRoleUpdate { + cru.mutation.SetName(s) + return cru +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (cru *ContributorRoleUpdate) SetNillableName(s *string) *ContributorRoleUpdate { + if s != nil { + cru.SetName(*s) + } + return cru +} + +// AddPostContributorIDs adds the "post_contributors" edge to the PostContributor entity by IDs. +func (cru *ContributorRoleUpdate) AddPostContributorIDs(ids ...int) *ContributorRoleUpdate { + cru.mutation.AddPostContributorIDs(ids...) + return cru +} + +// AddPostContributors adds the "post_contributors" edges to the PostContributor entity. +func (cru *ContributorRoleUpdate) AddPostContributors(p ...*PostContributor) *ContributorRoleUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return cru.AddPostContributorIDs(ids...) +} + +// Mutation returns the ContributorRoleMutation object of the builder. +func (cru *ContributorRoleUpdate) Mutation() *ContributorRoleMutation { + return cru.mutation +} + +// ClearPostContributors clears all "post_contributors" edges to the PostContributor entity.
+func (cru *ContributorRoleUpdate) ClearPostContributors() *ContributorRoleUpdate { + cru.mutation.ClearPostContributors() + return cru +} + +// RemovePostContributorIDs removes the "post_contributors" edge to PostContributor entities by IDs. +func (cru *ContributorRoleUpdate) RemovePostContributorIDs(ids ...int) *ContributorRoleUpdate { + cru.mutation.RemovePostContributorIDs(ids...) + return cru +} + +// RemovePostContributors removes "post_contributors" edges to PostContributor entities. +func (cru *ContributorRoleUpdate) RemovePostContributors(p ...*PostContributor) *ContributorRoleUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return cru.RemovePostContributorIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (cru *ContributorRoleUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, cru.sqlSave, cru.mutation, cru.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (cru *ContributorRoleUpdate) SaveX(ctx context.Context) int { + affected, err := cru.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (cru *ContributorRoleUpdate) Exec(ctx context.Context) error { + _, err := cru.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cru *ContributorRoleUpdate) ExecX(ctx context.Context) { + if err := cru.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (cru *ContributorRoleUpdate) check() error { + if v, ok := cru.mutation.Name(); ok { + if err := contributorrole.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ContributorRole.name": %w`, err)} + } + } + return nil +} + +func (cru *ContributorRoleUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := cru.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(contributorrole.Table, contributorrole.Columns, sqlgraph.NewFieldSpec(contributorrole.FieldID, field.TypeInt)) + if ps := cru.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := cru.mutation.Name(); ok { + _spec.SetField(contributorrole.FieldName, field.TypeString, value) + } + if cru.mutation.PostContributorsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributorrole.PostContributorsTable, + Columns: []string{contributorrole.PostContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cru.mutation.RemovedPostContributorsIDs(); len(nodes) > 0 && !cru.mutation.PostContributorsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributorrole.PostContributorsTable, + Columns: []string{contributorrole.PostContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cru.mutation.PostContributorsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: 
contributorrole.PostContributorsTable, + Columns: []string{contributorrole.PostContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, cru.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{contributorrole.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + cru.mutation.done = true + return n, nil +} + +// ContributorRoleUpdateOne is the builder for updating a single ContributorRole entity. +type ContributorRoleUpdateOne struct { + config + fields []string + hooks []Hook + mutation *ContributorRoleMutation +} + +// SetName sets the "name" field. +func (cruo *ContributorRoleUpdateOne) SetName(s string) *ContributorRoleUpdateOne { + cruo.mutation.SetName(s) + return cruo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (cruo *ContributorRoleUpdateOne) SetNillableName(s *string) *ContributorRoleUpdateOne { + if s != nil { + cruo.SetName(*s) + } + return cruo +} + +// AddPostContributorIDs adds the "post_contributors" edge to the PostContributor entity by IDs. +func (cruo *ContributorRoleUpdateOne) AddPostContributorIDs(ids ...int) *ContributorRoleUpdateOne { + cruo.mutation.AddPostContributorIDs(ids...) + return cruo +} + +// AddPostContributors adds the "post_contributors" edges to the PostContributor entity. +func (cruo *ContributorRoleUpdateOne) AddPostContributors(p ...*PostContributor) *ContributorRoleUpdateOne { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return cruo.AddPostContributorIDs(ids...) +} + +// Mutation returns the ContributorRoleMutation object of the builder. +func (cruo *ContributorRoleUpdateOne) Mutation() *ContributorRoleMutation { + return cruo.mutation +} + +// ClearPostContributors clears all "post_contributors" edges to the PostContributor entity. +func (cruo *ContributorRoleUpdateOne) ClearPostContributors() *ContributorRoleUpdateOne { + cruo.mutation.ClearPostContributors() + return cruo +} + +// RemovePostContributorIDs removes the "post_contributors" edge to PostContributor entities by IDs. +func (cruo *ContributorRoleUpdateOne) RemovePostContributorIDs(ids ...int) *ContributorRoleUpdateOne { + cruo.mutation.RemovePostContributorIDs(ids...) + return cruo +} + +// RemovePostContributors removes "post_contributors" edges to PostContributor entities. +func (cruo *ContributorRoleUpdateOne) RemovePostContributors(p ...*PostContributor) *ContributorRoleUpdateOne { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return cruo.RemovePostContributorIDs(ids...) +} + +// Where appends a list of predicates to the ContributorRoleUpdate builder. +func (cruo *ContributorRoleUpdateOne) Where(ps ...predicate.ContributorRole) *ContributorRoleUpdateOne { + cruo.mutation.Where(ps...) + return cruo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (cruo *ContributorRoleUpdateOne) Select(field string, fields ...string) *ContributorRoleUpdateOne { + cruo.fields = append([]string{field}, fields...) + return cruo +} + +// Save executes the query and returns the updated ContributorRole entity.
+func (cruo *ContributorRoleUpdateOne) Save(ctx context.Context) (*ContributorRole, error) { + return withHooks(ctx, cruo.sqlSave, cruo.mutation, cruo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (cruo *ContributorRoleUpdateOne) SaveX(ctx context.Context) *ContributorRole { + node, err := cruo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (cruo *ContributorRoleUpdateOne) Exec(ctx context.Context) error { + _, err := cruo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cruo *ContributorRoleUpdateOne) ExecX(ctx context.Context) { + if err := cruo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (cruo *ContributorRoleUpdateOne) check() error { + if v, ok := cruo.mutation.Name(); ok { + if err := contributorrole.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ContributorRole.name": %w`, err)} + } + } + return nil +} + +func (cruo *ContributorRoleUpdateOne) sqlSave(ctx context.Context) (_node *ContributorRole, err error) { + if err := cruo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(contributorrole.Table, contributorrole.Columns, sqlgraph.NewFieldSpec(contributorrole.FieldID, field.TypeInt)) + id, ok := cruo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ContributorRole.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := cruo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, contributorrole.FieldID) + for _, f := range fields { + if !contributorrole.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != contributorrole.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := cruo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := cruo.mutation.Name(); ok { + _spec.SetField(contributorrole.FieldName, field.TypeString, value) + } + if cruo.mutation.PostContributorsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributorrole.PostContributorsTable, + Columns: []string{contributorrole.PostContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cruo.mutation.RemovedPostContributorsIDs(); len(nodes) > 0 && !cruo.mutation.PostContributorsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributorrole.PostContributorsTable, + Columns: []string{contributorrole.PostContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cruo.mutation.PostContributorsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: contributorrole.PostContributorsTable, + Columns: []string{contributorrole.PostContributorsColumn}, + Bidi: 
false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &ContributorRole{config: cruo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, cruo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{contributorrole.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + cruo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/contributorsociallink.go b/backend/ent/contributorsociallink.go new file mode 100644 index 0000000..fe570de --- /dev/null +++ b/backend/ent/contributorsociallink.go @@ -0,0 +1,164 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/contributorsociallink" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// ContributorSocialLink is the model entity for the ContributorSocialLink schema. +type ContributorSocialLink struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Type holds the value of the "type" field. + Type contributorsociallink.Type `json:"type,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Value holds the value of the "value" field. + Value string `json:"value,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the ContributorSocialLinkQuery when eager-loading is set. + Edges ContributorSocialLinkEdges `json:"edges"` + contributor_social_links *int + selectValues sql.SelectValues +} + +// ContributorSocialLinkEdges holds the relations/edges for other nodes in the graph. +type ContributorSocialLinkEdges struct { + // Contributor holds the value of the contributor edge. + Contributor *Contributor `json:"contributor,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// ContributorOrErr returns the Contributor value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e ContributorSocialLinkEdges) ContributorOrErr() (*Contributor, error) { + if e.Contributor != nil { + return e.Contributor, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: contributor.Label} + } + return nil, &NotLoadedError{edge: "contributor"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*ContributorSocialLink) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case contributorsociallink.FieldID: + values[i] = new(sql.NullInt64) + case contributorsociallink.FieldType, contributorsociallink.FieldName, contributorsociallink.FieldValue: + values[i] = new(sql.NullString) + case contributorsociallink.ForeignKeys[0]: // contributor_social_links + values[i] = new(sql.NullInt64) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the ContributorSocialLink fields. 
+func (csl *ContributorSocialLink) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case contributorsociallink.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + csl.ID = int(value.Int64) + case contributorsociallink.FieldType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field type", values[i]) + } else if value.Valid { + csl.Type = contributorsociallink.Type(value.String) + } + case contributorsociallink.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + csl.Name = value.String + } + case contributorsociallink.FieldValue: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field value", values[i]) + } else if value.Valid { + csl.Value = value.String + } + case contributorsociallink.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field contributor_social_links", value) + } else if value.Valid { + csl.contributor_social_links = new(int) + *csl.contributor_social_links = int(value.Int64) + } + default: + csl.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// GetValue returns the ent.Value that was dynamically selected and assigned to the ContributorSocialLink. +// This includes values selected through modifiers, order, etc. +func (csl *ContributorSocialLink) GetValue(name string) (ent.Value, error) { + return csl.selectValues.Get(name) +} + +// QueryContributor queries the "contributor" edge of the ContributorSocialLink entity. +func (csl *ContributorSocialLink) QueryContributor() *ContributorQuery { + return NewContributorSocialLinkClient(csl.config).QueryContributor(csl) +} + +// Update returns a builder for updating this ContributorSocialLink. +// Note that you need to call ContributorSocialLink.Unwrap() before calling this method if this ContributorSocialLink +// was returned from a transaction, and the transaction was committed or rolled back. +func (csl *ContributorSocialLink) Update() *ContributorSocialLinkUpdateOne { + return NewContributorSocialLinkClient(csl.config).UpdateOne(csl) +} + +// Unwrap unwraps the ContributorSocialLink entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (csl *ContributorSocialLink) Unwrap() *ContributorSocialLink { + _tx, ok := csl.config.driver.(*txDriver) + if !ok { + panic("ent: ContributorSocialLink is not a transactional entity") + } + csl.config.driver = _tx.drv + return csl +} + +// String implements the fmt.Stringer. +func (csl *ContributorSocialLink) String() string { + var builder strings.Builder + builder.WriteString("ContributorSocialLink(") + builder.WriteString(fmt.Sprintf("id=%v, ", csl.ID)) + builder.WriteString("type=") + builder.WriteString(fmt.Sprintf("%v", csl.Type)) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(csl.Name) + builder.WriteString(", ") + builder.WriteString("value=") + builder.WriteString(csl.Value) + builder.WriteByte(')') + return builder.String() +} + +// ContributorSocialLinks is a parsable slice of ContributorSocialLink. 
+type ContributorSocialLinks []*ContributorSocialLink diff --git a/backend/ent/contributorsociallink/contributorsociallink.go b/backend/ent/contributorsociallink/contributorsociallink.go new file mode 100644 index 0000000..d1fb495 --- /dev/null +++ b/backend/ent/contributorsociallink/contributorsociallink.go @@ -0,0 +1,132 @@ +// Code generated by ent, DO NOT EDIT. + +package contributorsociallink + +import ( + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the contributorsociallink type in the database. + Label = "contributor_social_link" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldType holds the string denoting the type field in the database. + FieldType = "type" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldValue holds the string denoting the value field in the database. + FieldValue = "value" + // EdgeContributor holds the string denoting the contributor edge name in mutations. + EdgeContributor = "contributor" + // Table holds the table name of the contributorsociallink in the database. + Table = "contributor_social_links" + // ContributorTable is the table that holds the contributor relation/edge. + ContributorTable = "contributor_social_links" + // ContributorInverseTable is the table name for the Contributor entity. + // It exists in this package in order to avoid circular dependency with the "contributor" package. + ContributorInverseTable = "contributors" + // ContributorColumn is the table column denoting the contributor relation/edge. + ContributorColumn = "contributor_social_links" +) + +// Columns holds all SQL columns for contributorsociallink fields. +var Columns = []string{ + FieldID, + FieldType, + FieldName, + FieldValue, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "contributor_social_links" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "contributor_social_links", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // ValueValidator is a validator for the "value" field. It is called by the builders before save. + ValueValidator func(string) error +) + +// Type defines the type for the "type" enum field. +type Type string + +// Type values. +const ( + TypeTWITTER Type = "twitter" + TypeFACEBOOK Type = "facebook" + TypeINSTAGRAM Type = "instagram" + TypeLINKEDIN Type = "linkedin" + TypeGITHUB Type = "github" + TypeWEBSITE Type = "website" +) + +func (_type Type) String() string { + return string(_type) +} + +// TypeValidator is a validator for the "type" field enum values. It is called by the builders before save. +func TypeValidator(_type Type) error { + switch _type { + case TypeTWITTER, TypeFACEBOOK, TypeINSTAGRAM, TypeLINKEDIN, TypeGITHUB, TypeWEBSITE: + return nil + default: + return fmt.Errorf("contributorsociallink: invalid enum value for type field: %q", _type) + } +} + +// OrderOption defines the ordering options for the ContributorSocialLink queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. 
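[Editor's aside — illustrative only, not part of the patch. The Type enum above is a plain string type gated by TypeValidator before save; a quick check of the failure mode, assuming only the generated package:]

package main

import (
	"fmt"

	"tss-rocks-be/ent/contributorsociallink"
)

func main() {
	// A declared enum value passes validation.
	fmt.Println(contributorsociallink.TypeValidator(contributorsociallink.TypeGITHUB)) // <nil>
	// An undeclared value fails with the error format generated above.
	fmt.Println(contributorsociallink.TypeValidator(contributorsociallink.Type("mastodon")))
	// contributorsociallink: invalid enum value for type field: "mastodon"
}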
+func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} + +// ByContributorField orders the results by contributor field. +func ByContributorField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newContributorStep(), sql.OrderByField(field, opts...)) + } +} +func newContributorStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ContributorInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ContributorTable, ContributorColumn), + ) +} diff --git a/backend/ent/contributorsociallink/where.go b/backend/ent/contributorsociallink/where.go new file mode 100644 index 0000000..a4ce4ab --- /dev/null +++ b/backend/ent/contributorsociallink/where.go @@ -0,0 +1,263 @@ +// Code generated by ent, DO NOT EDIT. + +package contributorsociallink + +import ( + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldLTE(FieldID, id)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldEQ(FieldName, v)) +} + +// Value applies equality check predicate on the "value" field. It's identical to ValueEQ. 
+func Value(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldEQ(FieldValue, v)) +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v Type) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldEQ(FieldType, v)) +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v Type) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldNEQ(FieldType, v)) +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...Type) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldIn(FieldType, vs...)) +} + +// TypeNotIn applies the NotIn predicate on the "type" field. +func TypeNotIn(vs ...Type) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldNotIn(FieldType, vs...)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldHasSuffix(FieldName, v)) +} + +// NameIsNil applies the IsNil predicate on the "name" field. +func NameIsNil() predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldIsNull(FieldName)) +} + +// NameNotNil applies the NotNil predicate on the "name" field. +func NameNotNil() predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldNotNull(FieldName)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. 
+func NameEqualFold(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldContainsFold(FieldName, v)) +} + +// ValueEQ applies the EQ predicate on the "value" field. +func ValueEQ(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldEQ(FieldValue, v)) +} + +// ValueNEQ applies the NEQ predicate on the "value" field. +func ValueNEQ(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldNEQ(FieldValue, v)) +} + +// ValueIn applies the In predicate on the "value" field. +func ValueIn(vs ...string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldIn(FieldValue, vs...)) +} + +// ValueNotIn applies the NotIn predicate on the "value" field. +func ValueNotIn(vs ...string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldNotIn(FieldValue, vs...)) +} + +// ValueGT applies the GT predicate on the "value" field. +func ValueGT(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldGT(FieldValue, v)) +} + +// ValueGTE applies the GTE predicate on the "value" field. +func ValueGTE(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldGTE(FieldValue, v)) +} + +// ValueLT applies the LT predicate on the "value" field. +func ValueLT(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldLT(FieldValue, v)) +} + +// ValueLTE applies the LTE predicate on the "value" field. +func ValueLTE(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldLTE(FieldValue, v)) +} + +// ValueContains applies the Contains predicate on the "value" field. +func ValueContains(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldContains(FieldValue, v)) +} + +// ValueHasPrefix applies the HasPrefix predicate on the "value" field. +func ValueHasPrefix(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldHasPrefix(FieldValue, v)) +} + +// ValueHasSuffix applies the HasSuffix predicate on the "value" field. +func ValueHasSuffix(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldHasSuffix(FieldValue, v)) +} + +// ValueEqualFold applies the EqualFold predicate on the "value" field. +func ValueEqualFold(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldEqualFold(FieldValue, v)) +} + +// ValueContainsFold applies the ContainsFold predicate on the "value" field. +func ValueContainsFold(v string) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.FieldContainsFold(FieldValue, v)) +} + +// HasContributor applies the HasEdge predicate on the "contributor" edge. +func HasContributor() predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ContributorTable, ContributorColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasContributorWith applies the HasEdge predicate on the "contributor" edge with a given conditions (other predicates). 
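+//
+// For example, a sketch that filters links by their owning contributor
+// (assumes a configured *ent.Client named client):
+//
+//	links, err := client.ContributorSocialLink.Query().
+//		Where(contributorsociallink.HasContributorWith(contributor.IDEQ(1))).
+//		All(ctx)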
+func HasContributorWith(preds ...predicate.Contributor) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(func(s *sql.Selector) { + step := newContributorStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.ContributorSocialLink) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.ContributorSocialLink) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.ContributorSocialLink) predicate.ContributorSocialLink { + return predicate.ContributorSocialLink(sql.NotPredicates(p)) +} diff --git a/backend/ent/contributorsociallink_create.go b/backend/ent/contributorsociallink_create.go new file mode 100644 index 0000000..a9cac4a --- /dev/null +++ b/backend/ent/contributorsociallink_create.go @@ -0,0 +1,261 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/contributorsociallink" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// ContributorSocialLinkCreate is the builder for creating a ContributorSocialLink entity. +type ContributorSocialLinkCreate struct { + config + mutation *ContributorSocialLinkMutation + hooks []Hook +} + +// SetType sets the "type" field. +func (cslc *ContributorSocialLinkCreate) SetType(c contributorsociallink.Type) *ContributorSocialLinkCreate { + cslc.mutation.SetType(c) + return cslc +} + +// SetName sets the "name" field. +func (cslc *ContributorSocialLinkCreate) SetName(s string) *ContributorSocialLinkCreate { + cslc.mutation.SetName(s) + return cslc +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (cslc *ContributorSocialLinkCreate) SetNillableName(s *string) *ContributorSocialLinkCreate { + if s != nil { + cslc.SetName(*s) + } + return cslc +} + +// SetValue sets the "value" field. +func (cslc *ContributorSocialLinkCreate) SetValue(s string) *ContributorSocialLinkCreate { + cslc.mutation.SetValue(s) + return cslc +} + +// SetContributorID sets the "contributor" edge to the Contributor entity by ID. +func (cslc *ContributorSocialLinkCreate) SetContributorID(id int) *ContributorSocialLinkCreate { + cslc.mutation.SetContributorID(id) + return cslc +} + +// SetNillableContributorID sets the "contributor" edge to the Contributor entity by ID if the given value is not nil. +func (cslc *ContributorSocialLinkCreate) SetNillableContributorID(id *int) *ContributorSocialLinkCreate { + if id != nil { + cslc = cslc.SetContributorID(*id) + } + return cslc +} + +// SetContributor sets the "contributor" edge to the Contributor entity. +func (cslc *ContributorSocialLinkCreate) SetContributor(c *Contributor) *ContributorSocialLinkCreate { + return cslc.SetContributorID(c.ID) +} + +// Mutation returns the ContributorSocialLinkMutation object of the builder. +func (cslc *ContributorSocialLinkCreate) Mutation() *ContributorSocialLinkMutation { + return cslc.mutation +} + +// Save creates the ContributorSocialLink in the database. 
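+//
+// For example, a sketch assuming a configured *ent.Client named client
+// (the URL value is illustrative only):
+//
+//	link, err := client.ContributorSocialLink.
+//		Create().
+//		SetType(contributorsociallink.TypeGITHUB).
+//		SetValue("https://github.com/example").
+//		SetContributorID(1).
+//		Save(ctx)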
+func (cslc *ContributorSocialLinkCreate) Save(ctx context.Context) (*ContributorSocialLink, error) { + return withHooks(ctx, cslc.sqlSave, cslc.mutation, cslc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (cslc *ContributorSocialLinkCreate) SaveX(ctx context.Context) *ContributorSocialLink { + v, err := cslc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (cslc *ContributorSocialLinkCreate) Exec(ctx context.Context) error { + _, err := cslc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cslc *ContributorSocialLinkCreate) ExecX(ctx context.Context) { + if err := cslc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (cslc *ContributorSocialLinkCreate) check() error { + if _, ok := cslc.mutation.GetType(); !ok { + return &ValidationError{Name: "type", err: errors.New(`ent: missing required field "ContributorSocialLink.type"`)} + } + if v, ok := cslc.mutation.GetType(); ok { + if err := contributorsociallink.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "ContributorSocialLink.type": %w`, err)} + } + } + if _, ok := cslc.mutation.Value(); !ok { + return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "ContributorSocialLink.value"`)} + } + if v, ok := cslc.mutation.Value(); ok { + if err := contributorsociallink.ValueValidator(v); err != nil { + return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "ContributorSocialLink.value": %w`, err)} + } + } + return nil +} + +func (cslc *ContributorSocialLinkCreate) sqlSave(ctx context.Context) (*ContributorSocialLink, error) { + if err := cslc.check(); err != nil { + return nil, err + } + _node, _spec := cslc.createSpec() + if err := sqlgraph.CreateNode(ctx, cslc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + cslc.mutation.id = &_node.ID + cslc.mutation.done = true + return _node, nil +} + +func (cslc *ContributorSocialLinkCreate) createSpec() (*ContributorSocialLink, *sqlgraph.CreateSpec) { + var ( + _node = &ContributorSocialLink{config: cslc.config} + _spec = sqlgraph.NewCreateSpec(contributorsociallink.Table, sqlgraph.NewFieldSpec(contributorsociallink.FieldID, field.TypeInt)) + ) + if value, ok := cslc.mutation.GetType(); ok { + _spec.SetField(contributorsociallink.FieldType, field.TypeEnum, value) + _node.Type = value + } + if value, ok := cslc.mutation.Name(); ok { + _spec.SetField(contributorsociallink.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := cslc.mutation.Value(); ok { + _spec.SetField(contributorsociallink.FieldValue, field.TypeString, value) + _node.Value = value + } + if nodes := cslc.mutation.ContributorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: contributorsociallink.ContributorTable, + Columns: []string{contributorsociallink.ContributorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.contributor_social_links = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// 
ContributorSocialLinkCreateBulk is the builder for creating many ContributorSocialLink entities in bulk. +type ContributorSocialLinkCreateBulk struct { + config + err error + builders []*ContributorSocialLinkCreate +} + +// Save creates the ContributorSocialLink entities in the database. +func (cslcb *ContributorSocialLinkCreateBulk) Save(ctx context.Context) ([]*ContributorSocialLink, error) { + if cslcb.err != nil { + return nil, cslcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(cslcb.builders)) + nodes := make([]*ContributorSocialLink, len(cslcb.builders)) + mutators := make([]Mutator, len(cslcb.builders)) + for i := range cslcb.builders { + func(i int, root context.Context) { + builder := cslcb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*ContributorSocialLinkMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, cslcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, cslcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, cslcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (cslcb *ContributorSocialLinkCreateBulk) SaveX(ctx context.Context) []*ContributorSocialLink { + v, err := cslcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (cslcb *ContributorSocialLinkCreateBulk) Exec(ctx context.Context) error { + _, err := cslcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cslcb *ContributorSocialLinkCreateBulk) ExecX(ctx context.Context) { + if err := cslcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/contributorsociallink_delete.go b/backend/ent/contributorsociallink_delete.go new file mode 100644 index 0000000..aa1e303 --- /dev/null +++ b/backend/ent/contributorsociallink_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "tss-rocks-be/ent/contributorsociallink" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// ContributorSocialLinkDelete is the builder for deleting a ContributorSocialLink entity. +type ContributorSocialLinkDelete struct { + config + hooks []Hook + mutation *ContributorSocialLinkMutation +} + +// Where appends a list predicates to the ContributorSocialLinkDelete builder. +func (csld *ContributorSocialLinkDelete) Where(ps ...predicate.ContributorSocialLink) *ContributorSocialLinkDelete { + csld.mutation.Where(ps...) 
+ return csld +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (csld *ContributorSocialLinkDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, csld.sqlExec, csld.mutation, csld.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (csld *ContributorSocialLinkDelete) ExecX(ctx context.Context) int { + n, err := csld.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (csld *ContributorSocialLinkDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(contributorsociallink.Table, sqlgraph.NewFieldSpec(contributorsociallink.FieldID, field.TypeInt)) + if ps := csld.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, csld.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + csld.mutation.done = true + return affected, err +} + +// ContributorSocialLinkDeleteOne is the builder for deleting a single ContributorSocialLink entity. +type ContributorSocialLinkDeleteOne struct { + csld *ContributorSocialLinkDelete +} + +// Where appends a list predicates to the ContributorSocialLinkDelete builder. +func (csldo *ContributorSocialLinkDeleteOne) Where(ps ...predicate.ContributorSocialLink) *ContributorSocialLinkDeleteOne { + csldo.csld.mutation.Where(ps...) + return csldo +} + +// Exec executes the deletion query. +func (csldo *ContributorSocialLinkDeleteOne) Exec(ctx context.Context) error { + n, err := csldo.csld.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{contributorsociallink.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (csldo *ContributorSocialLinkDeleteOne) ExecX(ctx context.Context) { + if err := csldo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/contributorsociallink_query.go b/backend/ent/contributorsociallink_query.go new file mode 100644 index 0000000..1831594 --- /dev/null +++ b/backend/ent/contributorsociallink_query.go @@ -0,0 +1,614 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/contributorsociallink" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// ContributorSocialLinkQuery is the builder for querying ContributorSocialLink entities. +type ContributorSocialLinkQuery struct { + config + ctx *QueryContext + order []contributorsociallink.OrderOption + inters []Interceptor + predicates []predicate.ContributorSocialLink + withContributor *ContributorQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the ContributorSocialLinkQuery builder. +func (cslq *ContributorSocialLinkQuery) Where(ps ...predicate.ContributorSocialLink) *ContributorSocialLinkQuery { + cslq.predicates = append(cslq.predicates, ps...) + return cslq +} + +// Limit the number of records to be returned by this query. +func (cslq *ContributorSocialLinkQuery) Limit(limit int) *ContributorSocialLinkQuery { + cslq.ctx.Limit = &limit + return cslq +} + +// Offset to start from. 
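+//
+// Together with Limit, Offset gives offset-based pagination. A sketch that
+// fetches the second page of ten links (client and ctx assumed):
+//
+//	page2, err := client.ContributorSocialLink.Query().
+//		Limit(10).
+//		Offset(10).
+//		All(ctx)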
+func (cslq *ContributorSocialLinkQuery) Offset(offset int) *ContributorSocialLinkQuery { + cslq.ctx.Offset = &offset + return cslq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (cslq *ContributorSocialLinkQuery) Unique(unique bool) *ContributorSocialLinkQuery { + cslq.ctx.Unique = &unique + return cslq +} + +// Order specifies how the records should be ordered. +func (cslq *ContributorSocialLinkQuery) Order(o ...contributorsociallink.OrderOption) *ContributorSocialLinkQuery { + cslq.order = append(cslq.order, o...) + return cslq +} + +// QueryContributor chains the current query on the "contributor" edge. +func (cslq *ContributorSocialLinkQuery) QueryContributor() *ContributorQuery { + query := (&ContributorClient{config: cslq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := cslq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := cslq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(contributorsociallink.Table, contributorsociallink.FieldID, selector), + sqlgraph.To(contributor.Table, contributor.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, contributorsociallink.ContributorTable, contributorsociallink.ContributorColumn), + ) + fromU = sqlgraph.SetNeighbors(cslq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first ContributorSocialLink entity from the query. +// Returns a *NotFoundError when no ContributorSocialLink was found. +func (cslq *ContributorSocialLinkQuery) First(ctx context.Context) (*ContributorSocialLink, error) { + nodes, err := cslq.Limit(1).All(setContextOp(ctx, cslq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{contributorsociallink.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (cslq *ContributorSocialLinkQuery) FirstX(ctx context.Context) *ContributorSocialLink { + node, err := cslq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first ContributorSocialLink ID from the query. +// Returns a *NotFoundError when no ContributorSocialLink ID was found. +func (cslq *ContributorSocialLinkQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = cslq.Limit(1).IDs(setContextOp(ctx, cslq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{contributorsociallink.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (cslq *ContributorSocialLinkQuery) FirstIDX(ctx context.Context) int { + id, err := cslq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single ContributorSocialLink entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one ContributorSocialLink entity is found. +// Returns a *NotFoundError when no ContributorSocialLink entities are found. 
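+//
+// For example, a sketch assuming at most one link holds the given value:
+//
+//	link, err := client.ContributorSocialLink.Query().
+//		Where(contributorsociallink.ValueEQ("https://example.com")).
+//		Only(ctx)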
+func (cslq *ContributorSocialLinkQuery) Only(ctx context.Context) (*ContributorSocialLink, error) { + nodes, err := cslq.Limit(2).All(setContextOp(ctx, cslq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{contributorsociallink.Label} + default: + return nil, &NotSingularError{contributorsociallink.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (cslq *ContributorSocialLinkQuery) OnlyX(ctx context.Context) *ContributorSocialLink { + node, err := cslq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only ContributorSocialLink ID in the query. +// Returns a *NotSingularError when more than one ContributorSocialLink ID is found. +// Returns a *NotFoundError when no entities are found. +func (cslq *ContributorSocialLinkQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = cslq.Limit(2).IDs(setContextOp(ctx, cslq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{contributorsociallink.Label} + default: + err = &NotSingularError{contributorsociallink.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (cslq *ContributorSocialLinkQuery) OnlyIDX(ctx context.Context) int { + id, err := cslq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of ContributorSocialLinks. +func (cslq *ContributorSocialLinkQuery) All(ctx context.Context) ([]*ContributorSocialLink, error) { + ctx = setContextOp(ctx, cslq.ctx, ent.OpQueryAll) + if err := cslq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*ContributorSocialLink, *ContributorSocialLinkQuery]() + return withInterceptors[[]*ContributorSocialLink](ctx, cslq, qr, cslq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (cslq *ContributorSocialLinkQuery) AllX(ctx context.Context) []*ContributorSocialLink { + nodes, err := cslq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of ContributorSocialLink IDs. +func (cslq *ContributorSocialLinkQuery) IDs(ctx context.Context) (ids []int, err error) { + if cslq.ctx.Unique == nil && cslq.path != nil { + cslq.Unique(true) + } + ctx = setContextOp(ctx, cslq.ctx, ent.OpQueryIDs) + if err = cslq.Select(contributorsociallink.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (cslq *ContributorSocialLinkQuery) IDsX(ctx context.Context) []int { + ids, err := cslq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (cslq *ContributorSocialLinkQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, cslq.ctx, ent.OpQueryCount) + if err := cslq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, cslq, querierCount[*ContributorSocialLinkQuery](), cslq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (cslq *ContributorSocialLinkQuery) CountX(ctx context.Context) int { + count, err := cslq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
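+//
+// For example (client and ctx assumed):
+//
+//	ok, err := client.ContributorSocialLink.Query().
+//		Where(contributorsociallink.TypeEQ(contributorsociallink.TypeGITHUB)).
+//		Exist(ctx)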
+func (cslq *ContributorSocialLinkQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, cslq.ctx, ent.OpQueryExist) + switch _, err := cslq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (cslq *ContributorSocialLinkQuery) ExistX(ctx context.Context) bool { + exist, err := cslq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the ContributorSocialLinkQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (cslq *ContributorSocialLinkQuery) Clone() *ContributorSocialLinkQuery { + if cslq == nil { + return nil + } + return &ContributorSocialLinkQuery{ + config: cslq.config, + ctx: cslq.ctx.Clone(), + order: append([]contributorsociallink.OrderOption{}, cslq.order...), + inters: append([]Interceptor{}, cslq.inters...), + predicates: append([]predicate.ContributorSocialLink{}, cslq.predicates...), + withContributor: cslq.withContributor.Clone(), + // clone intermediate query. + sql: cslq.sql.Clone(), + path: cslq.path, + } +} + +// WithContributor tells the query-builder to eager-load the nodes that are connected to +// the "contributor" edge. The optional arguments are used to configure the query builder of the edge. +func (cslq *ContributorSocialLinkQuery) WithContributor(opts ...func(*ContributorQuery)) *ContributorSocialLinkQuery { + query := (&ContributorClient{config: cslq.config}).Query() + for _, opt := range opts { + opt(query) + } + cslq.withContributor = query + return cslq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Type contributorsociallink.Type `json:"type,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.ContributorSocialLink.Query(). +// GroupBy(contributorsociallink.FieldType). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (cslq *ContributorSocialLinkQuery) GroupBy(field string, fields ...string) *ContributorSocialLinkGroupBy { + cslq.ctx.Fields = append([]string{field}, fields...) + grbuild := &ContributorSocialLinkGroupBy{build: cslq} + grbuild.flds = &cslq.ctx.Fields + grbuild.label = contributorsociallink.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Type contributorsociallink.Type `json:"type,omitempty"` +// } +// +// client.ContributorSocialLink.Query(). +// Select(contributorsociallink.FieldType). +// Scan(ctx, &v) +func (cslq *ContributorSocialLinkQuery) Select(fields ...string) *ContributorSocialLinkSelect { + cslq.ctx.Fields = append(cslq.ctx.Fields, fields...) + sbuild := &ContributorSocialLinkSelect{ContributorSocialLinkQuery: cslq} + sbuild.label = contributorsociallink.Label + sbuild.flds, sbuild.scan = &cslq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ContributorSocialLinkSelect configured with the given aggregations. +func (cslq *ContributorSocialLinkQuery) Aggregate(fns ...AggregateFunc) *ContributorSocialLinkSelect { + return cslq.Select().Aggregate(fns...) 
+} + +func (cslq *ContributorSocialLinkQuery) prepareQuery(ctx context.Context) error { + for _, inter := range cslq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, cslq); err != nil { + return err + } + } + } + for _, f := range cslq.ctx.Fields { + if !contributorsociallink.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if cslq.path != nil { + prev, err := cslq.path(ctx) + if err != nil { + return err + } + cslq.sql = prev + } + return nil +} + +func (cslq *ContributorSocialLinkQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ContributorSocialLink, error) { + var ( + nodes = []*ContributorSocialLink{} + withFKs = cslq.withFKs + _spec = cslq.querySpec() + loadedTypes = [1]bool{ + cslq.withContributor != nil, + } + ) + if cslq.withContributor != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, contributorsociallink.ForeignKeys...) + } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*ContributorSocialLink).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &ContributorSocialLink{config: cslq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, cslq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := cslq.withContributor; query != nil { + if err := cslq.loadContributor(ctx, query, nodes, nil, + func(n *ContributorSocialLink, e *Contributor) { n.Edges.Contributor = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (cslq *ContributorSocialLinkQuery) loadContributor(ctx context.Context, query *ContributorQuery, nodes []*ContributorSocialLink, init func(*ContributorSocialLink), assign func(*ContributorSocialLink, *Contributor)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*ContributorSocialLink) + for i := range nodes { + if nodes[i].contributor_social_links == nil { + continue + } + fk := *nodes[i].contributor_social_links + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(contributor.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "contributor_social_links" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (cslq *ContributorSocialLinkQuery) sqlCount(ctx context.Context) (int, error) { + _spec := cslq.querySpec() + _spec.Node.Columns = cslq.ctx.Fields + if len(cslq.ctx.Fields) > 0 { + _spec.Unique = cslq.ctx.Unique != nil && *cslq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, cslq.driver, _spec) +} + +func (cslq *ContributorSocialLinkQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(contributorsociallink.Table, contributorsociallink.Columns, sqlgraph.NewFieldSpec(contributorsociallink.FieldID, field.TypeInt)) + _spec.From = cslq.sql + if unique := cslq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if cslq.path != nil { + _spec.Unique = true + 
} + if fields := cslq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, contributorsociallink.FieldID) + for i := range fields { + if fields[i] != contributorsociallink.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := cslq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := cslq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := cslq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := cslq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (cslq *ContributorSocialLinkQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(cslq.driver.Dialect()) + t1 := builder.Table(contributorsociallink.Table) + columns := cslq.ctx.Fields + if len(columns) == 0 { + columns = contributorsociallink.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if cslq.sql != nil { + selector = cslq.sql + selector.Select(selector.Columns(columns...)...) + } + if cslq.ctx.Unique != nil && *cslq.ctx.Unique { + selector.Distinct() + } + for _, p := range cslq.predicates { + p(selector) + } + for _, p := range cslq.order { + p(selector) + } + if offset := cslq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := cslq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ContributorSocialLinkGroupBy is the group-by builder for ContributorSocialLink entities. +type ContributorSocialLinkGroupBy struct { + selector + build *ContributorSocialLinkQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (cslgb *ContributorSocialLinkGroupBy) Aggregate(fns ...AggregateFunc) *ContributorSocialLinkGroupBy { + cslgb.fns = append(cslgb.fns, fns...) + return cslgb +} + +// Scan applies the selector query and scans the result into the given value. +func (cslgb *ContributorSocialLinkGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cslgb.build.ctx, ent.OpQueryGroupBy) + if err := cslgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ContributorSocialLinkQuery, *ContributorSocialLinkGroupBy](ctx, cslgb.build, cslgb, cslgb.build.inters, v) +} + +func (cslgb *ContributorSocialLinkGroupBy) sqlScan(ctx context.Context, root *ContributorSocialLinkQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(cslgb.fns)) + for _, fn := range cslgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*cslgb.flds)+len(cslgb.fns)) + for _, f := range *cslgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*cslgb.flds...)...) 
+ if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := cslgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// ContributorSocialLinkSelect is the builder for selecting fields of ContributorSocialLink entities. +type ContributorSocialLinkSelect struct { + *ContributorSocialLinkQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (csls *ContributorSocialLinkSelect) Aggregate(fns ...AggregateFunc) *ContributorSocialLinkSelect { + csls.fns = append(csls.fns, fns...) + return csls +} + +// Scan applies the selector query and scans the result into the given value. +func (csls *ContributorSocialLinkSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, csls.ctx, ent.OpQuerySelect) + if err := csls.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ContributorSocialLinkQuery, *ContributorSocialLinkSelect](ctx, csls.ContributorSocialLinkQuery, csls, csls.inters, v) +} + +func (csls *ContributorSocialLinkSelect) sqlScan(ctx context.Context, root *ContributorSocialLinkQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(csls.fns)) + for _, fn := range csls.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*csls.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := csls.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/contributorsociallink_update.go b/backend/ent/contributorsociallink_update.go new file mode 100644 index 0000000..20d641e --- /dev/null +++ b/backend/ent/contributorsociallink_update.go @@ -0,0 +1,440 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/contributorsociallink" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// ContributorSocialLinkUpdate is the builder for updating ContributorSocialLink entities. +type ContributorSocialLinkUpdate struct { + config + hooks []Hook + mutation *ContributorSocialLinkMutation +} + +// Where appends a list predicates to the ContributorSocialLinkUpdate builder. +func (cslu *ContributorSocialLinkUpdate) Where(ps ...predicate.ContributorSocialLink) *ContributorSocialLinkUpdate { + cslu.mutation.Where(ps...) + return cslu +} + +// SetType sets the "type" field. +func (cslu *ContributorSocialLinkUpdate) SetType(c contributorsociallink.Type) *ContributorSocialLinkUpdate { + cslu.mutation.SetType(c) + return cslu +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (cslu *ContributorSocialLinkUpdate) SetNillableType(c *contributorsociallink.Type) *ContributorSocialLinkUpdate { + if c != nil { + cslu.SetType(*c) + } + return cslu +} + +// SetName sets the "name" field. +func (cslu *ContributorSocialLinkUpdate) SetName(s string) *ContributorSocialLinkUpdate { + cslu.mutation.SetName(s) + return cslu +} + +// SetNillableName sets the "name" field if the given value is not nil. 
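+//
+// For example, a sketch that applies an optional display name coming from a
+// *string that may be nil:
+//
+//	var name *string // e.g. an optional request field
+//	n, err := client.ContributorSocialLink.Update().
+//		Where(contributorsociallink.IDEQ(1)).
+//		SetNillableName(name).
+//		Save(ctx)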
+func (cslu *ContributorSocialLinkUpdate) SetNillableName(s *string) *ContributorSocialLinkUpdate { + if s != nil { + cslu.SetName(*s) + } + return cslu +} + +// ClearName clears the value of the "name" field. +func (cslu *ContributorSocialLinkUpdate) ClearName() *ContributorSocialLinkUpdate { + cslu.mutation.ClearName() + return cslu +} + +// SetValue sets the "value" field. +func (cslu *ContributorSocialLinkUpdate) SetValue(s string) *ContributorSocialLinkUpdate { + cslu.mutation.SetValue(s) + return cslu +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (cslu *ContributorSocialLinkUpdate) SetNillableValue(s *string) *ContributorSocialLinkUpdate { + if s != nil { + cslu.SetValue(*s) + } + return cslu +} + +// SetContributorID sets the "contributor" edge to the Contributor entity by ID. +func (cslu *ContributorSocialLinkUpdate) SetContributorID(id int) *ContributorSocialLinkUpdate { + cslu.mutation.SetContributorID(id) + return cslu +} + +// SetNillableContributorID sets the "contributor" edge to the Contributor entity by ID if the given value is not nil. +func (cslu *ContributorSocialLinkUpdate) SetNillableContributorID(id *int) *ContributorSocialLinkUpdate { + if id != nil { + cslu = cslu.SetContributorID(*id) + } + return cslu +} + +// SetContributor sets the "contributor" edge to the Contributor entity. +func (cslu *ContributorSocialLinkUpdate) SetContributor(c *Contributor) *ContributorSocialLinkUpdate { + return cslu.SetContributorID(c.ID) +} + +// Mutation returns the ContributorSocialLinkMutation object of the builder. +func (cslu *ContributorSocialLinkUpdate) Mutation() *ContributorSocialLinkMutation { + return cslu.mutation +} + +// ClearContributor clears the "contributor" edge to the Contributor entity. +func (cslu *ContributorSocialLinkUpdate) ClearContributor() *ContributorSocialLinkUpdate { + cslu.mutation.ClearContributor() + return cslu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (cslu *ContributorSocialLinkUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, cslu.sqlSave, cslu.mutation, cslu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (cslu *ContributorSocialLinkUpdate) SaveX(ctx context.Context) int { + affected, err := cslu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (cslu *ContributorSocialLinkUpdate) Exec(ctx context.Context) error { + _, err := cslu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cslu *ContributorSocialLinkUpdate) ExecX(ctx context.Context) { + if err := cslu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (cslu *ContributorSocialLinkUpdate) check() error { + if v, ok := cslu.mutation.GetType(); ok { + if err := contributorsociallink.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "ContributorSocialLink.type": %w`, err)} + } + } + if v, ok := cslu.mutation.Value(); ok { + if err := contributorsociallink.ValueValidator(v); err != nil { + return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "ContributorSocialLink.value": %w`, err)} + } + } + return nil +} + +func (cslu *ContributorSocialLinkUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := cslu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(contributorsociallink.Table, contributorsociallink.Columns, sqlgraph.NewFieldSpec(contributorsociallink.FieldID, field.TypeInt)) + if ps := cslu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := cslu.mutation.GetType(); ok { + _spec.SetField(contributorsociallink.FieldType, field.TypeEnum, value) + } + if value, ok := cslu.mutation.Name(); ok { + _spec.SetField(contributorsociallink.FieldName, field.TypeString, value) + } + if cslu.mutation.NameCleared() { + _spec.ClearField(contributorsociallink.FieldName, field.TypeString) + } + if value, ok := cslu.mutation.Value(); ok { + _spec.SetField(contributorsociallink.FieldValue, field.TypeString, value) + } + if cslu.mutation.ContributorCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: contributorsociallink.ContributorTable, + Columns: []string{contributorsociallink.ContributorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := cslu.mutation.ContributorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: contributorsociallink.ContributorTable, + Columns: []string{contributorsociallink.ContributorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, cslu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{contributorsociallink.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + cslu.mutation.done = true + return n, nil +} + +// ContributorSocialLinkUpdateOne is the builder for updating a single ContributorSocialLink entity. +type ContributorSocialLinkUpdateOne struct { + config + fields []string + hooks []Hook + mutation *ContributorSocialLinkMutation +} + +// SetType sets the "type" field. +func (csluo *ContributorSocialLinkUpdateOne) SetType(c contributorsociallink.Type) *ContributorSocialLinkUpdateOne { + csluo.mutation.SetType(c) + return csluo +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (csluo *ContributorSocialLinkUpdateOne) SetNillableType(c *contributorsociallink.Type) *ContributorSocialLinkUpdateOne { + if c != nil { + csluo.SetType(*c) + } + return csluo +} + +// SetName sets the "name" field. 
+func (csluo *ContributorSocialLinkUpdateOne) SetName(s string) *ContributorSocialLinkUpdateOne { + csluo.mutation.SetName(s) + return csluo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (csluo *ContributorSocialLinkUpdateOne) SetNillableName(s *string) *ContributorSocialLinkUpdateOne { + if s != nil { + csluo.SetName(*s) + } + return csluo +} + +// ClearName clears the value of the "name" field. +func (csluo *ContributorSocialLinkUpdateOne) ClearName() *ContributorSocialLinkUpdateOne { + csluo.mutation.ClearName() + return csluo +} + +// SetValue sets the "value" field. +func (csluo *ContributorSocialLinkUpdateOne) SetValue(s string) *ContributorSocialLinkUpdateOne { + csluo.mutation.SetValue(s) + return csluo +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (csluo *ContributorSocialLinkUpdateOne) SetNillableValue(s *string) *ContributorSocialLinkUpdateOne { + if s != nil { + csluo.SetValue(*s) + } + return csluo +} + +// SetContributorID sets the "contributor" edge to the Contributor entity by ID. +func (csluo *ContributorSocialLinkUpdateOne) SetContributorID(id int) *ContributorSocialLinkUpdateOne { + csluo.mutation.SetContributorID(id) + return csluo +} + +// SetNillableContributorID sets the "contributor" edge to the Contributor entity by ID if the given value is not nil. +func (csluo *ContributorSocialLinkUpdateOne) SetNillableContributorID(id *int) *ContributorSocialLinkUpdateOne { + if id != nil { + csluo = csluo.SetContributorID(*id) + } + return csluo +} + +// SetContributor sets the "contributor" edge to the Contributor entity. +func (csluo *ContributorSocialLinkUpdateOne) SetContributor(c *Contributor) *ContributorSocialLinkUpdateOne { + return csluo.SetContributorID(c.ID) +} + +// Mutation returns the ContributorSocialLinkMutation object of the builder. +func (csluo *ContributorSocialLinkUpdateOne) Mutation() *ContributorSocialLinkMutation { + return csluo.mutation +} + +// ClearContributor clears the "contributor" edge to the Contributor entity. +func (csluo *ContributorSocialLinkUpdateOne) ClearContributor() *ContributorSocialLinkUpdateOne { + csluo.mutation.ClearContributor() + return csluo +} + +// Where appends a list predicates to the ContributorSocialLinkUpdate builder. +func (csluo *ContributorSocialLinkUpdateOne) Where(ps ...predicate.ContributorSocialLink) *ContributorSocialLinkUpdateOne { + csluo.mutation.Where(ps...) + return csluo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (csluo *ContributorSocialLinkUpdateOne) Select(field string, fields ...string) *ContributorSocialLinkUpdateOne { + csluo.fields = append([]string{field}, fields...) + return csluo +} + +// Save executes the query and returns the updated ContributorSocialLink entity. +func (csluo *ContributorSocialLinkUpdateOne) Save(ctx context.Context) (*ContributorSocialLink, error) { + return withHooks(ctx, csluo.sqlSave, csluo.mutation, csluo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (csluo *ContributorSocialLinkUpdateOne) SaveX(ctx context.Context) *ContributorSocialLink { + node, err := csluo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (csluo *ContributorSocialLinkUpdateOne) Exec(ctx context.Context) error { + _, err := csluo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
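+//
+// For example, a sketch that updates one link by ID and panics on failure:
+//
+//	client.ContributorSocialLink.UpdateOneID(1).
+//		SetValue("https://example.com").
+//		ExecX(ctx)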
+func (csluo *ContributorSocialLinkUpdateOne) ExecX(ctx context.Context) { + if err := csluo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (csluo *ContributorSocialLinkUpdateOne) check() error { + if v, ok := csluo.mutation.GetType(); ok { + if err := contributorsociallink.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "ContributorSocialLink.type": %w`, err)} + } + } + if v, ok := csluo.mutation.Value(); ok { + if err := contributorsociallink.ValueValidator(v); err != nil { + return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "ContributorSocialLink.value": %w`, err)} + } + } + return nil +} + +func (csluo *ContributorSocialLinkUpdateOne) sqlSave(ctx context.Context) (_node *ContributorSocialLink, err error) { + if err := csluo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(contributorsociallink.Table, contributorsociallink.Columns, sqlgraph.NewFieldSpec(contributorsociallink.FieldID, field.TypeInt)) + id, ok := csluo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ContributorSocialLink.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := csluo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, contributorsociallink.FieldID) + for _, f := range fields { + if !contributorsociallink.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != contributorsociallink.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := csluo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := csluo.mutation.GetType(); ok { + _spec.SetField(contributorsociallink.FieldType, field.TypeEnum, value) + } + if value, ok := csluo.mutation.Name(); ok { + _spec.SetField(contributorsociallink.FieldName, field.TypeString, value) + } + if csluo.mutation.NameCleared() { + _spec.ClearField(contributorsociallink.FieldName, field.TypeString) + } + if value, ok := csluo.mutation.Value(); ok { + _spec.SetField(contributorsociallink.FieldValue, field.TypeString, value) + } + if csluo.mutation.ContributorCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: contributorsociallink.ContributorTable, + Columns: []string{contributorsociallink.ContributorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := csluo.mutation.ContributorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: contributorsociallink.ContributorTable, + Columns: []string{contributorsociallink.ContributorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &ContributorSocialLink{config: csluo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, csluo.driver, _spec); err != nil { + if _, ok := 
err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{contributorsociallink.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + csluo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/daily.go b/backend/ent/daily.go new file mode 100644 index 0000000..a774eac --- /dev/null +++ b/backend/ent/daily.go @@ -0,0 +1,191 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/daily" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// Daily is the model entity for the Daily schema. +type Daily struct { + config `json:"-"` + // ID of the ent. + ID string `json:"id,omitempty"` + // ImageURL holds the value of the "image_url" field. + ImageURL string `json:"image_url,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the DailyQuery when eager-loading is set. + Edges DailyEdges `json:"edges"` + category_daily_items *int + daily_category_daily_items *int + selectValues sql.SelectValues +} + +// DailyEdges holds the relations/edges for other nodes in the graph. +type DailyEdges struct { + // Category holds the value of the category edge. + Category *Category `json:"category,omitempty"` + // Contents holds the value of the contents edge. + Contents []*DailyContent `json:"contents,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// CategoryOrErr returns the Category value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e DailyEdges) CategoryOrErr() (*Category, error) { + if e.Category != nil { + return e.Category, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: category.Label} + } + return nil, &NotLoadedError{edge: "category"} +} + +// ContentsOrErr returns the Contents value or an error if the edge +// was not loaded in eager-loading. +func (e DailyEdges) ContentsOrErr() ([]*DailyContent, error) { + if e.loadedTypes[1] { + return e.Contents, nil + } + return nil, &NotLoadedError{edge: "contents"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Daily) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case daily.FieldID, daily.FieldImageURL: + values[i] = new(sql.NullString) + case daily.FieldCreatedAt, daily.FieldUpdatedAt: + values[i] = new(sql.NullTime) + case daily.ForeignKeys[0]: // category_daily_items + values[i] = new(sql.NullInt64) + case daily.ForeignKeys[1]: // daily_category_daily_items + values[i] = new(sql.NullInt64) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Daily fields. 
+func (d *Daily) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case daily.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + d.ID = value.String + } + case daily.FieldImageURL: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field image_url", values[i]) + } else if value.Valid { + d.ImageURL = value.String + } + case daily.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + d.CreatedAt = value.Time + } + case daily.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + d.UpdatedAt = value.Time + } + case daily.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field category_daily_items", value) + } else if value.Valid { + d.category_daily_items = new(int) + *d.category_daily_items = int(value.Int64) + } + case daily.ForeignKeys[1]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field daily_category_daily_items", value) + } else if value.Valid { + d.daily_category_daily_items = new(int) + *d.daily_category_daily_items = int(value.Int64) + } + default: + d.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Daily. +// This includes values selected through modifiers, order, etc. +func (d *Daily) Value(name string) (ent.Value, error) { + return d.selectValues.Get(name) +} + +// QueryCategory queries the "category" edge of the Daily entity. +func (d *Daily) QueryCategory() *CategoryQuery { + return NewDailyClient(d.config).QueryCategory(d) +} + +// QueryContents queries the "contents" edge of the Daily entity. +func (d *Daily) QueryContents() *DailyContentQuery { + return NewDailyClient(d.config).QueryContents(d) +} + +// Update returns a builder for updating this Daily. +// Note that you need to call Daily.Unwrap() before calling this method if this Daily +// was returned from a transaction, and the transaction was committed or rolled back. +func (d *Daily) Update() *DailyUpdateOne { + return NewDailyClient(d.config).UpdateOne(d) +} + +// Unwrap unwraps the Daily entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (d *Daily) Unwrap() *Daily { + _tx, ok := d.config.driver.(*txDriver) + if !ok { + panic("ent: Daily is not a transactional entity") + } + d.config.driver = _tx.drv + return d +} + +// String implements the fmt.Stringer. 
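A minimal sketch of how the eager-loading accessors above are consumed (illustrative only; `client` is assumed to be an initialized *ent.Client and `ctx` a context.Context, neither of which appears in this hunk):

	d, err := client.Daily.Query().
		WithCategory(). // populates Edges.Category and flips loadedTypes[0]
		First(ctx)
	if err != nil {
		return err
	}
	// CategoryOrErr distinguishes "not eager-loaded" (*NotLoadedError)
	// from "requested but missing" (*NotFoundError).
	cat, err := d.Edges.CategoryOrErr()
	if err != nil {
		return err
	}
	_ = cat // e.g. render cat alongside d.ImageURL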
+func (d *Daily) String() string { + var builder strings.Builder + builder.WriteString("Daily(") + builder.WriteString(fmt.Sprintf("id=%v, ", d.ID)) + builder.WriteString("image_url=") + builder.WriteString(d.ImageURL) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(d.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(d.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// Dailies is a parsable slice of Daily. +type Dailies []*Daily diff --git a/backend/ent/daily/daily.go b/backend/ent/daily/daily.go new file mode 100644 index 0000000..65510ba --- /dev/null +++ b/backend/ent/daily/daily.go @@ -0,0 +1,144 @@ +// Code generated by ent, DO NOT EDIT. + +package daily + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the daily type in the database. + Label = "daily" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldImageURL holds the string denoting the image_url field in the database. + FieldImageURL = "image_url" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // EdgeCategory holds the string denoting the category edge name in mutations. + EdgeCategory = "category" + // EdgeContents holds the string denoting the contents edge name in mutations. + EdgeContents = "contents" + // Table holds the table name of the daily in the database. + Table = "dailies" + // CategoryTable is the table that holds the category relation/edge. + CategoryTable = "dailies" + // CategoryInverseTable is the table name for the Category entity. + // It exists in this package in order to avoid circular dependency with the "category" package. + CategoryInverseTable = "categories" + // CategoryColumn is the table column denoting the category relation/edge. + CategoryColumn = "category_daily_items" + // ContentsTable is the table that holds the contents relation/edge. + ContentsTable = "daily_contents" + // ContentsInverseTable is the table name for the DailyContent entity. + // It exists in this package in order to avoid circular dependency with the "dailycontent" package. + ContentsInverseTable = "daily_contents" + // ContentsColumn is the table column denoting the contents relation/edge. + ContentsColumn = "daily_contents" +) + +// Columns holds all SQL columns for daily fields. +var Columns = []string{ + FieldID, + FieldImageURL, + FieldCreatedAt, + FieldUpdatedAt, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "dailies" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "category_daily_items", + "daily_category_daily_items", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // ImageURLValidator is a validator for the "image_url" field. It is called by the builders before save. + ImageURLValidator func(string) error + // DefaultCreatedAt holds the default value on creation for the "created_at" field. 
+ DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // IDValidator is a validator for the "id" field. It is called by the builders before save. + IDValidator func(string) error +) + +// OrderOption defines the ordering options for the Daily queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByImageURL orders the results by the image_url field. +func ByImageURL(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldImageURL, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByCategoryField orders the results by category field. +func ByCategoryField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newCategoryStep(), sql.OrderByField(field, opts...)) + } +} + +// ByContentsCount orders the results by contents count. +func ByContentsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newContentsStep(), opts...) + } +} + +// ByContents orders the results by contents terms. +func ByContents(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newContentsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newCategoryStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(CategoryInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, CategoryTable, CategoryColumn), + ) +} +func newContentsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ContentsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ContentsTable, ContentsColumn), + ) +} diff --git a/backend/ent/daily/where.go b/backend/ent/daily/where.go new file mode 100644 index 0000000..b029093 --- /dev/null +++ b/backend/ent/daily/where.go @@ -0,0 +1,287 @@ +// Code generated by ent, DO NOT EDIT. + +package daily + +import ( + "time" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.Daily { + return predicate.Daily(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) predicate.Daily { + return predicate.Daily(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.Daily { + return predicate.Daily(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) predicate.Daily { + return predicate.Daily(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. 
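The OrderOption helpers above plug directly into DailyQuery.Order. A short sketch (assumes `client`/`ctx` as before, plus the entgo.io/ent/dialect/sql import for OrderDesc):

	latest, err := client.Daily.Query().
		Order(
			daily.ByCreatedAt(sql.OrderDesc()),     // newest first
			daily.ByContentsCount(sql.OrderDesc()), // tie-break on contents count
		).
		Limit(10).
		All(ctx)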
+func IDNotIn(ids ...string) predicate.Daily { + return predicate.Daily(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.Daily { + return predicate.Daily(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.Daily { + return predicate.Daily(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.Daily { + return predicate.Daily(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) predicate.Daily { + return predicate.Daily(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. +func IDEqualFold(id string) predicate.Daily { + return predicate.Daily(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. +func IDContainsFold(id string) predicate.Daily { + return predicate.Daily(sql.FieldContainsFold(FieldID, id)) +} + +// ImageURL applies equality check predicate on the "image_url" field. It's identical to ImageURLEQ. +func ImageURL(v string) predicate.Daily { + return predicate.Daily(sql.FieldEQ(FieldImageURL, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// ImageURLEQ applies the EQ predicate on the "image_url" field. +func ImageURLEQ(v string) predicate.Daily { + return predicate.Daily(sql.FieldEQ(FieldImageURL, v)) +} + +// ImageURLNEQ applies the NEQ predicate on the "image_url" field. +func ImageURLNEQ(v string) predicate.Daily { + return predicate.Daily(sql.FieldNEQ(FieldImageURL, v)) +} + +// ImageURLIn applies the In predicate on the "image_url" field. +func ImageURLIn(vs ...string) predicate.Daily { + return predicate.Daily(sql.FieldIn(FieldImageURL, vs...)) +} + +// ImageURLNotIn applies the NotIn predicate on the "image_url" field. +func ImageURLNotIn(vs ...string) predicate.Daily { + return predicate.Daily(sql.FieldNotIn(FieldImageURL, vs...)) +} + +// ImageURLGT applies the GT predicate on the "image_url" field. +func ImageURLGT(v string) predicate.Daily { + return predicate.Daily(sql.FieldGT(FieldImageURL, v)) +} + +// ImageURLGTE applies the GTE predicate on the "image_url" field. +func ImageURLGTE(v string) predicate.Daily { + return predicate.Daily(sql.FieldGTE(FieldImageURL, v)) +} + +// ImageURLLT applies the LT predicate on the "image_url" field. +func ImageURLLT(v string) predicate.Daily { + return predicate.Daily(sql.FieldLT(FieldImageURL, v)) +} + +// ImageURLLTE applies the LTE predicate on the "image_url" field. +func ImageURLLTE(v string) predicate.Daily { + return predicate.Daily(sql.FieldLTE(FieldImageURL, v)) +} + +// ImageURLContains applies the Contains predicate on the "image_url" field. +func ImageURLContains(v string) predicate.Daily { + return predicate.Daily(sql.FieldContains(FieldImageURL, v)) +} + +// ImageURLHasPrefix applies the HasPrefix predicate on the "image_url" field. 
+func ImageURLHasPrefix(v string) predicate.Daily { + return predicate.Daily(sql.FieldHasPrefix(FieldImageURL, v)) +} + +// ImageURLHasSuffix applies the HasSuffix predicate on the "image_url" field. +func ImageURLHasSuffix(v string) predicate.Daily { + return predicate.Daily(sql.FieldHasSuffix(FieldImageURL, v)) +} + +// ImageURLEqualFold applies the EqualFold predicate on the "image_url" field. +func ImageURLEqualFold(v string) predicate.Daily { + return predicate.Daily(sql.FieldEqualFold(FieldImageURL, v)) +} + +// ImageURLContainsFold applies the ContainsFold predicate on the "image_url" field. +func ImageURLContainsFold(v string) predicate.Daily { + return predicate.Daily(sql.FieldContainsFold(FieldImageURL, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Daily { + return predicate.Daily(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Daily { + return predicate.Daily(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Daily { + return predicate.Daily(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Daily { + return predicate.Daily(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. 
+func UpdatedAtLT(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Daily { + return predicate.Daily(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// HasCategory applies the HasEdge predicate on the "category" edge. +func HasCategory() predicate.Daily { + return predicate.Daily(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, CategoryTable, CategoryColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasCategoryWith applies the HasEdge predicate on the "category" edge with a given conditions (other predicates). +func HasCategoryWith(preds ...predicate.Category) predicate.Daily { + return predicate.Daily(func(s *sql.Selector) { + step := newCategoryStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasContents applies the HasEdge predicate on the "contents" edge. +func HasContents() predicate.Daily { + return predicate.Daily(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ContentsTable, ContentsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasContentsWith applies the HasEdge predicate on the "contents" edge with a given conditions (other predicates). +func HasContentsWith(preds ...predicate.DailyContent) predicate.Daily { + return predicate.Daily(func(s *sql.Selector) { + step := newContentsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Daily) predicate.Daily { + return predicate.Daily(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Daily) predicate.Daily { + return predicate.Daily(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Daily) predicate.Daily { + return predicate.Daily(sql.NotPredicates(p)) +} diff --git a/backend/ent/daily_create.go b/backend/ent/daily_create.go new file mode 100644 index 0000000..4c996f1 --- /dev/null +++ b/backend/ent/daily_create.go @@ -0,0 +1,325 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/dailycontent" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// DailyCreate is the builder for creating a Daily entity. +type DailyCreate struct { + config + mutation *DailyMutation + hooks []Hook +} + +// SetImageURL sets the "image_url" field. +func (dc *DailyCreate) SetImageURL(s string) *DailyCreate { + dc.mutation.SetImageURL(s) + return dc +} + +// SetCreatedAt sets the "created_at" field. +func (dc *DailyCreate) SetCreatedAt(t time.Time) *DailyCreate { + dc.mutation.SetCreatedAt(t) + return dc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (dc *DailyCreate) SetNillableCreatedAt(t *time.Time) *DailyCreate { + if t != nil { + dc.SetCreatedAt(*t) + } + return dc +} + +// SetUpdatedAt sets the "updated_at" field. 
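The field predicates combine with And/Or/Not and the Has*With edge predicates from this file; a hedged sketch (`client`, `ctx`, and the time.Time value `since` are assumed):

	recent, err := client.Daily.Query().
		Where(
			daily.HasCategoryWith(category.IDEQ(1)),
			daily.Or(
				daily.ImageURLHasPrefix("https://"),
				daily.CreatedAtGTE(since),
			),
		).
		All(ctx)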
+func (dc *DailyCreate) SetUpdatedAt(t time.Time) *DailyCreate { + dc.mutation.SetUpdatedAt(t) + return dc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (dc *DailyCreate) SetNillableUpdatedAt(t *time.Time) *DailyCreate { + if t != nil { + dc.SetUpdatedAt(*t) + } + return dc +} + +// SetID sets the "id" field. +func (dc *DailyCreate) SetID(s string) *DailyCreate { + dc.mutation.SetID(s) + return dc +} + +// SetCategoryID sets the "category" edge to the Category entity by ID. +func (dc *DailyCreate) SetCategoryID(id int) *DailyCreate { + dc.mutation.SetCategoryID(id) + return dc +} + +// SetCategory sets the "category" edge to the Category entity. +func (dc *DailyCreate) SetCategory(c *Category) *DailyCreate { + return dc.SetCategoryID(c.ID) +} + +// AddContentIDs adds the "contents" edge to the DailyContent entity by IDs. +func (dc *DailyCreate) AddContentIDs(ids ...int) *DailyCreate { + dc.mutation.AddContentIDs(ids...) + return dc +} + +// AddContents adds the "contents" edges to the DailyContent entity. +func (dc *DailyCreate) AddContents(d ...*DailyContent) *DailyCreate { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return dc.AddContentIDs(ids...) +} + +// Mutation returns the DailyMutation object of the builder. +func (dc *DailyCreate) Mutation() *DailyMutation { + return dc.mutation +} + +// Save creates the Daily in the database. +func (dc *DailyCreate) Save(ctx context.Context) (*Daily, error) { + dc.defaults() + return withHooks(ctx, dc.sqlSave, dc.mutation, dc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (dc *DailyCreate) SaveX(ctx context.Context) *Daily { + v, err := dc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dc *DailyCreate) Exec(ctx context.Context) error { + _, err := dc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dc *DailyCreate) ExecX(ctx context.Context) { + if err := dc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (dc *DailyCreate) defaults() { + if _, ok := dc.mutation.CreatedAt(); !ok { + v := daily.DefaultCreatedAt() + dc.mutation.SetCreatedAt(v) + } + if _, ok := dc.mutation.UpdatedAt(); !ok { + v := daily.DefaultUpdatedAt() + dc.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (dc *DailyCreate) check() error { + if _, ok := dc.mutation.ImageURL(); !ok { + return &ValidationError{Name: "image_url", err: errors.New(`ent: missing required field "Daily.image_url"`)} + } + if v, ok := dc.mutation.ImageURL(); ok { + if err := daily.ImageURLValidator(v); err != nil { + return &ValidationError{Name: "image_url", err: fmt.Errorf(`ent: validator failed for field "Daily.image_url": %w`, err)} + } + } + if _, ok := dc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Daily.created_at"`)} + } + if _, ok := dc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Daily.updated_at"`)} + } + if v, ok := dc.mutation.ID(); ok { + if err := daily.IDValidator(v); err != nil { + return &ValidationError{Name: "id", err: fmt.Errorf(`ent: validator failed for field "Daily.id": %w`, err)} + } + } + if len(dc.mutation.CategoryIDs()) == 0 { + return &ValidationError{Name: "category", err: errors.New(`ent: missing required edge "Daily.category"`)} + } + return nil +} + +func (dc *DailyCreate) sqlSave(ctx context.Context) (*Daily, error) { + if err := dc.check(); err != nil { + return nil, err + } + _node, _spec := dc.createSpec() + if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected Daily.ID type: %T", _spec.ID.Value) + } + } + dc.mutation.id = &_node.ID + dc.mutation.done = true + return _node, nil +} + +func (dc *DailyCreate) createSpec() (*Daily, *sqlgraph.CreateSpec) { + var ( + _node = &Daily{config: dc.config} + _spec = sqlgraph.NewCreateSpec(daily.Table, sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString)) + ) + if id, ok := dc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := dc.mutation.ImageURL(); ok { + _spec.SetField(daily.FieldImageURL, field.TypeString, value) + _node.ImageURL = value + } + if value, ok := dc.mutation.CreatedAt(); ok { + _spec.SetField(daily.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := dc.mutation.UpdatedAt(); ok { + _spec.SetField(daily.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if nodes := dc.mutation.CategoryIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: daily.CategoryTable, + Columns: []string{daily.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.category_daily_items = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := dc.mutation.ContentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: daily.ContentsTable, + Columns: []string{daily.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// DailyCreateBulk is the builder for creating many Daily entities in bulk. 
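Per check() above, image_url, both timestamps (filled in by defaults()), a valid id, and the category edge are all required before sqlSave runs. A create sketch (`client`/`ctx` assumed; the literal values are illustrative):

	d, err := client.Daily.Create().
		SetID("20250221").                         // must satisfy daily.IDValidator
		SetImageURL("https://example.com/a.webp"). // daily.ImageURLValidator applies
		SetCategoryID(1).                          // omitting the edge fails check()
		Save(ctx)                                  // created_at/updated_at come from defaults()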
+type DailyCreateBulk struct { + config + err error + builders []*DailyCreate +} + +// Save creates the Daily entities in the database. +func (dcb *DailyCreateBulk) Save(ctx context.Context) ([]*Daily, error) { + if dcb.err != nil { + return nil, dcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(dcb.builders)) + nodes := make([]*Daily, len(dcb.builders)) + mutators := make([]Mutator, len(dcb.builders)) + for i := range dcb.builders { + func(i int, root context.Context) { + builder := dcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DailyMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, dcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, dcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (dcb *DailyCreateBulk) SaveX(ctx context.Context) []*Daily { + v, err := dcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dcb *DailyCreateBulk) Exec(ctx context.Context) error { + _, err := dcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dcb *DailyCreateBulk) ExecX(ctx context.Context) { + if err := dcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/daily_delete.go b/backend/ent/daily_delete.go new file mode 100644 index 0000000..62a683e --- /dev/null +++ b/backend/ent/daily_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// DailyDelete is the builder for deleting a Daily entity. +type DailyDelete struct { + config + hooks []Hook + mutation *DailyMutation +} + +// Where appends a list predicates to the DailyDelete builder. +func (dd *DailyDelete) Where(ps ...predicate.Daily) *DailyDelete { + dd.mutation.Where(ps...) + return dd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (dd *DailyDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, dd.sqlExec, dd.mutation, dd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (dd *DailyDelete) ExecX(ctx context.Context) int { + n, err := dd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (dd *DailyDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(daily.Table, sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString)) + if ps := dd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, dd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + dd.mutation.done = true + return affected, err +} + +// DailyDeleteOne is the builder for deleting a single Daily entity. +type DailyDeleteOne struct { + dd *DailyDelete +} + +// Where appends a list predicates to the DailyDelete builder. +func (ddo *DailyDeleteOne) Where(ps ...predicate.Daily) *DailyDeleteOne { + ddo.dd.mutation.Where(ps...) + return ddo +} + +// Exec executes the deletion query. +func (ddo *DailyDeleteOne) Exec(ctx context.Context) error { + n, err := ddo.dd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{daily.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ddo *DailyDeleteOne) ExecX(ctx context.Context) { + if err := ddo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/daily_query.go b/backend/ent/daily_query.go new file mode 100644 index 0000000..f2a33c3 --- /dev/null +++ b/backend/ent/daily_query.go @@ -0,0 +1,690 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/dailycontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// DailyQuery is the builder for querying Daily entities. +type DailyQuery struct { + config + ctx *QueryContext + order []daily.OrderOption + inters []Interceptor + predicates []predicate.Daily + withCategory *CategoryQuery + withContents *DailyContentQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the DailyQuery builder. +func (dq *DailyQuery) Where(ps ...predicate.Daily) *DailyQuery { + dq.predicates = append(dq.predicates, ps...) + return dq +} + +// Limit the number of records to be returned by this query. +func (dq *DailyQuery) Limit(limit int) *DailyQuery { + dq.ctx.Limit = &limit + return dq +} + +// Offset to start from. +func (dq *DailyQuery) Offset(offset int) *DailyQuery { + dq.ctx.Offset = &offset + return dq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (dq *DailyQuery) Unique(unique bool) *DailyQuery { + dq.ctx.Unique = &unique + return dq +} + +// Order specifies how the records should be ordered. +func (dq *DailyQuery) Order(o ...daily.OrderOption) *DailyQuery { + dq.order = append(dq.order, o...) + return dq +} + +// QueryCategory chains the current query on the "category" edge. 
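The two delete builders differ mainly in their return contract: DailyDelete.Exec reports the affected count, while DailyDeleteOne.Exec (above) converts a zero count into *NotFoundError. A sketch (`client`, `ctx`, and the time.Time value `cutoff` are assumed):

	n, err := client.Daily.Delete().
		Where(daily.CreatedAtLT(cutoff)).
		Exec(ctx) // n == 0 is not an error here
	err = client.Daily.DeleteOneID("20250221").Exec(ctx) // *NotFoundError if absent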
+func (dq *DailyQuery) QueryCategory() *CategoryQuery { + query := (&CategoryClient{config: dq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(daily.Table, daily.FieldID, selector), + sqlgraph.To(category.Table, category.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, daily.CategoryTable, daily.CategoryColumn), + ) + fromU = sqlgraph.SetNeighbors(dq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryContents chains the current query on the "contents" edge. +func (dq *DailyQuery) QueryContents() *DailyContentQuery { + query := (&DailyContentClient{config: dq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(daily.Table, daily.FieldID, selector), + sqlgraph.To(dailycontent.Table, dailycontent.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, daily.ContentsTable, daily.ContentsColumn), + ) + fromU = sqlgraph.SetNeighbors(dq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Daily entity from the query. +// Returns a *NotFoundError when no Daily was found. +func (dq *DailyQuery) First(ctx context.Context) (*Daily, error) { + nodes, err := dq.Limit(1).All(setContextOp(ctx, dq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{daily.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (dq *DailyQuery) FirstX(ctx context.Context) *Daily { + node, err := dq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Daily ID from the query. +// Returns a *NotFoundError when no Daily ID was found. +func (dq *DailyQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = dq.Limit(1).IDs(setContextOp(ctx, dq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{daily.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (dq *DailyQuery) FirstIDX(ctx context.Context) string { + id, err := dq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Daily entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Daily entity is found. +// Returns a *NotFoundError when no Daily entities are found. +func (dq *DailyQuery) Only(ctx context.Context) (*Daily, error) { + nodes, err := dq.Limit(2).All(setContextOp(ctx, dq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{daily.Label} + default: + return nil, &NotSingularError{daily.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (dq *DailyQuery) OnlyX(ctx context.Context) *Daily { + node, err := dq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Daily ID in the query. 
+// Returns a *NotSingularError when more than one Daily ID is found. +// Returns a *NotFoundError when no entities are found. +func (dq *DailyQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = dq.Limit(2).IDs(setContextOp(ctx, dq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{daily.Label} + default: + err = &NotSingularError{daily.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (dq *DailyQuery) OnlyIDX(ctx context.Context) string { + id, err := dq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Dailies. +func (dq *DailyQuery) All(ctx context.Context) ([]*Daily, error) { + ctx = setContextOp(ctx, dq.ctx, ent.OpQueryAll) + if err := dq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Daily, *DailyQuery]() + return withInterceptors[[]*Daily](ctx, dq, qr, dq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (dq *DailyQuery) AllX(ctx context.Context) []*Daily { + nodes, err := dq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Daily IDs. +func (dq *DailyQuery) IDs(ctx context.Context) (ids []string, err error) { + if dq.ctx.Unique == nil && dq.path != nil { + dq.Unique(true) + } + ctx = setContextOp(ctx, dq.ctx, ent.OpQueryIDs) + if err = dq.Select(daily.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (dq *DailyQuery) IDsX(ctx context.Context) []string { + ids, err := dq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (dq *DailyQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, dq.ctx, ent.OpQueryCount) + if err := dq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, dq, querierCount[*DailyQuery](), dq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (dq *DailyQuery) CountX(ctx context.Context) int { + count, err := dq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (dq *DailyQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, dq.ctx, ent.OpQueryExist) + switch _, err := dq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (dq *DailyQuery) ExistX(ctx context.Context) bool { + exist, err := dq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the DailyQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (dq *DailyQuery) Clone() *DailyQuery { + if dq == nil { + return nil + } + return &DailyQuery{ + config: dq.config, + ctx: dq.ctx.Clone(), + order: append([]daily.OrderOption{}, dq.order...), + inters: append([]Interceptor{}, dq.inters...), + predicates: append([]predicate.Daily{}, dq.predicates...), + withCategory: dq.withCategory.Clone(), + withContents: dq.withContents.Clone(), + // clone intermediate query. 
+ sql: dq.sql.Clone(), + path: dq.path, + } +} + +// WithCategory tells the query-builder to eager-load the nodes that are connected to +// the "category" edge. The optional arguments are used to configure the query builder of the edge. +func (dq *DailyQuery) WithCategory(opts ...func(*CategoryQuery)) *DailyQuery { + query := (&CategoryClient{config: dq.config}).Query() + for _, opt := range opts { + opt(query) + } + dq.withCategory = query + return dq +} + +// WithContents tells the query-builder to eager-load the nodes that are connected to +// the "contents" edge. The optional arguments are used to configure the query builder of the edge. +func (dq *DailyQuery) WithContents(opts ...func(*DailyContentQuery)) *DailyQuery { + query := (&DailyContentClient{config: dq.config}).Query() + for _, opt := range opts { + opt(query) + } + dq.withContents = query + return dq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// ImageURL string `json:"image_url,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Daily.Query(). +// GroupBy(daily.FieldImageURL). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (dq *DailyQuery) GroupBy(field string, fields ...string) *DailyGroupBy { + dq.ctx.Fields = append([]string{field}, fields...) + grbuild := &DailyGroupBy{build: dq} + grbuild.flds = &dq.ctx.Fields + grbuild.label = daily.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// ImageURL string `json:"image_url,omitempty"` +// } +// +// client.Daily.Query(). +// Select(daily.FieldImageURL). +// Scan(ctx, &v) +func (dq *DailyQuery) Select(fields ...string) *DailySelect { + dq.ctx.Fields = append(dq.ctx.Fields, fields...) + sbuild := &DailySelect{DailyQuery: dq} + sbuild.label = daily.Label + sbuild.flds, sbuild.scan = &dq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a DailySelect configured with the given aggregations. +func (dq *DailyQuery) Aggregate(fns ...AggregateFunc) *DailySelect { + return dq.Select().Aggregate(fns...) +} + +func (dq *DailyQuery) prepareQuery(ctx context.Context) error { + for _, inter := range dq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, dq); err != nil { + return err + } + } + } + for _, f := range dq.ctx.Fields { + if !daily.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if dq.path != nil { + prev, err := dq.path(ctx) + if err != nil { + return err + } + dq.sql = prev + } + return nil +} + +func (dq *DailyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Daily, error) { + var ( + nodes = []*Daily{} + withFKs = dq.withFKs + _spec = dq.querySpec() + loadedTypes = [2]bool{ + dq.withCategory != nil, + dq.withContents != nil, + } + ) + if dq.withCategory != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, daily.ForeignKeys...) 
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Daily).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Daily{config: dq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, dq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := dq.withCategory; query != nil { + if err := dq.loadCategory(ctx, query, nodes, nil, + func(n *Daily, e *Category) { n.Edges.Category = e }); err != nil { + return nil, err + } + } + if query := dq.withContents; query != nil { + if err := dq.loadContents(ctx, query, nodes, + func(n *Daily) { n.Edges.Contents = []*DailyContent{} }, + func(n *Daily, e *DailyContent) { n.Edges.Contents = append(n.Edges.Contents, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (dq *DailyQuery) loadCategory(ctx context.Context, query *CategoryQuery, nodes []*Daily, init func(*Daily), assign func(*Daily, *Category)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*Daily) + for i := range nodes { + if nodes[i].category_daily_items == nil { + continue + } + fk := *nodes[i].category_daily_items + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(category.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "category_daily_items" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (dq *DailyQuery) loadContents(ctx context.Context, query *DailyContentQuery, nodes []*Daily, init func(*Daily), assign func(*Daily, *DailyContent)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[string]*Daily) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.DailyContent(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(daily.ContentsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.daily_contents + if fk == nil { + return fmt.Errorf(`foreign-key "daily_contents" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "daily_contents" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (dq *DailyQuery) sqlCount(ctx context.Context) (int, error) { + _spec := dq.querySpec() + _spec.Node.Columns = dq.ctx.Fields + if len(dq.ctx.Fields) > 0 { + _spec.Unique = dq.ctx.Unique != nil && *dq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, dq.driver, _spec) +} + +func (dq *DailyQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(daily.Table, daily.Columns, sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString)) + _spec.From = dq.sql + if unique := dq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if dq.path != nil { + _spec.Unique = true + } + if fields := dq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, 
daily.FieldID) + for i := range fields { + if fields[i] != daily.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := dq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := dq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := dq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := dq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (dq *DailyQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(dq.driver.Dialect()) + t1 := builder.Table(daily.Table) + columns := dq.ctx.Fields + if len(columns) == 0 { + columns = daily.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if dq.sql != nil { + selector = dq.sql + selector.Select(selector.Columns(columns...)...) + } + if dq.ctx.Unique != nil && *dq.ctx.Unique { + selector.Distinct() + } + for _, p := range dq.predicates { + p(selector) + } + for _, p := range dq.order { + p(selector) + } + if offset := dq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := dq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// DailyGroupBy is the group-by builder for Daily entities. +type DailyGroupBy struct { + selector + build *DailyQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (dgb *DailyGroupBy) Aggregate(fns ...AggregateFunc) *DailyGroupBy { + dgb.fns = append(dgb.fns, fns...) + return dgb +} + +// Scan applies the selector query and scans the result into the given value. +func (dgb *DailyGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, dgb.build.ctx, ent.OpQueryGroupBy) + if err := dgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DailyQuery, *DailyGroupBy](ctx, dgb.build, dgb, dgb.build.inters, v) +} + +func (dgb *DailyGroupBy) sqlScan(ctx context.Context, root *DailyQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(dgb.fns)) + for _, fn := range dgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*dgb.flds)+len(dgb.fns)) + for _, f := range *dgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*dgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// DailySelect is the builder for selecting fields of Daily entities. +type DailySelect struct { + *DailyQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ds *DailySelect) Aggregate(fns ...AggregateFunc) *DailySelect { + ds.fns = append(ds.fns, fns...) + return ds +} + +// Scan applies the selector query and scans the result into the given value. 
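Tying the internals above together (querySpec for predicates/limit/order, loadCategory/loadContents for the two edges), a typical read path looks like this sketch (`client`/`ctx` assumed; predicate values illustrative):

	items, err := client.Daily.Query().
		Where(daily.HasCategoryWith(category.IDEQ(1))).
		WithCategory(). // resolved by loadCategory over the category_daily_items FK
		WithContents(). // resolved by loadContents with an IN (...) predicate
		All(ctx)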
+func (ds *DailySelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ds.ctx, ent.OpQuerySelect) + if err := ds.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DailyQuery, *DailySelect](ctx, ds.DailyQuery, ds, ds.inters, v) +} + +func (ds *DailySelect) sqlScan(ctx context.Context, root *DailyQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ds.fns)) + for _, fn := range ds.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ds.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ds.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/daily_update.go b/backend/ent/daily_update.go new file mode 100644 index 0000000..674d6c3 --- /dev/null +++ b/backend/ent/daily_update.go @@ -0,0 +1,568 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/dailycontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// DailyUpdate is the builder for updating Daily entities. +type DailyUpdate struct { + config + hooks []Hook + mutation *DailyMutation +} + +// Where appends a list predicates to the DailyUpdate builder. +func (du *DailyUpdate) Where(ps ...predicate.Daily) *DailyUpdate { + du.mutation.Where(ps...) + return du +} + +// SetImageURL sets the "image_url" field. +func (du *DailyUpdate) SetImageURL(s string) *DailyUpdate { + du.mutation.SetImageURL(s) + return du +} + +// SetNillableImageURL sets the "image_url" field if the given value is not nil. +func (du *DailyUpdate) SetNillableImageURL(s *string) *DailyUpdate { + if s != nil { + du.SetImageURL(*s) + } + return du +} + +// SetCreatedAt sets the "created_at" field. +func (du *DailyUpdate) SetCreatedAt(t time.Time) *DailyUpdate { + du.mutation.SetCreatedAt(t) + return du +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (du *DailyUpdate) SetNillableCreatedAt(t *time.Time) *DailyUpdate { + if t != nil { + du.SetCreatedAt(*t) + } + return du +} + +// SetUpdatedAt sets the "updated_at" field. +func (du *DailyUpdate) SetUpdatedAt(t time.Time) *DailyUpdate { + du.mutation.SetUpdatedAt(t) + return du +} + +// SetCategoryID sets the "category" edge to the Category entity by ID. +func (du *DailyUpdate) SetCategoryID(id int) *DailyUpdate { + du.mutation.SetCategoryID(id) + return du +} + +// SetCategory sets the "category" edge to the Category entity. +func (du *DailyUpdate) SetCategory(c *Category) *DailyUpdate { + return du.SetCategoryID(c.ID) +} + +// AddContentIDs adds the "contents" edge to the DailyContent entity by IDs. +func (du *DailyUpdate) AddContentIDs(ids ...int) *DailyUpdate { + du.mutation.AddContentIDs(ids...) + return du +} + +// AddContents adds the "contents" edges to the DailyContent entity. +func (du *DailyUpdate) AddContents(d ...*DailyContent) *DailyUpdate { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return du.AddContentIDs(ids...) +} + +// Mutation returns the DailyMutation object of the builder. 
+func (du *DailyUpdate) Mutation() *DailyMutation { + return du.mutation +} + +// ClearCategory clears the "category" edge to the Category entity. +func (du *DailyUpdate) ClearCategory() *DailyUpdate { + du.mutation.ClearCategory() + return du +} + +// ClearContents clears all "contents" edges to the DailyContent entity. +func (du *DailyUpdate) ClearContents() *DailyUpdate { + du.mutation.ClearContents() + return du +} + +// RemoveContentIDs removes the "contents" edge to DailyContent entities by IDs. +func (du *DailyUpdate) RemoveContentIDs(ids ...int) *DailyUpdate { + du.mutation.RemoveContentIDs(ids...) + return du +} + +// RemoveContents removes "contents" edges to DailyContent entities. +func (du *DailyUpdate) RemoveContents(d ...*DailyContent) *DailyUpdate { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return du.RemoveContentIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (du *DailyUpdate) Save(ctx context.Context) (int, error) { + du.defaults() + return withHooks(ctx, du.sqlSave, du.mutation, du.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (du *DailyUpdate) SaveX(ctx context.Context) int { + affected, err := du.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (du *DailyUpdate) Exec(ctx context.Context) error { + _, err := du.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (du *DailyUpdate) ExecX(ctx context.Context) { + if err := du.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (du *DailyUpdate) defaults() { + if _, ok := du.mutation.UpdatedAt(); !ok { + v := daily.UpdateDefaultUpdatedAt() + du.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
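defaults() above stamps updated_at on every save via daily.UpdateDefaultUpdatedAt. A small bulk-update sketch (`client`/`ctx` assumed; values illustrative):

	n, err := client.Daily.Update().
		Where(daily.ImageURLHasPrefix("http://")).
		SetImageURL("https://example.com/fallback.webp").
		Save(ctx) // n is the number of rows updated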
+func (du *DailyUpdate) check() error { + if v, ok := du.mutation.ImageURL(); ok { + if err := daily.ImageURLValidator(v); err != nil { + return &ValidationError{Name: "image_url", err: fmt.Errorf(`ent: validator failed for field "Daily.image_url": %w`, err)} + } + } + if du.mutation.CategoryCleared() && len(du.mutation.CategoryIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "Daily.category"`) + } + return nil +} + +func (du *DailyUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := du.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(daily.Table, daily.Columns, sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString)) + if ps := du.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := du.mutation.ImageURL(); ok { + _spec.SetField(daily.FieldImageURL, field.TypeString, value) + } + if value, ok := du.mutation.CreatedAt(); ok { + _spec.SetField(daily.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := du.mutation.UpdatedAt(); ok { + _spec.SetField(daily.FieldUpdatedAt, field.TypeTime, value) + } + if du.mutation.CategoryCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: daily.CategoryTable, + Columns: []string{daily.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.CategoryIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: daily.CategoryTable, + Columns: []string{daily.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if du.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: daily.ContentsTable, + Columns: []string{daily.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycontent.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.RemovedContentsIDs(); len(nodes) > 0 && !du.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: daily.ContentsTable, + Columns: []string{daily.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := du.mutation.ContentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: daily.ContentsTable, + Columns: []string{daily.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, du.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{daily.Label} + } else if sqlgraph.IsConstraintError(err) { + err = 
+
+// DailyUpdateOne is the builder for updating a single Daily entity.
+type DailyUpdateOne struct {
+	config
+	fields   []string
+	hooks    []Hook
+	mutation *DailyMutation
+}
+
+// SetImageURL sets the "image_url" field.
+func (duo *DailyUpdateOne) SetImageURL(s string) *DailyUpdateOne {
+	duo.mutation.SetImageURL(s)
+	return duo
+}
+
+// SetNillableImageURL sets the "image_url" field if the given value is not nil.
+func (duo *DailyUpdateOne) SetNillableImageURL(s *string) *DailyUpdateOne {
+	if s != nil {
+		duo.SetImageURL(*s)
+	}
+	return duo
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (duo *DailyUpdateOne) SetCreatedAt(t time.Time) *DailyUpdateOne {
+	duo.mutation.SetCreatedAt(t)
+	return duo
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (duo *DailyUpdateOne) SetNillableCreatedAt(t *time.Time) *DailyUpdateOne {
+	if t != nil {
+		duo.SetCreatedAt(*t)
+	}
+	return duo
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (duo *DailyUpdateOne) SetUpdatedAt(t time.Time) *DailyUpdateOne {
+	duo.mutation.SetUpdatedAt(t)
+	return duo
+}
+
+// SetCategoryID sets the "category" edge to the Category entity by ID.
+func (duo *DailyUpdateOne) SetCategoryID(id int) *DailyUpdateOne {
+	duo.mutation.SetCategoryID(id)
+	return duo
+}
+
+// SetCategory sets the "category" edge to the Category entity.
+func (duo *DailyUpdateOne) SetCategory(c *Category) *DailyUpdateOne {
+	return duo.SetCategoryID(c.ID)
+}
+
+// AddContentIDs adds the "contents" edge to the DailyContent entity by IDs.
+func (duo *DailyUpdateOne) AddContentIDs(ids ...int) *DailyUpdateOne {
+	duo.mutation.AddContentIDs(ids...)
+	return duo
+}
+
+// AddContents adds the "contents" edges to the DailyContent entity.
+func (duo *DailyUpdateOne) AddContents(d ...*DailyContent) *DailyUpdateOne {
+	ids := make([]int, len(d))
+	for i := range d {
+		ids[i] = d[i].ID
+	}
+	return duo.AddContentIDs(ids...)
+}
+
+// Mutation returns the DailyMutation object of the builder.
+func (duo *DailyUpdateOne) Mutation() *DailyMutation {
+	return duo.mutation
+}
+
+// ClearCategory clears the "category" edge to the Category entity.
+func (duo *DailyUpdateOne) ClearCategory() *DailyUpdateOne {
+	duo.mutation.ClearCategory()
+	return duo
+}
+
+// ClearContents clears all "contents" edges to the DailyContent entity.
+func (duo *DailyUpdateOne) ClearContents() *DailyUpdateOne {
+	duo.mutation.ClearContents()
+	return duo
+}
+
+// RemoveContentIDs removes the "contents" edge to DailyContent entities by IDs.
+func (duo *DailyUpdateOne) RemoveContentIDs(ids ...int) *DailyUpdateOne {
+	duo.mutation.RemoveContentIDs(ids...)
+	return duo
+}
+
+// RemoveContents removes "contents" edges to DailyContent entities.
+func (duo *DailyUpdateOne) RemoveContents(d ...*DailyContent) *DailyUpdateOne {
+	ids := make([]int, len(d))
+	for i := range d {
+		ids[i] = d[i].ID
+	}
+	return duo.RemoveContentIDs(ids...)
+}
+
+// Where appends a list of predicates to the DailyUpdate builder.
+func (duo *DailyUpdateOne) Where(ps ...predicate.Daily) *DailyUpdateOne {
+	duo.mutation.Where(ps...)
+	return duo
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (duo *DailyUpdateOne) Select(field string, fields ...string) *DailyUpdateOne {
+	duo.fields = append([]string{field}, fields...)
+	return duo
+}
+
+// Save executes the query and returns the updated Daily entity.
+func (duo *DailyUpdateOne) Save(ctx context.Context) (*Daily, error) {
+	duo.defaults()
+	return withHooks(ctx, duo.sqlSave, duo.mutation, duo.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (duo *DailyUpdateOne) SaveX(ctx context.Context) *Daily {
+	node, err := duo.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// Exec executes the query on the entity.
+func (duo *DailyUpdateOne) Exec(ctx context.Context) error {
+	_, err := duo.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (duo *DailyUpdateOne) ExecX(ctx context.Context) {
+	if err := duo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (duo *DailyUpdateOne) defaults() {
+	if _, ok := duo.mutation.UpdatedAt(); !ok {
+		v := daily.UpdateDefaultUpdatedAt()
+		duo.mutation.SetUpdatedAt(v)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (duo *DailyUpdateOne) check() error {
+	if v, ok := duo.mutation.ImageURL(); ok {
+		if err := daily.ImageURLValidator(v); err != nil {
+			return &ValidationError{Name: "image_url", err: fmt.Errorf(`ent: validator failed for field "Daily.image_url": %w`, err)}
+		}
+	}
+	if duo.mutation.CategoryCleared() && len(duo.mutation.CategoryIDs()) > 0 {
+		return errors.New(`ent: clearing a required unique edge "Daily.category"`)
+	}
+	return nil
+}
+
+func (duo *DailyUpdateOne) sqlSave(ctx context.Context) (_node *Daily, err error) {
+	if err := duo.check(); err != nil {
+		return _node, err
+	}
+	_spec := sqlgraph.NewUpdateSpec(daily.Table, daily.Columns, sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString))
+	id, ok := duo.mutation.ID()
+	if !ok {
+		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Daily.id" for update`)}
+	}
+	_spec.Node.ID.Value = id
+	if fields := duo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, daily.FieldID)
+		for _, f := range fields {
+			if !daily.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+			}
+			if f != daily.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
+	if ps := duo.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := duo.mutation.ImageURL(); ok {
+		_spec.SetField(daily.FieldImageURL, field.TypeString, value)
+	}
+	if value, ok := duo.mutation.CreatedAt(); ok {
+		_spec.SetField(daily.FieldCreatedAt, field.TypeTime, value)
+	}
+	if value, ok := duo.mutation.UpdatedAt(); ok {
+		_spec.SetField(daily.FieldUpdatedAt, field.TypeTime, value)
+	}
+	if duo.mutation.CategoryCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   daily.CategoryTable,
+			Columns: []string{daily.CategoryColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt),
+			},
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := duo.mutation.CategoryIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   daily.CategoryTable,
+			Columns: []string{daily.CategoryColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Add = append(_spec.Edges.Add, edge)
+	}
+	if duo.mutation.ContentsCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   daily.ContentsTable,
+			Columns: []string{daily.ContentsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(dailycontent.FieldID, field.TypeInt),
+			},
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := duo.mutation.RemovedContentsIDs(); len(nodes) > 0 && !duo.mutation.ContentsCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   daily.ContentsTable,
+			Columns: []string{daily.ContentsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(dailycontent.FieldID, field.TypeInt),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := duo.mutation.ContentsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   daily.ContentsTable,
+			Columns: []string{daily.ContentsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(dailycontent.FieldID, field.TypeInt),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Add = append(_spec.Edges.Add, edge)
+	}
+	_node = &Daily{config: duo.config}
+	_spec.Assign = _node.assignValues
+	_spec.ScanValues = _node.scanValues
+	if err = sqlgraph.UpdateNode(ctx, duo.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{daily.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	duo.mutation.done = true
+	return _node, nil
+}
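That closes out daily_update.go: DailyUpdate is predicate-scoped and reports a row count, while DailyUpdateOne addresses a single entity by ID and returns the updated *Daily. A hedged sketch of the two side by side (same hypothetical `client`/`ctx` as above; the ID value is invented, since Daily uses string IDs):

	// Backfill a placeholder on every Daily with an empty image_url.
	n, err := client.Daily.Update().
		Where(daily.ImageURL("")).
		SetImageURL("https://example.com/placeholder.webp").
		Save(ctx)

	// Update one Daily and get the entity back; a missing ID surfaces
	// as *ent.NotFoundError from sqlSave.
	d, err := client.Daily.UpdateOneID("20250221-example").
		SetCategoryID(1).
		AddContentIDs(10, 11).
		Save(ctx)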
diff --git a/backend/ent/dailycategory.go b/backend/ent/dailycategory.go
new file mode 100644
index 0000000..604e67e
--- /dev/null
+++ b/backend/ent/dailycategory.go
@@ -0,0 +1,157 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"fmt"
+	"strings"
+	"time"
+	"tss-rocks-be/ent/dailycategory"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+)
+
+// DailyCategory is the model entity for the DailyCategory schema.
+type DailyCategory struct {
+	config `json:"-"`
+	// ID of the ent.
+	ID int `json:"id,omitempty"`
+	// CreatedAt holds the value of the "created_at" field.
+	CreatedAt time.Time `json:"created_at,omitempty"`
+	// UpdatedAt holds the value of the "updated_at" field.
+	UpdatedAt time.Time `json:"updated_at,omitempty"`
+	// Edges holds the relations/edges for other nodes in the graph.
+	// The values are being populated by the DailyCategoryQuery when eager-loading is set.
+	Edges        DailyCategoryEdges `json:"edges"`
+	selectValues sql.SelectValues
+}
+
+// DailyCategoryEdges holds the relations/edges for other nodes in the graph.
+type DailyCategoryEdges struct {
+	// Contents holds the value of the contents edge.
+	Contents []*DailyCategoryContent `json:"contents,omitempty"`
+	// DailyItems holds the value of the daily_items edge.
+	DailyItems []*Daily `json:"daily_items,omitempty"`
+	// loadedTypes holds the information for reporting if a
+	// type was loaded (or requested) in eager-loading or not.
+	loadedTypes [2]bool
+}
+
+// ContentsOrErr returns the Contents value or an error if the edge
+// was not loaded in eager-loading.
+func (e DailyCategoryEdges) ContentsOrErr() ([]*DailyCategoryContent, error) {
+	if e.loadedTypes[0] {
+		return e.Contents, nil
+	}
+	return nil, &NotLoadedError{edge: "contents"}
+}
+
+// DailyItemsOrErr returns the DailyItems value or an error if the edge
+// was not loaded in eager-loading.
+func (e DailyCategoryEdges) DailyItemsOrErr() ([]*Daily, error) {
+	if e.loadedTypes[1] {
+		return e.DailyItems, nil
+	}
+	return nil, &NotLoadedError{edge: "daily_items"}
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*DailyCategory) scanValues(columns []string) ([]any, error) {
+	values := make([]any, len(columns))
+	for i := range columns {
+		switch columns[i] {
+		case dailycategory.FieldID:
+			values[i] = new(sql.NullInt64)
+		case dailycategory.FieldCreatedAt, dailycategory.FieldUpdatedAt:
+			values[i] = new(sql.NullTime)
+		default:
+			values[i] = new(sql.UnknownType)
+		}
+	}
+	return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the DailyCategory fields.
+func (dc *DailyCategory) assignValues(columns []string, values []any) error {
+	if m, n := len(values), len(columns); m < n {
+		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+	}
+	for i := range columns {
+		switch columns[i] {
+		case dailycategory.FieldID:
+			value, ok := values[i].(*sql.NullInt64)
+			if !ok {
+				return fmt.Errorf("unexpected type %T for field id", value)
+			}
+			dc.ID = int(value.Int64)
+		case dailycategory.FieldCreatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field created_at", values[i])
+			} else if value.Valid {
+				dc.CreatedAt = value.Time
+			}
+		case dailycategory.FieldUpdatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
+			} else if value.Valid {
+				dc.UpdatedAt = value.Time
+			}
+		default:
+			dc.selectValues.Set(columns[i], values[i])
+		}
+	}
+	return nil
+}
+
+// Value returns the ent.Value that was dynamically selected and assigned to the DailyCategory.
+// This includes values selected through modifiers, order, etc.
+func (dc *DailyCategory) Value(name string) (ent.Value, error) {
+	return dc.selectValues.Get(name)
+}
+
+// QueryContents queries the "contents" edge of the DailyCategory entity.
+func (dc *DailyCategory) QueryContents() *DailyCategoryContentQuery {
+	return NewDailyCategoryClient(dc.config).QueryContents(dc)
+}
+
+// QueryDailyItems queries the "daily_items" edge of the DailyCategory entity.
+func (dc *DailyCategory) QueryDailyItems() *DailyQuery {
+	return NewDailyCategoryClient(dc.config).QueryDailyItems(dc)
+}
+
+// Update returns a builder for updating this DailyCategory.
+// Note that you need to call DailyCategory.Unwrap() before calling this method if this DailyCategory
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (dc *DailyCategory) Update() *DailyCategoryUpdateOne {
+	return NewDailyCategoryClient(dc.config).UpdateOne(dc)
+}
+
+// Unwrap unwraps the DailyCategory entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (dc *DailyCategory) Unwrap() *DailyCategory {
+	_tx, ok := dc.config.driver.(*txDriver)
+	if !ok {
+		panic("ent: DailyCategory is not a transactional entity")
+	}
+	dc.config.driver = _tx.drv
+	return dc
+}
+
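Unwrap exists for the transaction round-trip: an entity created through a Tx keeps the tx-scoped driver, so once the transaction is committed the entity must be unwrapped before issuing further operations through it. A sketch of that flow under the usual assumptions (hypothetical `client`/`ctx`, inside a function returning error):

	tx, err := client.Tx(ctx)
	if err != nil {
		return err
	}
	dc, err := tx.DailyCategory.Create().Save(ctx)
	if err != nil {
		return tx.Rollback()
	}
	if err := tx.Commit(); err != nil {
		return err
	}
	dc = dc.Unwrap() // switch back to the root driver; panics if dc is not transactional
	_, err = dc.Update().SetUpdatedAt(time.Now()).Save(ctx)
	return err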
+// String implements the fmt.Stringer.
+func (dc *DailyCategory) String() string {
+	var builder strings.Builder
+	builder.WriteString("DailyCategory(")
+	builder.WriteString(fmt.Sprintf("id=%v, ", dc.ID))
+	builder.WriteString("created_at=")
+	builder.WriteString(dc.CreatedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("updated_at=")
+	builder.WriteString(dc.UpdatedAt.Format(time.ANSIC))
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// DailyCategories is a parsable slice of DailyCategory.
+type DailyCategories []*DailyCategory
diff --git a/backend/ent/dailycategory/dailycategory.go b/backend/ent/dailycategory/dailycategory.go
new file mode 100644
index 0000000..a263c5e
--- /dev/null
+++ b/backend/ent/dailycategory/dailycategory.go
@@ -0,0 +1,127 @@
+// Code generated by ent, DO NOT EDIT.
+
+package dailycategory
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+)
+
+const (
+	// Label holds the string label denoting the dailycategory type in the database.
+	Label = "daily_category"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldCreatedAt holds the string denoting the created_at field in the database.
+	FieldCreatedAt = "created_at"
+	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
+	FieldUpdatedAt = "updated_at"
+	// EdgeContents holds the string denoting the contents edge name in mutations.
+	EdgeContents = "contents"
+	// EdgeDailyItems holds the string denoting the daily_items edge name in mutations.
+	EdgeDailyItems = "daily_items"
+	// Table holds the table name of the dailycategory in the database.
+	Table = "daily_categories"
+	// ContentsTable is the table that holds the contents relation/edge.
+	ContentsTable = "daily_category_contents"
+	// ContentsInverseTable is the table name for the DailyCategoryContent entity.
+	// It exists in this package in order to avoid circular dependency with the "dailycategorycontent" package.
+	ContentsInverseTable = "daily_category_contents"
+	// ContentsColumn is the table column denoting the contents relation/edge.
+	ContentsColumn = "daily_category_contents"
+	// DailyItemsTable is the table that holds the daily_items relation/edge.
+	DailyItemsTable = "dailies"
+	// DailyItemsInverseTable is the table name for the Daily entity.
+	// It exists in this package in order to avoid circular dependency with the "daily" package.
+	DailyItemsInverseTable = "dailies"
+	// DailyItemsColumn is the table column denoting the daily_items relation/edge.
+	DailyItemsColumn = "daily_category_daily_items"
+)
+
+// Columns holds all SQL columns for dailycategory fields.
+var Columns = []string{
+	FieldID,
+	FieldCreatedAt,
+	FieldUpdatedAt,
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+	for i := range Columns {
+		if column == Columns[i] {
+			return true
+		}
+	}
+	return false
+}
+
+var (
+	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
+	DefaultCreatedAt func() time.Time
+	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
+	DefaultUpdatedAt func() time.Time
+	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
+	UpdateDefaultUpdatedAt func() time.Time
+)
+
+// OrderOption defines the ordering options for the DailyCategory queries.
+type OrderOption func(*sql.Selector)
+
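The OrderOption helpers defined just below plug straight into Query().Order(...). For example, a sketch that sorts categories newest-first and breaks ties by how many daily items each one holds (hypothetical `client`/`ctx`; `sql` is entgo.io/ent/dialect/sql):

	cats, err := client.DailyCategory.Query().
		Order(
			dailycategory.ByCreatedAt(sql.OrderDesc()),
			dailycategory.ByDailyItemsCount(sql.OrderDesc()),
		).
		All(ctx)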
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByContentsCount orders the results by contents count.
+func ByContentsCount(opts ...sql.OrderTermOption) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborsCount(s, newContentsStep(), opts...)
+	}
+}
+
+// ByContents orders the results by contents terms.
+func ByContents(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborTerms(s, newContentsStep(), append([]sql.OrderTerm{term}, terms...)...)
+	}
+}
+
+// ByDailyItemsCount orders the results by daily_items count.
+func ByDailyItemsCount(opts ...sql.OrderTermOption) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborsCount(s, newDailyItemsStep(), opts...)
+	}
+}
+
+// ByDailyItems orders the results by daily_items terms.
+func ByDailyItems(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborTerms(s, newDailyItemsStep(), append([]sql.OrderTerm{term}, terms...)...)
+	}
+}
+func newContentsStep() *sqlgraph.Step {
+	return sqlgraph.NewStep(
+		sqlgraph.From(Table, FieldID),
+		sqlgraph.To(ContentsInverseTable, FieldID),
+		sqlgraph.Edge(sqlgraph.O2M, false, ContentsTable, ContentsColumn),
+	)
+}
+func newDailyItemsStep() *sqlgraph.Step {
+	return sqlgraph.NewStep(
+		sqlgraph.From(Table, FieldID),
+		sqlgraph.To(DailyItemsInverseTable, FieldID),
+		sqlgraph.Edge(sqlgraph.O2M, false, DailyItemsTable, DailyItemsColumn),
+	)
+}
diff --git a/backend/ent/dailycategory/where.go b/backend/ent/dailycategory/where.go
new file mode 100644
index 0000000..1b5710b
--- /dev/null
+++ b/backend/ent/dailycategory/where.go
@@ -0,0 +1,207 @@
+// Code generated by ent, DO NOT EDIT.
+
+package dailycategory
+
+import (
+	"time"
+	"tss-rocks-be/ent/predicate"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id int) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldEQ(FieldID, id))
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id int) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldEQ(FieldID, id))
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id int) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldNEQ(FieldID, id))
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...int) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldIn(FieldID, ids...))
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...int) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldNotIn(FieldID, ids...))
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id int) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldGT(FieldID, id))
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id int) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldGTE(FieldID, id))
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id int) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldLT(FieldID, id))
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id int) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldLTE(FieldID, id))
+}
+
+// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
+func CreatedAt(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
+func UpdatedAt(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldNEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldNotIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldGT(FieldCreatedAt, v))
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldGTE(FieldCreatedAt, v))
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldLT(FieldCreatedAt, v))
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldLTE(FieldCreatedAt, v))
+}
+
+// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
+func UpdatedAtEQ(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
+func UpdatedAtNEQ(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldNEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtIn applies the In predicate on the "updated_at" field.
+func UpdatedAtIn(vs ...time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
+func UpdatedAtNotIn(vs ...time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldNotIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtGT applies the GT predicate on the "updated_at" field.
+func UpdatedAtGT(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldGT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
+func UpdatedAtGTE(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldGTE(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLT applies the LT predicate on the "updated_at" field.
+func UpdatedAtLT(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldLT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
+func UpdatedAtLTE(v time.Time) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.FieldLTE(FieldUpdatedAt, v))
+}
+
+// HasContents applies the HasEdge predicate on the "contents" edge.
+func HasContents() predicate.DailyCategory {
+	return predicate.DailyCategory(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, ContentsTable, ContentsColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasContentsWith applies the HasEdge predicate on the "contents" edge with given conditions (other predicates).
+func HasContentsWith(preds ...predicate.DailyCategoryContent) predicate.DailyCategory {
+	return predicate.DailyCategory(func(s *sql.Selector) {
+		step := newContentsStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// HasDailyItems applies the HasEdge predicate on the "daily_items" edge.
+func HasDailyItems() predicate.DailyCategory {
+	return predicate.DailyCategory(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, DailyItemsTable, DailyItemsColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasDailyItemsWith applies the HasEdge predicate on the "daily_items" edge with given conditions (other predicates).
+func HasDailyItemsWith(preds ...predicate.Daily) predicate.DailyCategory {
+	return predicate.DailyCategory(func(s *sql.Selector) {
+		step := newDailyItemsStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.DailyCategory) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.DailyCategory) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.DailyCategory) predicate.DailyCategory {
+	return predicate.DailyCategory(sql.NotPredicates(p))
+}
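All of these predicates compose through And/Or/Not and feed any builder's Where (variadic predicates are ANDed implicitly). A sketch that finds stale, empty categories using only helpers defined in this file (hypothetical `client`/`ctx`; the six-month cutoff is an invented retention window):

	cutoff := time.Now().AddDate(0, -6, 0)
	stale, err := client.DailyCategory.Query().
		Where(
			dailycategory.Not(dailycategory.HasDailyItems()),
			dailycategory.Or(
				dailycategory.CreatedAtLT(cutoff),
				dailycategory.UpdatedAtLT(cutoff),
			),
		).
		All(ctx)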
diff --git a/backend/ent/dailycategory_create.go b/backend/ent/dailycategory_create.go
new file mode 100644
index 0000000..6475407
--- /dev/null
+++ b/backend/ent/dailycategory_create.go
@@ -0,0 +1,291 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+	"tss-rocks-be/ent/daily"
+	"tss-rocks-be/ent/dailycategory"
+	"tss-rocks-be/ent/dailycategorycontent"
+
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+)
+
+// DailyCategoryCreate is the builder for creating a DailyCategory entity.
+type DailyCategoryCreate struct {
+	config
+	mutation *DailyCategoryMutation
+	hooks    []Hook
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (dcc *DailyCategoryCreate) SetCreatedAt(t time.Time) *DailyCategoryCreate {
+	dcc.mutation.SetCreatedAt(t)
+	return dcc
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (dcc *DailyCategoryCreate) SetNillableCreatedAt(t *time.Time) *DailyCategoryCreate {
+	if t != nil {
+		dcc.SetCreatedAt(*t)
+	}
+	return dcc
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (dcc *DailyCategoryCreate) SetUpdatedAt(t time.Time) *DailyCategoryCreate {
+	dcc.mutation.SetUpdatedAt(t)
+	return dcc
+}
+
+// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
+func (dcc *DailyCategoryCreate) SetNillableUpdatedAt(t *time.Time) *DailyCategoryCreate {
+	if t != nil {
+		dcc.SetUpdatedAt(*t)
+	}
+	return dcc
+}
+
+// AddContentIDs adds the "contents" edge to the DailyCategoryContent entity by IDs.
+func (dcc *DailyCategoryCreate) AddContentIDs(ids ...int) *DailyCategoryCreate {
+	dcc.mutation.AddContentIDs(ids...)
+	return dcc
+}
+
+// AddContents adds the "contents" edges to the DailyCategoryContent entity.
+func (dcc *DailyCategoryCreate) AddContents(d ...*DailyCategoryContent) *DailyCategoryCreate {
+	ids := make([]int, len(d))
+	for i := range d {
+		ids[i] = d[i].ID
+	}
+	return dcc.AddContentIDs(ids...)
+}
+
+// AddDailyItemIDs adds the "daily_items" edge to the Daily entity by IDs.
+func (dcc *DailyCategoryCreate) AddDailyItemIDs(ids ...string) *DailyCategoryCreate {
+	dcc.mutation.AddDailyItemIDs(ids...)
+	return dcc
+}
+
+// AddDailyItems adds the "daily_items" edges to the Daily entity.
+func (dcc *DailyCategoryCreate) AddDailyItems(d ...*Daily) *DailyCategoryCreate {
+	ids := make([]string, len(d))
+	for i := range d {
+		ids[i] = d[i].ID
+	}
+	return dcc.AddDailyItemIDs(ids...)
+}
+
+// Mutation returns the DailyCategoryMutation object of the builder.
+func (dcc *DailyCategoryCreate) Mutation() *DailyCategoryMutation {
+	return dcc.mutation
+}
+
+// Save creates the DailyCategory in the database.
+func (dcc *DailyCategoryCreate) Save(ctx context.Context) (*DailyCategory, error) {
+	dcc.defaults()
+	return withHooks(ctx, dcc.sqlSave, dcc.mutation, dcc.hooks)
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (dcc *DailyCategoryCreate) SaveX(ctx context.Context) *DailyCategory {
+	v, err := dcc.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (dcc *DailyCategoryCreate) Exec(ctx context.Context) error {
+	_, err := dcc.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dcc *DailyCategoryCreate) ExecX(ctx context.Context) {
+	if err := dcc.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (dcc *DailyCategoryCreate) defaults() {
+	if _, ok := dcc.mutation.CreatedAt(); !ok {
+		v := dailycategory.DefaultCreatedAt()
+		dcc.mutation.SetCreatedAt(v)
+	}
+	if _, ok := dcc.mutation.UpdatedAt(); !ok {
+		v := dailycategory.DefaultUpdatedAt()
+		dcc.mutation.SetUpdatedAt(v)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (dcc *DailyCategoryCreate) check() error {
+	if _, ok := dcc.mutation.CreatedAt(); !ok {
+		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "DailyCategory.created_at"`)}
+	}
+	if _, ok := dcc.mutation.UpdatedAt(); !ok {
+		return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "DailyCategory.updated_at"`)}
+	}
+	return nil
+}
+
+func (dcc *DailyCategoryCreate) sqlSave(ctx context.Context) (*DailyCategory, error) {
+	if err := dcc.check(); err != nil {
+		return nil, err
+	}
+	_node, _spec := dcc.createSpec()
+	if err := sqlgraph.CreateNode(ctx, dcc.driver, _spec); err != nil {
+		if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	id := _spec.ID.Value.(int64)
+	_node.ID = int(id)
+	dcc.mutation.id = &_node.ID
+	dcc.mutation.done = true
+	return _node, nil
+}
+
+func (dcc *DailyCategoryCreate) createSpec() (*DailyCategory, *sqlgraph.CreateSpec) {
+	var (
+		_node = &DailyCategory{config: dcc.config}
+		_spec = sqlgraph.NewCreateSpec(dailycategory.Table, sqlgraph.NewFieldSpec(dailycategory.FieldID, field.TypeInt))
+	)
+	if value, ok := dcc.mutation.CreatedAt(); ok {
+		_spec.SetField(dailycategory.FieldCreatedAt, field.TypeTime, value)
+		_node.CreatedAt = value
+	}
+	if value, ok := dcc.mutation.UpdatedAt(); ok {
+		_spec.SetField(dailycategory.FieldUpdatedAt, field.TypeTime, value)
+		_node.UpdatedAt = value
+	}
+	if nodes := dcc.mutation.ContentsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   dailycategory.ContentsTable,
+			Columns: []string{dailycategory.ContentsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(dailycategorycontent.FieldID, field.TypeInt),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	if nodes := dcc.mutation.DailyItemsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   dailycategory.DailyItemsTable,
+			Columns: []string{dailycategory.DailyItemsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// DailyCategoryCreateBulk is the builder for creating many DailyCategory entities in bulk.
+type DailyCategoryCreateBulk struct {
+	config
+	err      error
+	builders []*DailyCategoryCreate
+}
+
+// Save creates the DailyCategory entities in the database.
+func (dccb *DailyCategoryCreateBulk) Save(ctx context.Context) ([]*DailyCategory, error) {
+	if dccb.err != nil {
+		return nil, dccb.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(dccb.builders))
+	nodes := make([]*DailyCategory, len(dccb.builders))
+	mutators := make([]Mutator, len(dccb.builders))
+	for i := range dccb.builders {
+		func(i int, root context.Context) {
+			builder := dccb.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*DailyCategoryMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, dccb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, dccb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, dccb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (dccb *DailyCategoryCreateBulk) SaveX(ctx context.Context) []*DailyCategory {
+	v, err := dccb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (dccb *DailyCategoryCreateBulk) Exec(ctx context.Context) error {
+	_, err := dccb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dccb *DailyCategoryCreateBulk) ExecX(ctx context.Context) {
+	if err := dccb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
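Usage is symmetric with the other entities: Create().Save for one row, CreateBulk for a batch (each builder still passes through defaults() and check()), and the DailyCategoryDelete builder that follows for removal. A sketch (hypothetical `client`/`ctx`):

	// One category; created_at/updated_at come from defaults().
	dc, err := client.DailyCategory.Create().Save(ctx)

	// A batch of three in a single INSERT.
	builders := make([]*ent.DailyCategoryCreate, 3)
	for i := range builders {
		builders[i] = client.DailyCategory.Create()
	}
	cats, err := client.DailyCategory.CreateBulk(builders...).Save(ctx)

	// And removal, via the delete builder defined next.
	n, err := client.DailyCategory.Delete().
		Where(dailycategory.IDIn(dc.ID)).
		Exec(ctx)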
diff --git a/backend/ent/dailycategory_delete.go b/backend/ent/dailycategory_delete.go
new file mode 100644
index 0000000..94b5abd
--- /dev/null
+++ b/backend/ent/dailycategory_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"tss-rocks-be/ent/dailycategory"
+	"tss-rocks-be/ent/predicate"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+)
+
+// DailyCategoryDelete is the builder for deleting a DailyCategory entity.
+type DailyCategoryDelete struct {
+	config
+	hooks    []Hook
+	mutation *DailyCategoryMutation
+}
+
+// Where appends a list of predicates to the DailyCategoryDelete builder.
+func (dcd *DailyCategoryDelete) Where(ps ...predicate.DailyCategory) *DailyCategoryDelete {
+	dcd.mutation.Where(ps...)
+	return dcd
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (dcd *DailyCategoryDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, dcd.sqlExec, dcd.mutation, dcd.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dcd *DailyCategoryDelete) ExecX(ctx context.Context) int {
+	n, err := dcd.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (dcd *DailyCategoryDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(dailycategory.Table, sqlgraph.NewFieldSpec(dailycategory.FieldID, field.TypeInt))
+	if ps := dcd.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, dcd.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	dcd.mutation.done = true
+	return affected, err
+}
+
+// DailyCategoryDeleteOne is the builder for deleting a single DailyCategory entity.
+type DailyCategoryDeleteOne struct {
+	dcd *DailyCategoryDelete
+}
+
+// Where appends a list of predicates to the DailyCategoryDelete builder.
+func (dcdo *DailyCategoryDeleteOne) Where(ps ...predicate.DailyCategory) *DailyCategoryDeleteOne {
+	dcdo.dcd.mutation.Where(ps...)
+	return dcdo
+}
+
+// Exec executes the deletion query.
+func (dcdo *DailyCategoryDeleteOne) Exec(ctx context.Context) error {
+	n, err := dcdo.dcd.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{dailycategory.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (dcdo *DailyCategoryDeleteOne) ExecX(ctx context.Context) {
+	if err := dcdo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/dailycategory_query.go b/backend/ent/dailycategory_query.go
new file mode 100644
index 0000000..7f32fce
--- /dev/null
+++ b/backend/ent/dailycategory_query.go
@@ -0,0 +1,682 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"database/sql/driver"
+	"fmt"
+	"math"
+	"tss-rocks-be/ent/daily"
+	"tss-rocks-be/ent/dailycategory"
+	"tss-rocks-be/ent/dailycategorycontent"
+	"tss-rocks-be/ent/predicate"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+)
+
+// DailyCategoryQuery is the builder for querying DailyCategory entities.
+type DailyCategoryQuery struct {
+	config
+	ctx            *QueryContext
+	order          []dailycategory.OrderOption
+	inters         []Interceptor
+	predicates     []predicate.DailyCategory
+	withContents   *DailyCategoryContentQuery
+	withDailyItems *DailyQuery
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the DailyCategoryQuery builder.
+func (dcq *DailyCategoryQuery) Where(ps ...predicate.DailyCategory) *DailyCategoryQuery {
+	dcq.predicates = append(dcq.predicates, ps...)
+	return dcq
+}
+
+// Limit the number of records to be returned by this query.
+func (dcq *DailyCategoryQuery) Limit(limit int) *DailyCategoryQuery {
+	dcq.ctx.Limit = &limit
+	return dcq
+}
+
+// Offset to start from.
+func (dcq *DailyCategoryQuery) Offset(offset int) *DailyCategoryQuery {
+	dcq.ctx.Offset = &offset
+	return dcq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (dcq *DailyCategoryQuery) Unique(unique bool) *DailyCategoryQuery {
+	dcq.ctx.Unique = &unique
+	return dcq
+}
+
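Limit and Offset map directly onto SQL LIMIT/OFFSET, which makes simple paging a one-liner with All (defined further down in this file). Sketch (hypothetical `client`/`ctx`):

	const pageSize = 20
	page2, err := client.DailyCategory.Query().
		Order(dailycategory.ByID()).
		Offset(1 * pageSize).
		Limit(pageSize).
		All(ctx)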
+// Order specifies how the records should be ordered.
+func (dcq *DailyCategoryQuery) Order(o ...dailycategory.OrderOption) *DailyCategoryQuery {
+	dcq.order = append(dcq.order, o...)
+	return dcq
+}
+
+// QueryContents chains the current query on the "contents" edge.
+func (dcq *DailyCategoryQuery) QueryContents() *DailyCategoryContentQuery {
+	query := (&DailyCategoryContentClient{config: dcq.config}).Query()
+	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+		if err := dcq.prepareQuery(ctx); err != nil {
+			return nil, err
+		}
+		selector := dcq.sqlQuery(ctx)
+		if err := selector.Err(); err != nil {
+			return nil, err
+		}
+		step := sqlgraph.NewStep(
+			sqlgraph.From(dailycategory.Table, dailycategory.FieldID, selector),
+			sqlgraph.To(dailycategorycontent.Table, dailycategorycontent.FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, dailycategory.ContentsTable, dailycategory.ContentsColumn),
+		)
+		fromU = sqlgraph.SetNeighbors(dcq.driver.Dialect(), step)
+		return fromU, nil
+	}
+	return query
+}
+
+// QueryDailyItems chains the current query on the "daily_items" edge.
+func (dcq *DailyCategoryQuery) QueryDailyItems() *DailyQuery {
+	query := (&DailyClient{config: dcq.config}).Query()
+	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+		if err := dcq.prepareQuery(ctx); err != nil {
+			return nil, err
+		}
+		selector := dcq.sqlQuery(ctx)
+		if err := selector.Err(); err != nil {
+			return nil, err
+		}
+		step := sqlgraph.NewStep(
+			sqlgraph.From(dailycategory.Table, dailycategory.FieldID, selector),
+			sqlgraph.To(daily.Table, daily.FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, dailycategory.DailyItemsTable, dailycategory.DailyItemsColumn),
+		)
+		fromU = sqlgraph.SetNeighbors(dcq.driver.Dialect(), step)
+		return fromU, nil
+	}
+	return query
+}
+
+// First returns the first DailyCategory entity from the query.
+// Returns a *NotFoundError when no DailyCategory was found.
+func (dcq *DailyCategoryQuery) First(ctx context.Context) (*DailyCategory, error) {
+	nodes, err := dcq.Limit(1).All(setContextOp(ctx, dcq.ctx, ent.OpQueryFirst))
+	if err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nil, &NotFoundError{dailycategory.Label}
+	}
+	return nodes[0], nil
+}
+
+// FirstX is like First, but panics if an error occurs.
+func (dcq *DailyCategoryQuery) FirstX(ctx context.Context) *DailyCategory {
+	node, err := dcq.First(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return node
+}
+
+// FirstID returns the first DailyCategory ID from the query.
+// Returns a *NotFoundError when no DailyCategory ID was found.
+func (dcq *DailyCategoryQuery) FirstID(ctx context.Context) (id int, err error) {
+	var ids []int
+	if ids, err = dcq.Limit(1).IDs(setContextOp(ctx, dcq.ctx, ent.OpQueryFirstID)); err != nil {
+		return
+	}
+	if len(ids) == 0 {
+		err = &NotFoundError{dailycategory.Label}
+		return
+	}
+	return ids[0], nil
+}
+
+// FirstIDX is like FirstID, but panics if an error occurs.
+func (dcq *DailyCategoryQuery) FirstIDX(ctx context.Context) int {
+	id, err := dcq.FirstID(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return id
+}
+
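First reports absence through a typed *NotFoundError rather than (nil, nil), so callers usually branch with the generated ent.IsNotFound helper instead of reaching for FirstX. A sketch (hypothetical `client`/`ctx`, inside a function returning error):

	first, err := client.DailyCategory.Query().
		Order(dailycategory.ByCreatedAt()).
		First(ctx)
	switch {
	case ent.IsNotFound(err):
		// no categories yet; often not a failure
	case err != nil:
		return err
	default:
		_ = first // the oldest category
	}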
+// Only returns a single DailyCategory entity found by the query, ensuring it only returns one.
+// Returns a *NotSingularError when more than one DailyCategory entity is found.
+// Returns a *NotFoundError when no DailyCategory entities are found.
+func (dcq *DailyCategoryQuery) Only(ctx context.Context) (*DailyCategory, error) {
+	nodes, err := dcq.Limit(2).All(setContextOp(ctx, dcq.ctx, ent.OpQueryOnly))
+	if err != nil {
+		return nil, err
+	}
+	switch len(nodes) {
+	case 1:
+		return nodes[0], nil
+	case 0:
+		return nil, &NotFoundError{dailycategory.Label}
+	default:
+		return nil, &NotSingularError{dailycategory.Label}
+	}
+}
+
+// OnlyX is like Only, but panics if an error occurs.
+func (dcq *DailyCategoryQuery) OnlyX(ctx context.Context) *DailyCategory {
+	node, err := dcq.Only(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// OnlyID is like Only, but returns the only DailyCategory ID in the query.
+// Returns a *NotSingularError when more than one DailyCategory ID is found.
+// Returns a *NotFoundError when no entities are found.
+func (dcq *DailyCategoryQuery) OnlyID(ctx context.Context) (id int, err error) {
+	var ids []int
+	if ids, err = dcq.Limit(2).IDs(setContextOp(ctx, dcq.ctx, ent.OpQueryOnlyID)); err != nil {
+		return
+	}
+	switch len(ids) {
+	case 1:
+		id = ids[0]
+	case 0:
+		err = &NotFoundError{dailycategory.Label}
+	default:
+		err = &NotSingularError{dailycategory.Label}
+	}
+	return
+}
+
+// OnlyIDX is like OnlyID, but panics if an error occurs.
+func (dcq *DailyCategoryQuery) OnlyIDX(ctx context.Context) int {
+	id, err := dcq.OnlyID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// All executes the query and returns a list of DailyCategories.
+func (dcq *DailyCategoryQuery) All(ctx context.Context) ([]*DailyCategory, error) {
+	ctx = setContextOp(ctx, dcq.ctx, ent.OpQueryAll)
+	if err := dcq.prepareQuery(ctx); err != nil {
+		return nil, err
+	}
+	qr := querierAll[[]*DailyCategory, *DailyCategoryQuery]()
+	return withInterceptors[[]*DailyCategory](ctx, dcq, qr, dcq.inters)
+}
+
+// AllX is like All, but panics if an error occurs.
+func (dcq *DailyCategoryQuery) AllX(ctx context.Context) []*DailyCategory {
+	nodes, err := dcq.All(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return nodes
+}
+
+// IDs executes the query and returns a list of DailyCategory IDs.
+func (dcq *DailyCategoryQuery) IDs(ctx context.Context) (ids []int, err error) {
+	if dcq.ctx.Unique == nil && dcq.path != nil {
+		dcq.Unique(true)
+	}
+	ctx = setContextOp(ctx, dcq.ctx, ent.OpQueryIDs)
+	if err = dcq.Select(dailycategory.FieldID).Scan(ctx, &ids); err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (dcq *DailyCategoryQuery) IDsX(ctx context.Context) []int {
+	ids, err := dcq.IDs(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return ids
+}
+
+// Count returns the count of the given query.
+func (dcq *DailyCategoryQuery) Count(ctx context.Context) (int, error) {
+	ctx = setContextOp(ctx, dcq.ctx, ent.OpQueryCount)
+	if err := dcq.prepareQuery(ctx); err != nil {
+		return 0, err
+	}
+	return withInterceptors[int](ctx, dcq, querierCount[*DailyCategoryQuery](), dcq.inters)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (dcq *DailyCategoryQuery) CountX(ctx context.Context) int {
+	count, err := dcq.Count(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
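Count issues a COUNT over the current predicates, whereas Exist (just below) only fetches the first ID, so it is the cheaper call when presence is all that matters. Sketch (hypothetical `client`/`ctx`):

	total, err := client.DailyCategory.Query().Count(ctx)

	hasLinked, err := client.DailyCategory.Query().
		Where(dailycategory.HasDailyItems()).
		Exist(ctx)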
+// Exist returns true if the query has elements in the graph.
+func (dcq *DailyCategoryQuery) Exist(ctx context.Context) (bool, error) {
+	ctx = setContextOp(ctx, dcq.ctx, ent.OpQueryExist)
+	switch _, err := dcq.FirstID(ctx); {
+	case IsNotFound(err):
+		return false, nil
+	case err != nil:
+		return false, fmt.Errorf("ent: check existence: %w", err)
+	default:
+		return true, nil
+	}
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (dcq *DailyCategoryQuery) ExistX(ctx context.Context) bool {
+	exist, err := dcq.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the DailyCategoryQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (dcq *DailyCategoryQuery) Clone() *DailyCategoryQuery {
+	if dcq == nil {
+		return nil
+	}
+	return &DailyCategoryQuery{
+		config:         dcq.config,
+		ctx:            dcq.ctx.Clone(),
+		order:          append([]dailycategory.OrderOption{}, dcq.order...),
+		inters:         append([]Interceptor{}, dcq.inters...),
+		predicates:     append([]predicate.DailyCategory{}, dcq.predicates...),
+		withContents:   dcq.withContents.Clone(),
+		withDailyItems: dcq.withDailyItems.Clone(),
+		// clone intermediate query.
+		sql:  dcq.sql.Clone(),
+		path: dcq.path,
+	}
+}
+
+// WithContents tells the query-builder to eager-load the nodes that are connected to
+// the "contents" edge. The optional arguments are used to configure the query builder of the edge.
+func (dcq *DailyCategoryQuery) WithContents(opts ...func(*DailyCategoryContentQuery)) *DailyCategoryQuery {
+	query := (&DailyCategoryContentClient{config: dcq.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	dcq.withContents = query
+	return dcq
+}
+
+// WithDailyItems tells the query-builder to eager-load the nodes that are connected to
+// the "daily_items" edge. The optional arguments are used to configure the query builder of the edge.
+func (dcq *DailyCategoryQuery) WithDailyItems(opts ...func(*DailyQuery)) *DailyCategoryQuery {
+	query := (&DailyClient{config: dcq.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	dcq.withDailyItems = query
+	return dcq
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.DailyCategory.Query().
+//		GroupBy(dailycategory.FieldCreatedAt).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+func (dcq *DailyCategoryQuery) GroupBy(field string, fields ...string) *DailyCategoryGroupBy {
+	dcq.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &DailyCategoryGroupBy{build: dcq}
+	grbuild.flds = &dcq.ctx.Fields
+	grbuild.label = dailycategory.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
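WithContents and WithDailyItems above are what arm the eager-loading pass in sqlAll further down; once loaded, edges can be read through the OrErr accessors without tripping NotLoadedError. A sketch (hypothetical `client`/`ctx`; daily.ByCreatedAt is assumed to be the generated order helper on the Daily side):

	cats, err := client.DailyCategory.Query().
		WithContents().
		WithDailyItems(func(q *ent.DailyQuery) {
			q.Order(daily.ByCreatedAt(sql.OrderDesc())) // narrow the edge query
		}).
		All(ctx)
	if err != nil {
		return err
	}
	for _, c := range cats {
		items, err := c.Edges.DailyItemsOrErr() // nil error: the edge was eager-loaded
		if err != nil {
			return err
		}
		_ = items
	}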
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//	}
+//
+//	client.DailyCategory.Query().
+//		Select(dailycategory.FieldCreatedAt).
+//		Scan(ctx, &v)
+func (dcq *DailyCategoryQuery) Select(fields ...string) *DailyCategorySelect {
+	dcq.ctx.Fields = append(dcq.ctx.Fields, fields...)
+	sbuild := &DailyCategorySelect{DailyCategoryQuery: dcq}
+	sbuild.label = dailycategory.Label
+	sbuild.flds, sbuild.scan = &dcq.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns a DailyCategorySelect configured with the given aggregations.
+func (dcq *DailyCategoryQuery) Aggregate(fns ...AggregateFunc) *DailyCategorySelect {
+	return dcq.Select().Aggregate(fns...)
+}
+
+func (dcq *DailyCategoryQuery) prepareQuery(ctx context.Context) error {
+	for _, inter := range dcq.inters {
+		if inter == nil {
+			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+		}
+		if trv, ok := inter.(Traverser); ok {
+			if err := trv.Traverse(ctx, dcq); err != nil {
+				return err
+			}
+		}
+	}
+	for _, f := range dcq.ctx.Fields {
+		if !dailycategory.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+		}
+	}
+	if dcq.path != nil {
+		prev, err := dcq.path(ctx)
+		if err != nil {
+			return err
+		}
+		dcq.sql = prev
+	}
+	return nil
+}
+
+func (dcq *DailyCategoryQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DailyCategory, error) {
+	var (
+		nodes       = []*DailyCategory{}
+		_spec       = dcq.querySpec()
+		loadedTypes = [2]bool{
+			dcq.withContents != nil,
+			dcq.withDailyItems != nil,
+		}
+	)
+	_spec.ScanValues = func(columns []string) ([]any, error) {
+		return (*DailyCategory).scanValues(nil, columns)
+	}
+	_spec.Assign = func(columns []string, values []any) error {
+		node := &DailyCategory{config: dcq.config}
+		nodes = append(nodes, node)
+		node.Edges.loadedTypes = loadedTypes
+		return node.assignValues(columns, values)
+	}
+	for i := range hooks {
+		hooks[i](ctx, _spec)
+	}
+	if err := sqlgraph.QueryNodes(ctx, dcq.driver, _spec); err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nodes, nil
+	}
+	if query := dcq.withContents; query != nil {
+		if err := dcq.loadContents(ctx, query, nodes,
+			func(n *DailyCategory) { n.Edges.Contents = []*DailyCategoryContent{} },
+			func(n *DailyCategory, e *DailyCategoryContent) { n.Edges.Contents = append(n.Edges.Contents, e) }); err != nil {
+			return nil, err
+		}
+	}
+	if query := dcq.withDailyItems; query != nil {
+		if err := dcq.loadDailyItems(ctx, query, nodes,
+			func(n *DailyCategory) { n.Edges.DailyItems = []*Daily{} },
+			func(n *DailyCategory, e *Daily) { n.Edges.DailyItems = append(n.Edges.DailyItems, e) }); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+func (dcq *DailyCategoryQuery) loadContents(ctx context.Context, query *DailyCategoryContentQuery, nodes []*DailyCategory, init func(*DailyCategory), assign func(*DailyCategory, *DailyCategoryContent)) error {
+	fks := make([]driver.Value, 0, len(nodes))
+	nodeids := make(map[int]*DailyCategory)
+	for i := range nodes {
+		fks = append(fks, nodes[i].ID)
+		nodeids[nodes[i].ID] = nodes[i]
+		if init != nil {
+			init(nodes[i])
+		}
+	}
+	query.withFKs = true
+	query.Where(predicate.DailyCategoryContent(func(s *sql.Selector) {
+		s.Where(sql.InValues(s.C(dailycategory.ContentsColumn), fks...))
+	}))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		fk := n.daily_category_contents
+		if fk == nil {
+			return fmt.Errorf(`foreign-key "daily_category_contents" is nil for node %v`, n.ID)
+		}
+		node, ok := nodeids[*fk]
+		if !ok {
+			return fmt.Errorf(`unexpected referenced foreign-key "daily_category_contents" returned %v for node %v`, *fk, n.ID)
+		}
+		assign(node, n)
+	}
+	return nil
+}
+func (dcq *DailyCategoryQuery) loadDailyItems(ctx context.Context, query *DailyQuery, nodes []*DailyCategory, init func(*DailyCategory), assign func(*DailyCategory, *Daily)) error {
+	fks := make([]driver.Value, 0, len(nodes))
+	nodeids := make(map[int]*DailyCategory)
+	for i := range nodes {
+		fks = append(fks, nodes[i].ID)
+		nodeids[nodes[i].ID] = nodes[i]
+		if init != nil {
+			init(nodes[i])
+		}
+	}
+	query.withFKs = true
+	query.Where(predicate.Daily(func(s *sql.Selector) {
+		s.Where(sql.InValues(s.C(dailycategory.DailyItemsColumn), fks...))
+	}))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		fk := n.daily_category_daily_items
+		if fk == nil {
+			return fmt.Errorf(`foreign-key "daily_category_daily_items" is nil for node %v`, n.ID)
+		}
+		node, ok := nodeids[*fk]
+		if !ok {
+			return fmt.Errorf(`unexpected referenced foreign-key "daily_category_daily_items" returned %v for node %v`, *fk, n.ID)
+		}
+		assign(node, n)
+	}
+	return nil
+}
+
+func (dcq *DailyCategoryQuery) sqlCount(ctx context.Context) (int, error) {
+	_spec := dcq.querySpec()
+	_spec.Node.Columns = dcq.ctx.Fields
+	if len(dcq.ctx.Fields) > 0 {
+		_spec.Unique = dcq.ctx.Unique != nil && *dcq.ctx.Unique
+	}
+	return sqlgraph.CountNodes(ctx, dcq.driver, _spec)
+}
+
+func (dcq *DailyCategoryQuery) querySpec() *sqlgraph.QuerySpec {
+	_spec := sqlgraph.NewQuerySpec(dailycategory.Table, dailycategory.Columns, sqlgraph.NewFieldSpec(dailycategory.FieldID, field.TypeInt))
+	_spec.From = dcq.sql
+	if unique := dcq.ctx.Unique; unique != nil {
+		_spec.Unique = *unique
+	} else if dcq.path != nil {
+		_spec.Unique = true
+	}
+	if fields := dcq.ctx.Fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, dailycategory.FieldID)
+		for i := range fields {
+			if fields[i] != dailycategory.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+	}
+	if ps := dcq.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := dcq.ctx.Limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := dcq.ctx.Offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := dcq.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
+func (dcq *DailyCategoryQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(dcq.driver.Dialect())
+	t1 := builder.Table(dailycategory.Table)
+	columns := dcq.ctx.Fields
+	if len(columns) == 0 {
+		columns = dailycategory.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if dcq.sql != nil {
+		selector = dcq.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if dcq.ctx.Unique != nil && *dcq.ctx.Unique {
+		selector.Distinct()
+	}
+	for _, p := range dcq.predicates {
+		p(selector)
+	}
+	for _, p := range dcq.order {
+		p(selector)
+	}
+	if offset := dcq.ctx.Offset; offset != nil {
+		// limit is mandatory for offset clause. We start
+		// with default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := dcq.ctx.Limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// DailyCategoryGroupBy is the group-by builder for DailyCategory entities.
+type DailyCategoryGroupBy struct {
+	selector
+	build *DailyCategoryQuery
+}
+
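The group-by builder does nothing until Scan drives it; mirroring the example in the GroupBy doc comment above, a sketch that counts categories per creation timestamp (hypothetical `client`/`ctx`):

	var rows []struct {
		CreatedAt time.Time `json:"created_at,omitempty"`
		Count     int       `json:"count,omitempty"`
	}
	err := client.DailyCategory.Query().
		GroupBy(dailycategory.FieldCreatedAt).
		Aggregate(ent.Count()).
		Scan(ctx, &rows)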
+func (dcgb *DailyCategoryGroupBy) Aggregate(fns ...AggregateFunc) *DailyCategoryGroupBy { + dcgb.fns = append(dcgb.fns, fns...) + return dcgb +} + +// Scan applies the selector query and scans the result into the given value. +func (dcgb *DailyCategoryGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, dcgb.build.ctx, ent.OpQueryGroupBy) + if err := dcgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DailyCategoryQuery, *DailyCategoryGroupBy](ctx, dcgb.build, dcgb, dcgb.build.inters, v) +} + +func (dcgb *DailyCategoryGroupBy) sqlScan(ctx context.Context, root *DailyCategoryQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(dcgb.fns)) + for _, fn := range dcgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*dcgb.flds)+len(dcgb.fns)) + for _, f := range *dcgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*dcgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dcgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// DailyCategorySelect is the builder for selecting fields of DailyCategory entities. +type DailyCategorySelect struct { + *DailyCategoryQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (dcs *DailyCategorySelect) Aggregate(fns ...AggregateFunc) *DailyCategorySelect { + dcs.fns = append(dcs.fns, fns...) + return dcs +} + +// Scan applies the selector query and scans the result into the given value. +func (dcs *DailyCategorySelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, dcs.ctx, ent.OpQuerySelect) + if err := dcs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DailyCategoryQuery, *DailyCategorySelect](ctx, dcs.DailyCategoryQuery, dcs, dcs.inters, v) +} + +func (dcs *DailyCategorySelect) sqlScan(ctx context.Context, root *DailyCategoryQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(dcs.fns)) + for _, fn := range dcs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*dcs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dcs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/dailycategory_update.go b/backend/ent/dailycategory_update.go new file mode 100644 index 0000000..939ba2c --- /dev/null +++ b/backend/ent/dailycategory_update.go @@ -0,0 +1,572 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/dailycategory" + "tss-rocks-be/ent/dailycategorycontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// DailyCategoryUpdate is the builder for updating DailyCategory entities. 
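+// A single Save may update many rows: every DailyCategory matching the Where predicates receives the same changes, and Save reports the number of rows affected.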
+type DailyCategoryUpdate struct { + config + hooks []Hook + mutation *DailyCategoryMutation +} + +// Where appends a list predicates to the DailyCategoryUpdate builder. +func (dcu *DailyCategoryUpdate) Where(ps ...predicate.DailyCategory) *DailyCategoryUpdate { + dcu.mutation.Where(ps...) + return dcu +} + +// SetCreatedAt sets the "created_at" field. +func (dcu *DailyCategoryUpdate) SetCreatedAt(t time.Time) *DailyCategoryUpdate { + dcu.mutation.SetCreatedAt(t) + return dcu +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (dcu *DailyCategoryUpdate) SetNillableCreatedAt(t *time.Time) *DailyCategoryUpdate { + if t != nil { + dcu.SetCreatedAt(*t) + } + return dcu +} + +// SetUpdatedAt sets the "updated_at" field. +func (dcu *DailyCategoryUpdate) SetUpdatedAt(t time.Time) *DailyCategoryUpdate { + dcu.mutation.SetUpdatedAt(t) + return dcu +} + +// AddContentIDs adds the "contents" edge to the DailyCategoryContent entity by IDs. +func (dcu *DailyCategoryUpdate) AddContentIDs(ids ...int) *DailyCategoryUpdate { + dcu.mutation.AddContentIDs(ids...) + return dcu +} + +// AddContents adds the "contents" edges to the DailyCategoryContent entity. +func (dcu *DailyCategoryUpdate) AddContents(d ...*DailyCategoryContent) *DailyCategoryUpdate { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return dcu.AddContentIDs(ids...) +} + +// AddDailyItemIDs adds the "daily_items" edge to the Daily entity by IDs. +func (dcu *DailyCategoryUpdate) AddDailyItemIDs(ids ...string) *DailyCategoryUpdate { + dcu.mutation.AddDailyItemIDs(ids...) + return dcu +} + +// AddDailyItems adds the "daily_items" edges to the Daily entity. +func (dcu *DailyCategoryUpdate) AddDailyItems(d ...*Daily) *DailyCategoryUpdate { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return dcu.AddDailyItemIDs(ids...) +} + +// Mutation returns the DailyCategoryMutation object of the builder. +func (dcu *DailyCategoryUpdate) Mutation() *DailyCategoryMutation { + return dcu.mutation +} + +// ClearContents clears all "contents" edges to the DailyCategoryContent entity. +func (dcu *DailyCategoryUpdate) ClearContents() *DailyCategoryUpdate { + dcu.mutation.ClearContents() + return dcu +} + +// RemoveContentIDs removes the "contents" edge to DailyCategoryContent entities by IDs. +func (dcu *DailyCategoryUpdate) RemoveContentIDs(ids ...int) *DailyCategoryUpdate { + dcu.mutation.RemoveContentIDs(ids...) + return dcu +} + +// RemoveContents removes "contents" edges to DailyCategoryContent entities. +func (dcu *DailyCategoryUpdate) RemoveContents(d ...*DailyCategoryContent) *DailyCategoryUpdate { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return dcu.RemoveContentIDs(ids...) +} + +// ClearDailyItems clears all "daily_items" edges to the Daily entity. +func (dcu *DailyCategoryUpdate) ClearDailyItems() *DailyCategoryUpdate { + dcu.mutation.ClearDailyItems() + return dcu +} + +// RemoveDailyItemIDs removes the "daily_items" edge to Daily entities by IDs. +func (dcu *DailyCategoryUpdate) RemoveDailyItemIDs(ids ...string) *DailyCategoryUpdate { + dcu.mutation.RemoveDailyItemIDs(ids...) + return dcu +} + +// RemoveDailyItems removes "daily_items" edges to Daily entities. +func (dcu *DailyCategoryUpdate) RemoveDailyItems(d ...*Daily) *DailyCategoryUpdate { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return dcu.RemoveDailyItemIDs(ids...) 
+} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (dcu *DailyCategoryUpdate) Save(ctx context.Context) (int, error) { + dcu.defaults() + return withHooks(ctx, dcu.sqlSave, dcu.mutation, dcu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (dcu *DailyCategoryUpdate) SaveX(ctx context.Context) int { + affected, err := dcu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (dcu *DailyCategoryUpdate) Exec(ctx context.Context) error { + _, err := dcu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dcu *DailyCategoryUpdate) ExecX(ctx context.Context) { + if err := dcu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (dcu *DailyCategoryUpdate) defaults() { + if _, ok := dcu.mutation.UpdatedAt(); !ok { + v := dailycategory.UpdateDefaultUpdatedAt() + dcu.mutation.SetUpdatedAt(v) + } +} + +func (dcu *DailyCategoryUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(dailycategory.Table, dailycategory.Columns, sqlgraph.NewFieldSpec(dailycategory.FieldID, field.TypeInt)) + if ps := dcu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := dcu.mutation.CreatedAt(); ok { + _spec.SetField(dailycategory.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := dcu.mutation.UpdatedAt(); ok { + _spec.SetField(dailycategory.FieldUpdatedAt, field.TypeTime, value) + } + if dcu.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: dailycategory.ContentsTable, + Columns: []string{dailycategory.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycategorycontent.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dcu.mutation.RemovedContentsIDs(); len(nodes) > 0 && !dcu.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: dailycategory.ContentsTable, + Columns: []string{dailycategory.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycategorycontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dcu.mutation.ContentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: dailycategory.ContentsTable, + Columns: []string{dailycategory.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycategorycontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if dcu.mutation.DailyItemsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: dailycategory.DailyItemsTable, + Columns: []string{dailycategory.DailyItemsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dcu.mutation.RemovedDailyItemsIDs(); len(nodes) > 0 && !dcu.mutation.DailyItemsCleared() { + 
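// Detach only the listed IDs: when the whole edge is being cleared, the guard above skips this branch because the clear spec already removes every association. +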
edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: dailycategory.DailyItemsTable, + Columns: []string{dailycategory.DailyItemsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dcu.mutation.DailyItemsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: dailycategory.DailyItemsTable, + Columns: []string{dailycategory.DailyItemsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, dcu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{dailycategory.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + dcu.mutation.done = true + return n, nil +} + +// DailyCategoryUpdateOne is the builder for updating a single DailyCategory entity. +type DailyCategoryUpdateOne struct { + config + fields []string + hooks []Hook + mutation *DailyCategoryMutation +} + +// SetCreatedAt sets the "created_at" field. +func (dcuo *DailyCategoryUpdateOne) SetCreatedAt(t time.Time) *DailyCategoryUpdateOne { + dcuo.mutation.SetCreatedAt(t) + return dcuo +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (dcuo *DailyCategoryUpdateOne) SetNillableCreatedAt(t *time.Time) *DailyCategoryUpdateOne { + if t != nil { + dcuo.SetCreatedAt(*t) + } + return dcuo +} + +// SetUpdatedAt sets the "updated_at" field. +func (dcuo *DailyCategoryUpdateOne) SetUpdatedAt(t time.Time) *DailyCategoryUpdateOne { + dcuo.mutation.SetUpdatedAt(t) + return dcuo +} + +// AddContentIDs adds the "contents" edge to the DailyCategoryContent entity by IDs. +func (dcuo *DailyCategoryUpdateOne) AddContentIDs(ids ...int) *DailyCategoryUpdateOne { + dcuo.mutation.AddContentIDs(ids...) + return dcuo +} + +// AddContents adds the "contents" edges to the DailyCategoryContent entity. +func (dcuo *DailyCategoryUpdateOne) AddContents(d ...*DailyCategoryContent) *DailyCategoryUpdateOne { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return dcuo.AddContentIDs(ids...) +} + +// AddDailyItemIDs adds the "daily_items" edge to the Daily entity by IDs. +func (dcuo *DailyCategoryUpdateOne) AddDailyItemIDs(ids ...string) *DailyCategoryUpdateOne { + dcuo.mutation.AddDailyItemIDs(ids...) + return dcuo +} + +// AddDailyItems adds the "daily_items" edges to the Daily entity. +func (dcuo *DailyCategoryUpdateOne) AddDailyItems(d ...*Daily) *DailyCategoryUpdateOne { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return dcuo.AddDailyItemIDs(ids...) +} + +// Mutation returns the DailyCategoryMutation object of the builder. +func (dcuo *DailyCategoryUpdateOne) Mutation() *DailyCategoryMutation { + return dcuo.mutation +} + +// ClearContents clears all "contents" edges to the DailyCategoryContent entity. 
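+// ClearContents drops every association at once; RemoveContents/RemoveContentIDs below detach only specific rows.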
+func (dcuo *DailyCategoryUpdateOne) ClearContents() *DailyCategoryUpdateOne { + dcuo.mutation.ClearContents() + return dcuo +} + +// RemoveContentIDs removes the "contents" edge to DailyCategoryContent entities by IDs. +func (dcuo *DailyCategoryUpdateOne) RemoveContentIDs(ids ...int) *DailyCategoryUpdateOne { + dcuo.mutation.RemoveContentIDs(ids...) + return dcuo +} + +// RemoveContents removes "contents" edges to DailyCategoryContent entities. +func (dcuo *DailyCategoryUpdateOne) RemoveContents(d ...*DailyCategoryContent) *DailyCategoryUpdateOne { + ids := make([]int, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return dcuo.RemoveContentIDs(ids...) +} + +// ClearDailyItems clears all "daily_items" edges to the Daily entity. +func (dcuo *DailyCategoryUpdateOne) ClearDailyItems() *DailyCategoryUpdateOne { + dcuo.mutation.ClearDailyItems() + return dcuo +} + +// RemoveDailyItemIDs removes the "daily_items" edge to Daily entities by IDs. +func (dcuo *DailyCategoryUpdateOne) RemoveDailyItemIDs(ids ...string) *DailyCategoryUpdateOne { + dcuo.mutation.RemoveDailyItemIDs(ids...) + return dcuo +} + +// RemoveDailyItems removes "daily_items" edges to Daily entities. +func (dcuo *DailyCategoryUpdateOne) RemoveDailyItems(d ...*Daily) *DailyCategoryUpdateOne { + ids := make([]string, len(d)) + for i := range d { + ids[i] = d[i].ID + } + return dcuo.RemoveDailyItemIDs(ids...) +} + +// Where appends a list predicates to the DailyCategoryUpdate builder. +func (dcuo *DailyCategoryUpdateOne) Where(ps ...predicate.DailyCategory) *DailyCategoryUpdateOne { + dcuo.mutation.Where(ps...) + return dcuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (dcuo *DailyCategoryUpdateOne) Select(field string, fields ...string) *DailyCategoryUpdateOne { + dcuo.fields = append([]string{field}, fields...) + return dcuo +} + +// Save executes the query and returns the updated DailyCategory entity. +func (dcuo *DailyCategoryUpdateOne) Save(ctx context.Context) (*DailyCategory, error) { + dcuo.defaults() + return withHooks(ctx, dcuo.sqlSave, dcuo.mutation, dcuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (dcuo *DailyCategoryUpdateOne) SaveX(ctx context.Context) *DailyCategory { + node, err := dcuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (dcuo *DailyCategoryUpdateOne) Exec(ctx context.Context) error { + _, err := dcuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dcuo *DailyCategoryUpdateOne) ExecX(ctx context.Context) { + if err := dcuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
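+// For this schema that means stamping "updated_at" via dailycategory.UpdateDefaultUpdatedAt whenever the caller has not set it explicitly.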
+func (dcuo *DailyCategoryUpdateOne) defaults() { + if _, ok := dcuo.mutation.UpdatedAt(); !ok { + v := dailycategory.UpdateDefaultUpdatedAt() + dcuo.mutation.SetUpdatedAt(v) + } +} + +func (dcuo *DailyCategoryUpdateOne) sqlSave(ctx context.Context) (_node *DailyCategory, err error) { + _spec := sqlgraph.NewUpdateSpec(dailycategory.Table, dailycategory.Columns, sqlgraph.NewFieldSpec(dailycategory.FieldID, field.TypeInt)) + id, ok := dcuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DailyCategory.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := dcuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, dailycategory.FieldID) + for _, f := range fields { + if !dailycategory.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != dailycategory.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := dcuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := dcuo.mutation.CreatedAt(); ok { + _spec.SetField(dailycategory.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := dcuo.mutation.UpdatedAt(); ok { + _spec.SetField(dailycategory.FieldUpdatedAt, field.TypeTime, value) + } + if dcuo.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: dailycategory.ContentsTable, + Columns: []string{dailycategory.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycategorycontent.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dcuo.mutation.RemovedContentsIDs(); len(nodes) > 0 && !dcuo.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: dailycategory.ContentsTable, + Columns: []string{dailycategory.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycategorycontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dcuo.mutation.ContentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: dailycategory.ContentsTable, + Columns: []string{dailycategory.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycategorycontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if dcuo.mutation.DailyItemsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: dailycategory.DailyItemsTable, + Columns: []string{dailycategory.DailyItemsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dcuo.mutation.RemovedDailyItemsIDs(); len(nodes) > 0 && !dcuo.mutation.DailyItemsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: dailycategory.DailyItemsTable, + Columns: []string{dailycategory.DailyItemsColumn}, + Bidi: false, + Target: 
&sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dcuo.mutation.DailyItemsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: dailycategory.DailyItemsTable, + Columns: []string{dailycategory.DailyItemsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &DailyCategory{config: dcuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, dcuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{dailycategory.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + dcuo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/dailycategorycontent.go b/backend/ent/dailycategorycontent.go new file mode 100644 index 0000000..5a21574 --- /dev/null +++ b/backend/ent/dailycategorycontent.go @@ -0,0 +1,153 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "tss-rocks-be/ent/dailycategory" + "tss-rocks-be/ent/dailycategorycontent" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// DailyCategoryContent is the model entity for the DailyCategoryContent schema. +type DailyCategoryContent struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // LanguageCode holds the value of the "language_code" field. + LanguageCode dailycategorycontent.LanguageCode `json:"language_code,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the DailyCategoryContentQuery when eager-loading is set. + Edges DailyCategoryContentEdges `json:"edges"` + daily_category_contents *int + selectValues sql.SelectValues +} + +// DailyCategoryContentEdges holds the relations/edges for other nodes in the graph. +type DailyCategoryContentEdges struct { + // Category holds the value of the category edge. + Category *DailyCategory `json:"category,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// CategoryOrErr returns the Category value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e DailyCategoryContentEdges) CategoryOrErr() (*DailyCategory, error) { + if e.Category != nil { + return e.Category, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: dailycategory.Label} + } + return nil, &NotLoadedError{edge: "category"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
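+// Nullable wrappers (sql.NullInt64, sql.NullString) let NULL columns scan without error; unknown columns fall back to sql.UnknownType and surface later through Value.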
+func (*DailyCategoryContent) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case dailycategorycontent.FieldID: + values[i] = new(sql.NullInt64) + case dailycategorycontent.FieldLanguageCode, dailycategorycontent.FieldName: + values[i] = new(sql.NullString) + case dailycategorycontent.ForeignKeys[0]: // daily_category_contents + values[i] = new(sql.NullInt64) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the DailyCategoryContent fields. +func (dcc *DailyCategoryContent) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case dailycategorycontent.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + dcc.ID = int(value.Int64) + case dailycategorycontent.FieldLanguageCode: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field language_code", values[i]) + } else if value.Valid { + dcc.LanguageCode = dailycategorycontent.LanguageCode(value.String) + } + case dailycategorycontent.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + dcc.Name = value.String + } + case dailycategorycontent.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field daily_category_contents", value) + } else if value.Valid { + dcc.daily_category_contents = new(int) + *dcc.daily_category_contents = int(value.Int64) + } + default: + dcc.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the DailyCategoryContent. +// This includes values selected through modifiers, order, etc. +func (dcc *DailyCategoryContent) Value(name string) (ent.Value, error) { + return dcc.selectValues.Get(name) +} + +// QueryCategory queries the "category" edge of the DailyCategoryContent entity. +func (dcc *DailyCategoryContent) QueryCategory() *DailyCategoryQuery { + return NewDailyCategoryContentClient(dcc.config).QueryCategory(dcc) +} + +// Update returns a builder for updating this DailyCategoryContent. +// Note that you need to call DailyCategoryContent.Unwrap() before calling this method if this DailyCategoryContent +// was returned from a transaction, and the transaction was committed or rolled back. +func (dcc *DailyCategoryContent) Update() *DailyCategoryContentUpdateOne { + return NewDailyCategoryContentClient(dcc.config).UpdateOne(dcc) +} + +// Unwrap unwraps the DailyCategoryContent entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (dcc *DailyCategoryContent) Unwrap() *DailyCategoryContent { + _tx, ok := dcc.config.driver.(*txDriver) + if !ok { + panic("ent: DailyCategoryContent is not a transactional entity") + } + dcc.config.driver = _tx.drv + return dcc +} + +// String implements the fmt.Stringer. 
+func (dcc *DailyCategoryContent) String() string { + var builder strings.Builder + builder.WriteString("DailyCategoryContent(") + builder.WriteString(fmt.Sprintf("id=%v, ", dcc.ID)) + builder.WriteString("language_code=") + builder.WriteString(fmt.Sprintf("%v", dcc.LanguageCode)) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(dcc.Name) + builder.WriteByte(')') + return builder.String() +} + +// DailyCategoryContents is a parsable slice of DailyCategoryContent. +type DailyCategoryContents []*DailyCategoryContent diff --git a/backend/ent/dailycategorycontent/dailycategorycontent.go b/backend/ent/dailycategorycontent/dailycategorycontent.go new file mode 100644 index 0000000..35aa113 --- /dev/null +++ b/backend/ent/dailycategorycontent/dailycategorycontent.go @@ -0,0 +1,121 @@ +// Code generated by ent, DO NOT EDIT. + +package dailycategorycontent + +import ( + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the dailycategorycontent type in the database. + Label = "daily_category_content" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldLanguageCode holds the string denoting the language_code field in the database. + FieldLanguageCode = "language_code" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // EdgeCategory holds the string denoting the category edge name in mutations. + EdgeCategory = "category" + // Table holds the table name of the dailycategorycontent in the database. + Table = "daily_category_contents" + // CategoryTable is the table that holds the category relation/edge. + CategoryTable = "daily_category_contents" + // CategoryInverseTable is the table name for the DailyCategory entity. + // It exists in this package in order to avoid circular dependency with the "dailycategory" package. + CategoryInverseTable = "daily_categories" + // CategoryColumn is the table column denoting the category relation/edge. + CategoryColumn = "daily_category_contents" +) + +// Columns holds all SQL columns for dailycategorycontent fields. +var Columns = []string{ + FieldID, + FieldLanguageCode, + FieldName, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "daily_category_contents" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "daily_category_contents", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error +) + +// LanguageCode defines the type for the "language_code" enum field. +type LanguageCode string + +// LanguageCode values. +const ( + LanguageCodeEn LanguageCode = "en" + LanguageCodeZhHans LanguageCode = "zh-Hans" + LanguageCodeZhHant LanguageCode = "zh-Hant" +) + +func (lc LanguageCode) String() string { + return string(lc) +} + +// LanguageCodeValidator is a validator for the "language_code" field enum values. It is called by the builders before save. 
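+// Only "en", "zh-Hans" and "zh-Hant" pass; any other value is rejected before the row is written.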
+func LanguageCodeValidator(lc LanguageCode) error { + switch lc { + case LanguageCodeEn, LanguageCodeZhHans, LanguageCodeZhHant: + return nil + default: + return fmt.Errorf("dailycategorycontent: invalid enum value for language_code field: %q", lc) + } +} + +// OrderOption defines the ordering options for the DailyCategoryContent queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByLanguageCode orders the results by the language_code field. +func ByLanguageCode(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLanguageCode, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByCategoryField orders the results by category field. +func ByCategoryField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newCategoryStep(), sql.OrderByField(field, opts...)) + } +} +func newCategoryStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(CategoryInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, CategoryTable, CategoryColumn), + ) +} diff --git a/backend/ent/dailycategorycontent/where.go b/backend/ent/dailycategorycontent/where.go new file mode 100644 index 0000000..8595871 --- /dev/null +++ b/backend/ent/dailycategorycontent/where.go @@ -0,0 +1,183 @@ +// Code generated by ent, DO NOT EDIT. + +package dailycategorycontent + +import ( + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldLTE(FieldID, id)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. 
+func Name(v string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldEQ(FieldName, v)) +} + +// LanguageCodeEQ applies the EQ predicate on the "language_code" field. +func LanguageCodeEQ(v LanguageCode) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldEQ(FieldLanguageCode, v)) +} + +// LanguageCodeNEQ applies the NEQ predicate on the "language_code" field. +func LanguageCodeNEQ(v LanguageCode) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldNEQ(FieldLanguageCode, v)) +} + +// LanguageCodeIn applies the In predicate on the "language_code" field. +func LanguageCodeIn(vs ...LanguageCode) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldIn(FieldLanguageCode, vs...)) +} + +// LanguageCodeNotIn applies the NotIn predicate on the "language_code" field. +func LanguageCodeNotIn(vs ...LanguageCode) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldNotIn(FieldLanguageCode, vs...)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. 
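+// Unlike NameContains, the Fold variants match case-insensitively.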
+func NameContainsFold(v string) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.FieldContainsFold(FieldName, v)) +} + +// HasCategory applies the HasEdge predicate on the "category" edge. +func HasCategory() predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, CategoryTable, CategoryColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasCategoryWith applies the HasEdge predicate on the "category" edge with a given conditions (other predicates). +func HasCategoryWith(preds ...predicate.DailyCategory) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(func(s *sql.Selector) { + step := newCategoryStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.DailyCategoryContent) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.DailyCategoryContent) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.DailyCategoryContent) predicate.DailyCategoryContent { + return predicate.DailyCategoryContent(sql.NotPredicates(p)) +} diff --git a/backend/ent/dailycategorycontent_create.go b/backend/ent/dailycategorycontent_create.go new file mode 100644 index 0000000..1d050ce --- /dev/null +++ b/backend/ent/dailycategorycontent_create.go @@ -0,0 +1,243 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "tss-rocks-be/ent/dailycategory" + "tss-rocks-be/ent/dailycategorycontent" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// DailyCategoryContentCreate is the builder for creating a DailyCategoryContent entity. +type DailyCategoryContentCreate struct { + config + mutation *DailyCategoryContentMutation + hooks []Hook +} + +// SetLanguageCode sets the "language_code" field. +func (dccc *DailyCategoryContentCreate) SetLanguageCode(dc dailycategorycontent.LanguageCode) *DailyCategoryContentCreate { + dccc.mutation.SetLanguageCode(dc) + return dccc +} + +// SetName sets the "name" field. +func (dccc *DailyCategoryContentCreate) SetName(s string) *DailyCategoryContentCreate { + dccc.mutation.SetName(s) + return dccc +} + +// SetCategoryID sets the "category" edge to the DailyCategory entity by ID. +func (dccc *DailyCategoryContentCreate) SetCategoryID(id int) *DailyCategoryContentCreate { + dccc.mutation.SetCategoryID(id) + return dccc +} + +// SetNillableCategoryID sets the "category" edge to the DailyCategory entity by ID if the given value is not nil. +func (dccc *DailyCategoryContentCreate) SetNillableCategoryID(id *int) *DailyCategoryContentCreate { + if id != nil { + dccc = dccc.SetCategoryID(*id) + } + return dccc +} + +// SetCategory sets the "category" edge to the DailyCategory entity. +func (dccc *DailyCategoryContentCreate) SetCategory(d *DailyCategory) *DailyCategoryContentCreate { + return dccc.SetCategoryID(d.ID) +} + +// Mutation returns the DailyCategoryContentMutation object of the builder. 
+func (dccc *DailyCategoryContentCreate) Mutation() *DailyCategoryContentMutation { + return dccc.mutation +} + +// Save creates the DailyCategoryContent in the database. +func (dccc *DailyCategoryContentCreate) Save(ctx context.Context) (*DailyCategoryContent, error) { + return withHooks(ctx, dccc.sqlSave, dccc.mutation, dccc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (dccc *DailyCategoryContentCreate) SaveX(ctx context.Context) *DailyCategoryContent { + v, err := dccc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dccc *DailyCategoryContentCreate) Exec(ctx context.Context) error { + _, err := dccc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dccc *DailyCategoryContentCreate) ExecX(ctx context.Context) { + if err := dccc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (dccc *DailyCategoryContentCreate) check() error { + if _, ok := dccc.mutation.LanguageCode(); !ok { + return &ValidationError{Name: "language_code", err: errors.New(`ent: missing required field "DailyCategoryContent.language_code"`)} + } + if v, ok := dccc.mutation.LanguageCode(); ok { + if err := dailycategorycontent.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "DailyCategoryContent.language_code": %w`, err)} + } + } + if _, ok := dccc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "DailyCategoryContent.name"`)} + } + if v, ok := dccc.mutation.Name(); ok { + if err := dailycategorycontent.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "DailyCategoryContent.name": %w`, err)} + } + } + return nil +} + +func (dccc *DailyCategoryContentCreate) sqlSave(ctx context.Context) (*DailyCategoryContent, error) { + if err := dccc.check(); err != nil { + return nil, err + } + _node, _spec := dccc.createSpec() + if err := sqlgraph.CreateNode(ctx, dccc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + dccc.mutation.id = &_node.ID + dccc.mutation.done = true + return _node, nil +} + +func (dccc *DailyCategoryContentCreate) createSpec() (*DailyCategoryContent, *sqlgraph.CreateSpec) { + var ( + _node = &DailyCategoryContent{config: dccc.config} + _spec = sqlgraph.NewCreateSpec(dailycategorycontent.Table, sqlgraph.NewFieldSpec(dailycategorycontent.FieldID, field.TypeInt)) + ) + if value, ok := dccc.mutation.LanguageCode(); ok { + _spec.SetField(dailycategorycontent.FieldLanguageCode, field.TypeEnum, value) + _node.LanguageCode = value + } + if value, ok := dccc.mutation.Name(); ok { + _spec.SetField(dailycategorycontent.FieldName, field.TypeString, value) + _node.Name = value + } + if nodes := dccc.mutation.CategoryIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: dailycategorycontent.CategoryTable, + Columns: []string{dailycategorycontent.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycategory.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.daily_category_contents = &nodes[0] + _spec.Edges = 
append(_spec.Edges, edge) + } + return _node, _spec +} + +// DailyCategoryContentCreateBulk is the builder for creating many DailyCategoryContent entities in bulk. +type DailyCategoryContentCreateBulk struct { + config + err error + builders []*DailyCategoryContentCreate +} + +// Save creates the DailyCategoryContent entities in the database. +func (dcccb *DailyCategoryContentCreateBulk) Save(ctx context.Context) ([]*DailyCategoryContent, error) { + if dcccb.err != nil { + return nil, dcccb.err + } + specs := make([]*sqlgraph.CreateSpec, len(dcccb.builders)) + nodes := make([]*DailyCategoryContent, len(dcccb.builders)) + mutators := make([]Mutator, len(dcccb.builders)) + for i := range dcccb.builders { + func(i int, root context.Context) { + builder := dcccb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DailyCategoryContentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, dcccb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, dcccb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, dcccb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (dcccb *DailyCategoryContentCreateBulk) SaveX(ctx context.Context) []*DailyCategoryContent { + v, err := dcccb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dcccb *DailyCategoryContentCreateBulk) Exec(ctx context.Context) error { + _, err := dcccb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dcccb *DailyCategoryContentCreateBulk) ExecX(ctx context.Context) { + if err := dcccb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/dailycategorycontent_delete.go b/backend/ent/dailycategorycontent_delete.go new file mode 100644 index 0000000..c9a99fb --- /dev/null +++ b/backend/ent/dailycategorycontent_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "tss-rocks-be/ent/dailycategorycontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// DailyCategoryContentDelete is the builder for deleting a DailyCategoryContent entity. +type DailyCategoryContentDelete struct { + config + hooks []Hook + mutation *DailyCategoryContentMutation +} + +// Where appends a list predicates to the DailyCategoryContentDelete builder. +func (dccd *DailyCategoryContentDelete) Where(ps ...predicate.DailyCategoryContent) *DailyCategoryContentDelete { + dccd.mutation.Where(ps...) 
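+ // Predicates accumulate on the mutation and are applied when sqlExec builds the DELETE statement.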
+ return dccd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (dccd *DailyCategoryContentDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, dccd.sqlExec, dccd.mutation, dccd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (dccd *DailyCategoryContentDelete) ExecX(ctx context.Context) int { + n, err := dccd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (dccd *DailyCategoryContentDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(dailycategorycontent.Table, sqlgraph.NewFieldSpec(dailycategorycontent.FieldID, field.TypeInt)) + if ps := dccd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, dccd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + dccd.mutation.done = true + return affected, err +} + +// DailyCategoryContentDeleteOne is the builder for deleting a single DailyCategoryContent entity. +type DailyCategoryContentDeleteOne struct { + dccd *DailyCategoryContentDelete +} + +// Where appends a list predicates to the DailyCategoryContentDelete builder. +func (dccdo *DailyCategoryContentDeleteOne) Where(ps ...predicate.DailyCategoryContent) *DailyCategoryContentDeleteOne { + dccdo.dccd.mutation.Where(ps...) + return dccdo +} + +// Exec executes the deletion query. +func (dccdo *DailyCategoryContentDeleteOne) Exec(ctx context.Context) error { + n, err := dccdo.dccd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{dailycategorycontent.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (dccdo *DailyCategoryContentDeleteOne) ExecX(ctx context.Context) { + if err := dccdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/dailycategorycontent_query.go b/backend/ent/dailycategorycontent_query.go new file mode 100644 index 0000000..430bb7d --- /dev/null +++ b/backend/ent/dailycategorycontent_query.go @@ -0,0 +1,614 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + "tss-rocks-be/ent/dailycategory" + "tss-rocks-be/ent/dailycategorycontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// DailyCategoryContentQuery is the builder for querying DailyCategoryContent entities. +type DailyCategoryContentQuery struct { + config + ctx *QueryContext + order []dailycategorycontent.OrderOption + inters []Interceptor + predicates []predicate.DailyCategoryContent + withCategory *DailyCategoryQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the DailyCategoryContentQuery builder. +func (dccq *DailyCategoryContentQuery) Where(ps ...predicate.DailyCategoryContent) *DailyCategoryContentQuery { + dccq.predicates = append(dccq.predicates, ps...) + return dccq +} + +// Limit the number of records to be returned by this query. +func (dccq *DailyCategoryContentQuery) Limit(limit int) *DailyCategoryContentQuery { + dccq.ctx.Limit = &limit + return dccq +} + +// Offset to start from. 
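+// As in the sibling query builders, an Offset set without a Limit is paired with math.MaxInt32 when the SQL is built, since the dialect requires LIMIT alongside OFFSET.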
+func (dccq *DailyCategoryContentQuery) Offset(offset int) *DailyCategoryContentQuery { + dccq.ctx.Offset = &offset + return dccq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (dccq *DailyCategoryContentQuery) Unique(unique bool) *DailyCategoryContentQuery { + dccq.ctx.Unique = &unique + return dccq +} + +// Order specifies how the records should be ordered. +func (dccq *DailyCategoryContentQuery) Order(o ...dailycategorycontent.OrderOption) *DailyCategoryContentQuery { + dccq.order = append(dccq.order, o...) + return dccq +} + +// QueryCategory chains the current query on the "category" edge. +func (dccq *DailyCategoryContentQuery) QueryCategory() *DailyCategoryQuery { + query := (&DailyCategoryClient{config: dccq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dccq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dccq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(dailycategorycontent.Table, dailycategorycontent.FieldID, selector), + sqlgraph.To(dailycategory.Table, dailycategory.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, dailycategorycontent.CategoryTable, dailycategorycontent.CategoryColumn), + ) + fromU = sqlgraph.SetNeighbors(dccq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first DailyCategoryContent entity from the query. +// Returns a *NotFoundError when no DailyCategoryContent was found. +func (dccq *DailyCategoryContentQuery) First(ctx context.Context) (*DailyCategoryContent, error) { + nodes, err := dccq.Limit(1).All(setContextOp(ctx, dccq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{dailycategorycontent.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (dccq *DailyCategoryContentQuery) FirstX(ctx context.Context) *DailyCategoryContent { + node, err := dccq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first DailyCategoryContent ID from the query. +// Returns a *NotFoundError when no DailyCategoryContent ID was found. +func (dccq *DailyCategoryContentQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = dccq.Limit(1).IDs(setContextOp(ctx, dccq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{dailycategorycontent.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (dccq *DailyCategoryContentQuery) FirstIDX(ctx context.Context) int { + id, err := dccq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single DailyCategoryContent entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one DailyCategoryContent entity is found. +// Returns a *NotFoundError when no DailyCategoryContent entities are found. 
+func (dccq *DailyCategoryContentQuery) Only(ctx context.Context) (*DailyCategoryContent, error) { + nodes, err := dccq.Limit(2).All(setContextOp(ctx, dccq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{dailycategorycontent.Label} + default: + return nil, &NotSingularError{dailycategorycontent.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (dccq *DailyCategoryContentQuery) OnlyX(ctx context.Context) *DailyCategoryContent { + node, err := dccq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only DailyCategoryContent ID in the query. +// Returns a *NotSingularError when more than one DailyCategoryContent ID is found. +// Returns a *NotFoundError when no entities are found. +func (dccq *DailyCategoryContentQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = dccq.Limit(2).IDs(setContextOp(ctx, dccq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{dailycategorycontent.Label} + default: + err = &NotSingularError{dailycategorycontent.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (dccq *DailyCategoryContentQuery) OnlyIDX(ctx context.Context) int { + id, err := dccq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of DailyCategoryContents. +func (dccq *DailyCategoryContentQuery) All(ctx context.Context) ([]*DailyCategoryContent, error) { + ctx = setContextOp(ctx, dccq.ctx, ent.OpQueryAll) + if err := dccq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*DailyCategoryContent, *DailyCategoryContentQuery]() + return withInterceptors[[]*DailyCategoryContent](ctx, dccq, qr, dccq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (dccq *DailyCategoryContentQuery) AllX(ctx context.Context) []*DailyCategoryContent { + nodes, err := dccq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of DailyCategoryContent IDs. +func (dccq *DailyCategoryContentQuery) IDs(ctx context.Context) (ids []int, err error) { + if dccq.ctx.Unique == nil && dccq.path != nil { + dccq.Unique(true) + } + ctx = setContextOp(ctx, dccq.ctx, ent.OpQueryIDs) + if err = dccq.Select(dailycategorycontent.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (dccq *DailyCategoryContentQuery) IDsX(ctx context.Context) []int { + ids, err := dccq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (dccq *DailyCategoryContentQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, dccq.ctx, ent.OpQueryCount) + if err := dccq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, dccq, querierCount[*DailyCategoryContentQuery](), dccq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (dccq *DailyCategoryContentQuery) CountX(ctx context.Context) int { + count, err := dccq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
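+// It is built on FirstID, so it runs a LIMIT 1 lookup rather than a COUNT.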
+func (dccq *DailyCategoryContentQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, dccq.ctx, ent.OpQueryExist) + switch _, err := dccq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (dccq *DailyCategoryContentQuery) ExistX(ctx context.Context) bool { + exist, err := dccq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the DailyCategoryContentQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (dccq *DailyCategoryContentQuery) Clone() *DailyCategoryContentQuery { + if dccq == nil { + return nil + } + return &DailyCategoryContentQuery{ + config: dccq.config, + ctx: dccq.ctx.Clone(), + order: append([]dailycategorycontent.OrderOption{}, dccq.order...), + inters: append([]Interceptor{}, dccq.inters...), + predicates: append([]predicate.DailyCategoryContent{}, dccq.predicates...), + withCategory: dccq.withCategory.Clone(), + // clone intermediate query. + sql: dccq.sql.Clone(), + path: dccq.path, + } +} + +// WithCategory tells the query-builder to eager-load the nodes that are connected to +// the "category" edge. The optional arguments are used to configure the query builder of the edge. +func (dccq *DailyCategoryContentQuery) WithCategory(opts ...func(*DailyCategoryQuery)) *DailyCategoryContentQuery { + query := (&DailyCategoryClient{config: dccq.config}).Query() + for _, opt := range opts { + opt(query) + } + dccq.withCategory = query + return dccq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// LanguageCode dailycategorycontent.LanguageCode `json:"language_code,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.DailyCategoryContent.Query(). +// GroupBy(dailycategorycontent.FieldLanguageCode). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (dccq *DailyCategoryContentQuery) GroupBy(field string, fields ...string) *DailyCategoryContentGroupBy { + dccq.ctx.Fields = append([]string{field}, fields...) + grbuild := &DailyCategoryContentGroupBy{build: dccq} + grbuild.flds = &dccq.ctx.Fields + grbuild.label = dailycategorycontent.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// LanguageCode dailycategorycontent.LanguageCode `json:"language_code,omitempty"` +// } +// +// client.DailyCategoryContent.Query(). +// Select(dailycategorycontent.FieldLanguageCode). +// Scan(ctx, &v) +func (dccq *DailyCategoryContentQuery) Select(fields ...string) *DailyCategoryContentSelect { + dccq.ctx.Fields = append(dccq.ctx.Fields, fields...) + sbuild := &DailyCategoryContentSelect{DailyCategoryContentQuery: dccq} + sbuild.label = dailycategorycontent.Label + sbuild.flds, sbuild.scan = &dccq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a DailyCategoryContentSelect configured with the given aggregations. +func (dccq *DailyCategoryContentQuery) Aggregate(fns ...AggregateFunc) *DailyCategoryContentSelect { + return dccq.Select().Aggregate(fns...) 
+} + +func (dccq *DailyCategoryContentQuery) prepareQuery(ctx context.Context) error { + for _, inter := range dccq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, dccq); err != nil { + return err + } + } + } + for _, f := range dccq.ctx.Fields { + if !dailycategorycontent.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if dccq.path != nil { + prev, err := dccq.path(ctx) + if err != nil { + return err + } + dccq.sql = prev + } + return nil +} + +func (dccq *DailyCategoryContentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DailyCategoryContent, error) { + var ( + nodes = []*DailyCategoryContent{} + withFKs = dccq.withFKs + _spec = dccq.querySpec() + loadedTypes = [1]bool{ + dccq.withCategory != nil, + } + ) + if dccq.withCategory != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, dailycategorycontent.ForeignKeys...) + } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*DailyCategoryContent).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &DailyCategoryContent{config: dccq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, dccq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := dccq.withCategory; query != nil { + if err := dccq.loadCategory(ctx, query, nodes, nil, + func(n *DailyCategoryContent, e *DailyCategory) { n.Edges.Category = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (dccq *DailyCategoryContentQuery) loadCategory(ctx context.Context, query *DailyCategoryQuery, nodes []*DailyCategoryContent, init func(*DailyCategoryContent), assign func(*DailyCategoryContent, *DailyCategory)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*DailyCategoryContent) + for i := range nodes { + if nodes[i].daily_category_contents == nil { + continue + } + fk := *nodes[i].daily_category_contents + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(dailycategory.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "daily_category_contents" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (dccq *DailyCategoryContentQuery) sqlCount(ctx context.Context) (int, error) { + _spec := dccq.querySpec() + _spec.Node.Columns = dccq.ctx.Fields + if len(dccq.ctx.Fields) > 0 { + _spec.Unique = dccq.ctx.Unique != nil && *dccq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, dccq.driver, _spec) +} + +func (dccq *DailyCategoryContentQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(dailycategorycontent.Table, dailycategorycontent.Columns, sqlgraph.NewFieldSpec(dailycategorycontent.FieldID, field.TypeInt)) + _spec.From = dccq.sql + if unique := dccq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if dccq.path != nil { + _spec.Unique = true + } + if fields := 
dccq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, dailycategorycontent.FieldID) + for i := range fields { + if fields[i] != dailycategorycontent.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := dccq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := dccq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := dccq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := dccq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (dccq *DailyCategoryContentQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(dccq.driver.Dialect()) + t1 := builder.Table(dailycategorycontent.Table) + columns := dccq.ctx.Fields + if len(columns) == 0 { + columns = dailycategorycontent.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if dccq.sql != nil { + selector = dccq.sql + selector.Select(selector.Columns(columns...)...) + } + if dccq.ctx.Unique != nil && *dccq.ctx.Unique { + selector.Distinct() + } + for _, p := range dccq.predicates { + p(selector) + } + for _, p := range dccq.order { + p(selector) + } + if offset := dccq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := dccq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// DailyCategoryContentGroupBy is the group-by builder for DailyCategoryContent entities. +type DailyCategoryContentGroupBy struct { + selector + build *DailyCategoryContentQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (dccgb *DailyCategoryContentGroupBy) Aggregate(fns ...AggregateFunc) *DailyCategoryContentGroupBy { + dccgb.fns = append(dccgb.fns, fns...) + return dccgb +} + +// Scan applies the selector query and scans the result into the given value. +func (dccgb *DailyCategoryContentGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, dccgb.build.ctx, ent.OpQueryGroupBy) + if err := dccgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DailyCategoryContentQuery, *DailyCategoryContentGroupBy](ctx, dccgb.build, dccgb, dccgb.build.inters, v) +} + +func (dccgb *DailyCategoryContentGroupBy) sqlScan(ctx context.Context, root *DailyCategoryContentQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(dccgb.fns)) + for _, fn := range dccgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*dccgb.flds)+len(dccgb.fns)) + for _, f := range *dccgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*dccgb.flds...)...) 
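+	// The selector now carries the grouped columns plus any aggregation
+	// expressions; Err() below surfaces errors collected while building it.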
+ if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dccgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// DailyCategoryContentSelect is the builder for selecting fields of DailyCategoryContent entities. +type DailyCategoryContentSelect struct { + *DailyCategoryContentQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (dccs *DailyCategoryContentSelect) Aggregate(fns ...AggregateFunc) *DailyCategoryContentSelect { + dccs.fns = append(dccs.fns, fns...) + return dccs +} + +// Scan applies the selector query and scans the result into the given value. +func (dccs *DailyCategoryContentSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, dccs.ctx, ent.OpQuerySelect) + if err := dccs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DailyCategoryContentQuery, *DailyCategoryContentSelect](ctx, dccs.DailyCategoryContentQuery, dccs, dccs.inters, v) +} + +func (dccs *DailyCategoryContentSelect) sqlScan(ctx context.Context, root *DailyCategoryContentQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(dccs.fns)) + for _, fn := range dccs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*dccs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dccs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/dailycategorycontent_update.go b/backend/ent/dailycategorycontent_update.go new file mode 100644 index 0000000..9c89907 --- /dev/null +++ b/backend/ent/dailycategorycontent_update.go @@ -0,0 +1,388 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "tss-rocks-be/ent/dailycategory" + "tss-rocks-be/ent/dailycategorycontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// DailyCategoryContentUpdate is the builder for updating DailyCategoryContent entities. +type DailyCategoryContentUpdate struct { + config + hooks []Hook + mutation *DailyCategoryContentMutation +} + +// Where appends a list predicates to the DailyCategoryContentUpdate builder. +func (dccu *DailyCategoryContentUpdate) Where(ps ...predicate.DailyCategoryContent) *DailyCategoryContentUpdate { + dccu.mutation.Where(ps...) + return dccu +} + +// SetLanguageCode sets the "language_code" field. +func (dccu *DailyCategoryContentUpdate) SetLanguageCode(dc dailycategorycontent.LanguageCode) *DailyCategoryContentUpdate { + dccu.mutation.SetLanguageCode(dc) + return dccu +} + +// SetNillableLanguageCode sets the "language_code" field if the given value is not nil. +func (dccu *DailyCategoryContentUpdate) SetNillableLanguageCode(dc *dailycategorycontent.LanguageCode) *DailyCategoryContentUpdate { + if dc != nil { + dccu.SetLanguageCode(*dc) + } + return dccu +} + +// SetName sets the "name" field. +func (dccu *DailyCategoryContentUpdate) SetName(s string) *DailyCategoryContentUpdate { + dccu.mutation.SetName(s) + return dccu +} + +// SetNillableName sets the "name" field if the given value is not nil. 
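+// This suits PATCH-style handlers where omitted fields arrive as nil pointers, e.g.:
+//
+//	affected, err := client.DailyCategoryContent.Update().
+//		Where(dailycategorycontent.IDEQ(1)).
+//		SetNillableName(name). // name is a *string that may be nil
+//		Save(ctx)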
+func (dccu *DailyCategoryContentUpdate) SetNillableName(s *string) *DailyCategoryContentUpdate { + if s != nil { + dccu.SetName(*s) + } + return dccu +} + +// SetCategoryID sets the "category" edge to the DailyCategory entity by ID. +func (dccu *DailyCategoryContentUpdate) SetCategoryID(id int) *DailyCategoryContentUpdate { + dccu.mutation.SetCategoryID(id) + return dccu +} + +// SetNillableCategoryID sets the "category" edge to the DailyCategory entity by ID if the given value is not nil. +func (dccu *DailyCategoryContentUpdate) SetNillableCategoryID(id *int) *DailyCategoryContentUpdate { + if id != nil { + dccu = dccu.SetCategoryID(*id) + } + return dccu +} + +// SetCategory sets the "category" edge to the DailyCategory entity. +func (dccu *DailyCategoryContentUpdate) SetCategory(d *DailyCategory) *DailyCategoryContentUpdate { + return dccu.SetCategoryID(d.ID) +} + +// Mutation returns the DailyCategoryContentMutation object of the builder. +func (dccu *DailyCategoryContentUpdate) Mutation() *DailyCategoryContentMutation { + return dccu.mutation +} + +// ClearCategory clears the "category" edge to the DailyCategory entity. +func (dccu *DailyCategoryContentUpdate) ClearCategory() *DailyCategoryContentUpdate { + dccu.mutation.ClearCategory() + return dccu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (dccu *DailyCategoryContentUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, dccu.sqlSave, dccu.mutation, dccu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (dccu *DailyCategoryContentUpdate) SaveX(ctx context.Context) int { + affected, err := dccu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (dccu *DailyCategoryContentUpdate) Exec(ctx context.Context) error { + _, err := dccu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dccu *DailyCategoryContentUpdate) ExecX(ctx context.Context) { + if err := dccu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
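+// It runs from sqlSave before any statement is issued, so an invalid enum or
+// name value fails fast as a *ValidationError instead of reaching the database.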
+func (dccu *DailyCategoryContentUpdate) check() error { + if v, ok := dccu.mutation.LanguageCode(); ok { + if err := dailycategorycontent.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "DailyCategoryContent.language_code": %w`, err)} + } + } + if v, ok := dccu.mutation.Name(); ok { + if err := dailycategorycontent.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "DailyCategoryContent.name": %w`, err)} + } + } + return nil +} + +func (dccu *DailyCategoryContentUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := dccu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(dailycategorycontent.Table, dailycategorycontent.Columns, sqlgraph.NewFieldSpec(dailycategorycontent.FieldID, field.TypeInt)) + if ps := dccu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := dccu.mutation.LanguageCode(); ok { + _spec.SetField(dailycategorycontent.FieldLanguageCode, field.TypeEnum, value) + } + if value, ok := dccu.mutation.Name(); ok { + _spec.SetField(dailycategorycontent.FieldName, field.TypeString, value) + } + if dccu.mutation.CategoryCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: dailycategorycontent.CategoryTable, + Columns: []string{dailycategorycontent.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycategory.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dccu.mutation.CategoryIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: dailycategorycontent.CategoryTable, + Columns: []string{dailycategorycontent.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycategory.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, dccu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{dailycategorycontent.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + dccu.mutation.done = true + return n, nil +} + +// DailyCategoryContentUpdateOne is the builder for updating a single DailyCategoryContent entity. +type DailyCategoryContentUpdateOne struct { + config + fields []string + hooks []Hook + mutation *DailyCategoryContentMutation +} + +// SetLanguageCode sets the "language_code" field. +func (dccuo *DailyCategoryContentUpdateOne) SetLanguageCode(dc dailycategorycontent.LanguageCode) *DailyCategoryContentUpdateOne { + dccuo.mutation.SetLanguageCode(dc) + return dccuo +} + +// SetNillableLanguageCode sets the "language_code" field if the given value is not nil. +func (dccuo *DailyCategoryContentUpdateOne) SetNillableLanguageCode(dc *dailycategorycontent.LanguageCode) *DailyCategoryContentUpdateOne { + if dc != nil { + dccuo.SetLanguageCode(*dc) + } + return dccuo +} + +// SetName sets the "name" field. 
+func (dccuo *DailyCategoryContentUpdateOne) SetName(s string) *DailyCategoryContentUpdateOne { + dccuo.mutation.SetName(s) + return dccuo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (dccuo *DailyCategoryContentUpdateOne) SetNillableName(s *string) *DailyCategoryContentUpdateOne { + if s != nil { + dccuo.SetName(*s) + } + return dccuo +} + +// SetCategoryID sets the "category" edge to the DailyCategory entity by ID. +func (dccuo *DailyCategoryContentUpdateOne) SetCategoryID(id int) *DailyCategoryContentUpdateOne { + dccuo.mutation.SetCategoryID(id) + return dccuo +} + +// SetNillableCategoryID sets the "category" edge to the DailyCategory entity by ID if the given value is not nil. +func (dccuo *DailyCategoryContentUpdateOne) SetNillableCategoryID(id *int) *DailyCategoryContentUpdateOne { + if id != nil { + dccuo = dccuo.SetCategoryID(*id) + } + return dccuo +} + +// SetCategory sets the "category" edge to the DailyCategory entity. +func (dccuo *DailyCategoryContentUpdateOne) SetCategory(d *DailyCategory) *DailyCategoryContentUpdateOne { + return dccuo.SetCategoryID(d.ID) +} + +// Mutation returns the DailyCategoryContentMutation object of the builder. +func (dccuo *DailyCategoryContentUpdateOne) Mutation() *DailyCategoryContentMutation { + return dccuo.mutation +} + +// ClearCategory clears the "category" edge to the DailyCategory entity. +func (dccuo *DailyCategoryContentUpdateOne) ClearCategory() *DailyCategoryContentUpdateOne { + dccuo.mutation.ClearCategory() + return dccuo +} + +// Where appends a list predicates to the DailyCategoryContentUpdate builder. +func (dccuo *DailyCategoryContentUpdateOne) Where(ps ...predicate.DailyCategoryContent) *DailyCategoryContentUpdateOne { + dccuo.mutation.Where(ps...) + return dccuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (dccuo *DailyCategoryContentUpdateOne) Select(field string, fields ...string) *DailyCategoryContentUpdateOne { + dccuo.fields = append([]string{field}, fields...) + return dccuo +} + +// Save executes the query and returns the updated DailyCategoryContent entity. +func (dccuo *DailyCategoryContentUpdateOne) Save(ctx context.Context) (*DailyCategoryContent, error) { + return withHooks(ctx, dccuo.sqlSave, dccuo.mutation, dccuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (dccuo *DailyCategoryContentUpdateOne) SaveX(ctx context.Context) *DailyCategoryContent { + node, err := dccuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (dccuo *DailyCategoryContentUpdateOne) Exec(ctx context.Context) error { + _, err := dccuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dccuo *DailyCategoryContentUpdateOne) ExecX(ctx context.Context) { + if err := dccuo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (dccuo *DailyCategoryContentUpdateOne) check() error { + if v, ok := dccuo.mutation.LanguageCode(); ok { + if err := dailycategorycontent.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "DailyCategoryContent.language_code": %w`, err)} + } + } + if v, ok := dccuo.mutation.Name(); ok { + if err := dailycategorycontent.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "DailyCategoryContent.name": %w`, err)} + } + } + return nil +} + +func (dccuo *DailyCategoryContentUpdateOne) sqlSave(ctx context.Context) (_node *DailyCategoryContent, err error) { + if err := dccuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(dailycategorycontent.Table, dailycategorycontent.Columns, sqlgraph.NewFieldSpec(dailycategorycontent.FieldID, field.TypeInt)) + id, ok := dccuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DailyCategoryContent.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := dccuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, dailycategorycontent.FieldID) + for _, f := range fields { + if !dailycategorycontent.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != dailycategorycontent.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := dccuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := dccuo.mutation.LanguageCode(); ok { + _spec.SetField(dailycategorycontent.FieldLanguageCode, field.TypeEnum, value) + } + if value, ok := dccuo.mutation.Name(); ok { + _spec.SetField(dailycategorycontent.FieldName, field.TypeString, value) + } + if dccuo.mutation.CategoryCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: dailycategorycontent.CategoryTable, + Columns: []string{dailycategorycontent.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycategory.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dccuo.mutation.CategoryIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: dailycategorycontent.CategoryTable, + Columns: []string{dailycategorycontent.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(dailycategory.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &DailyCategoryContent{config: dccuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, dccuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{dailycategorycontent.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + dccuo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/dailycontent.go b/backend/ent/dailycontent.go new file mode 100644 index 0000000..27c2ef4 --- /dev/null +++ b/backend/ent/dailycontent.go @@ -0,0 +1,153 @@ 
+// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/dailycontent" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// DailyContent is the model entity for the DailyContent schema. +type DailyContent struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // LanguageCode holds the value of the "language_code" field. + LanguageCode dailycontent.LanguageCode `json:"language_code,omitempty"` + // Quote holds the value of the "quote" field. + Quote string `json:"quote,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the DailyContentQuery when eager-loading is set. + Edges DailyContentEdges `json:"edges"` + daily_contents *string + selectValues sql.SelectValues +} + +// DailyContentEdges holds the relations/edges for other nodes in the graph. +type DailyContentEdges struct { + // Daily holds the value of the daily edge. + Daily *Daily `json:"daily,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// DailyOrErr returns the Daily value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e DailyContentEdges) DailyOrErr() (*Daily, error) { + if e.Daily != nil { + return e.Daily, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: daily.Label} + } + return nil, &NotLoadedError{edge: "daily"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*DailyContent) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case dailycontent.FieldID: + values[i] = new(sql.NullInt64) + case dailycontent.FieldLanguageCode, dailycontent.FieldQuote: + values[i] = new(sql.NullString) + case dailycontent.ForeignKeys[0]: // daily_contents + values[i] = new(sql.NullString) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the DailyContent fields. 
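+// Columns outside the static schema fall through to selectValues, which keeps
+// modifier- and order-selected values retrievable later via Value(name).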
+func (dc *DailyContent) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case dailycontent.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + dc.ID = int(value.Int64) + case dailycontent.FieldLanguageCode: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field language_code", values[i]) + } else if value.Valid { + dc.LanguageCode = dailycontent.LanguageCode(value.String) + } + case dailycontent.FieldQuote: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field quote", values[i]) + } else if value.Valid { + dc.Quote = value.String + } + case dailycontent.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field daily_contents", values[i]) + } else if value.Valid { + dc.daily_contents = new(string) + *dc.daily_contents = value.String + } + default: + dc.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the DailyContent. +// This includes values selected through modifiers, order, etc. +func (dc *DailyContent) Value(name string) (ent.Value, error) { + return dc.selectValues.Get(name) +} + +// QueryDaily queries the "daily" edge of the DailyContent entity. +func (dc *DailyContent) QueryDaily() *DailyQuery { + return NewDailyContentClient(dc.config).QueryDaily(dc) +} + +// Update returns a builder for updating this DailyContent. +// Note that you need to call DailyContent.Unwrap() before calling this method if this DailyContent +// was returned from a transaction, and the transaction was committed or rolled back. +func (dc *DailyContent) Update() *DailyContentUpdateOne { + return NewDailyContentClient(dc.config).UpdateOne(dc) +} + +// Unwrap unwraps the DailyContent entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (dc *DailyContent) Unwrap() *DailyContent { + _tx, ok := dc.config.driver.(*txDriver) + if !ok { + panic("ent: DailyContent is not a transactional entity") + } + dc.config.driver = _tx.drv + return dc +} + +// String implements the fmt.Stringer. +func (dc *DailyContent) String() string { + var builder strings.Builder + builder.WriteString("DailyContent(") + builder.WriteString(fmt.Sprintf("id=%v, ", dc.ID)) + builder.WriteString("language_code=") + builder.WriteString(fmt.Sprintf("%v", dc.LanguageCode)) + builder.WriteString(", ") + builder.WriteString("quote=") + builder.WriteString(dc.Quote) + builder.WriteByte(')') + return builder.String() +} + +// DailyContents is a parsable slice of DailyContent. +type DailyContents []*DailyContent diff --git a/backend/ent/dailycontent/dailycontent.go b/backend/ent/dailycontent/dailycontent.go new file mode 100644 index 0000000..12def0e --- /dev/null +++ b/backend/ent/dailycontent/dailycontent.go @@ -0,0 +1,121 @@ +// Code generated by ent, DO NOT EDIT. + +package dailycontent + +import ( + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the dailycontent type in the database. 
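+	// It is the label reported by *NotFoundError and *NotSingularError for this type.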
+ Label = "daily_content" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldLanguageCode holds the string denoting the language_code field in the database. + FieldLanguageCode = "language_code" + // FieldQuote holds the string denoting the quote field in the database. + FieldQuote = "quote" + // EdgeDaily holds the string denoting the daily edge name in mutations. + EdgeDaily = "daily" + // Table holds the table name of the dailycontent in the database. + Table = "daily_contents" + // DailyTable is the table that holds the daily relation/edge. + DailyTable = "daily_contents" + // DailyInverseTable is the table name for the Daily entity. + // It exists in this package in order to avoid circular dependency with the "daily" package. + DailyInverseTable = "dailies" + // DailyColumn is the table column denoting the daily relation/edge. + DailyColumn = "daily_contents" +) + +// Columns holds all SQL columns for dailycontent fields. +var Columns = []string{ + FieldID, + FieldLanguageCode, + FieldQuote, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "daily_contents" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "daily_contents", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // QuoteValidator is a validator for the "quote" field. It is called by the builders before save. + QuoteValidator func(string) error +) + +// LanguageCode defines the type for the "language_code" enum field. +type LanguageCode string + +// LanguageCode values. +const ( + LanguageCodeEN LanguageCode = "en" + LanguageCodeZH_HANS LanguageCode = "zh-Hans" + LanguageCodeZH_HANT LanguageCode = "zh-Hant" +) + +func (lc LanguageCode) String() string { + return string(lc) +} + +// LanguageCodeValidator is a validator for the "language_code" field enum values. It is called by the builders before save. +func LanguageCodeValidator(lc LanguageCode) error { + switch lc { + case LanguageCodeEN, LanguageCodeZH_HANS, LanguageCodeZH_HANT: + return nil + default: + return fmt.Errorf("dailycontent: invalid enum value for language_code field: %q", lc) + } +} + +// OrderOption defines the ordering options for the DailyContent queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByLanguageCode orders the results by the language_code field. +func ByLanguageCode(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLanguageCode, opts...).ToFunc() +} + +// ByQuote orders the results by the quote field. +func ByQuote(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldQuote, opts...).ToFunc() +} + +// ByDailyField orders the results by daily field. 
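+//
+// For example (a sketch ordering contents by the parent daily's ID):
+//
+//	client.DailyContent.Query().
+//		Order(dailycontent.ByDailyField(daily.FieldID)).
+//		All(ctx)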
+func ByDailyField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDailyStep(), sql.OrderByField(field, opts...)) + } +} +func newDailyStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DailyInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, DailyTable, DailyColumn), + ) +} diff --git a/backend/ent/dailycontent/where.go b/backend/ent/dailycontent/where.go new file mode 100644 index 0000000..78b4d0d --- /dev/null +++ b/backend/ent/dailycontent/where.go @@ -0,0 +1,183 @@ +// Code generated by ent, DO NOT EDIT. + +package dailycontent + +import ( + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.DailyContent { + return predicate.DailyContent(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.DailyContent { + return predicate.DailyContent(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.DailyContent { + return predicate.DailyContent(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.DailyContent { + return predicate.DailyContent(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.DailyContent { + return predicate.DailyContent(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.DailyContent { + return predicate.DailyContent(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.DailyContent { + return predicate.DailyContent(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.DailyContent { + return predicate.DailyContent(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.DailyContent { + return predicate.DailyContent(sql.FieldLTE(FieldID, id)) +} + +// Quote applies equality check predicate on the "quote" field. It's identical to QuoteEQ. +func Quote(v string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldEQ(FieldQuote, v)) +} + +// LanguageCodeEQ applies the EQ predicate on the "language_code" field. +func LanguageCodeEQ(v LanguageCode) predicate.DailyContent { + return predicate.DailyContent(sql.FieldEQ(FieldLanguageCode, v)) +} + +// LanguageCodeNEQ applies the NEQ predicate on the "language_code" field. +func LanguageCodeNEQ(v LanguageCode) predicate.DailyContent { + return predicate.DailyContent(sql.FieldNEQ(FieldLanguageCode, v)) +} + +// LanguageCodeIn applies the In predicate on the "language_code" field. +func LanguageCodeIn(vs ...LanguageCode) predicate.DailyContent { + return predicate.DailyContent(sql.FieldIn(FieldLanguageCode, vs...)) +} + +// LanguageCodeNotIn applies the NotIn predicate on the "language_code" field. +func LanguageCodeNotIn(vs ...LanguageCode) predicate.DailyContent { + return predicate.DailyContent(sql.FieldNotIn(FieldLanguageCode, vs...)) +} + +// QuoteEQ applies the EQ predicate on the "quote" field. +func QuoteEQ(v string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldEQ(FieldQuote, v)) +} + +// QuoteNEQ applies the NEQ predicate on the "quote" field. 
+func QuoteNEQ(v string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldNEQ(FieldQuote, v)) +} + +// QuoteIn applies the In predicate on the "quote" field. +func QuoteIn(vs ...string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldIn(FieldQuote, vs...)) +} + +// QuoteNotIn applies the NotIn predicate on the "quote" field. +func QuoteNotIn(vs ...string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldNotIn(FieldQuote, vs...)) +} + +// QuoteGT applies the GT predicate on the "quote" field. +func QuoteGT(v string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldGT(FieldQuote, v)) +} + +// QuoteGTE applies the GTE predicate on the "quote" field. +func QuoteGTE(v string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldGTE(FieldQuote, v)) +} + +// QuoteLT applies the LT predicate on the "quote" field. +func QuoteLT(v string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldLT(FieldQuote, v)) +} + +// QuoteLTE applies the LTE predicate on the "quote" field. +func QuoteLTE(v string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldLTE(FieldQuote, v)) +} + +// QuoteContains applies the Contains predicate on the "quote" field. +func QuoteContains(v string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldContains(FieldQuote, v)) +} + +// QuoteHasPrefix applies the HasPrefix predicate on the "quote" field. +func QuoteHasPrefix(v string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldHasPrefix(FieldQuote, v)) +} + +// QuoteHasSuffix applies the HasSuffix predicate on the "quote" field. +func QuoteHasSuffix(v string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldHasSuffix(FieldQuote, v)) +} + +// QuoteEqualFold applies the EqualFold predicate on the "quote" field. +func QuoteEqualFold(v string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldEqualFold(FieldQuote, v)) +} + +// QuoteContainsFold applies the ContainsFold predicate on the "quote" field. +func QuoteContainsFold(v string) predicate.DailyContent { + return predicate.DailyContent(sql.FieldContainsFold(FieldQuote, v)) +} + +// HasDaily applies the HasEdge predicate on the "daily" edge. +func HasDaily() predicate.DailyContent { + return predicate.DailyContent(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, DailyTable, DailyColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasDailyWith applies the HasEdge predicate on the "daily" edge with a given conditions (other predicates). +func HasDailyWith(preds ...predicate.Daily) predicate.DailyContent { + return predicate.DailyContent(func(s *sql.Selector) { + step := newDailyStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.DailyContent) predicate.DailyContent { + return predicate.DailyContent(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.DailyContent) predicate.DailyContent { + return predicate.DailyContent(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
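+//
+// Example:
+//
+//	dailycontent.Not(dailycontent.QuoteContainsFold("draft"))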
+func Not(p predicate.DailyContent) predicate.DailyContent { + return predicate.DailyContent(sql.NotPredicates(p)) +} diff --git a/backend/ent/dailycontent_create.go b/backend/ent/dailycontent_create.go new file mode 100644 index 0000000..fa24d55 --- /dev/null +++ b/backend/ent/dailycontent_create.go @@ -0,0 +1,243 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/dailycontent" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// DailyContentCreate is the builder for creating a DailyContent entity. +type DailyContentCreate struct { + config + mutation *DailyContentMutation + hooks []Hook +} + +// SetLanguageCode sets the "language_code" field. +func (dcc *DailyContentCreate) SetLanguageCode(dc dailycontent.LanguageCode) *DailyContentCreate { + dcc.mutation.SetLanguageCode(dc) + return dcc +} + +// SetQuote sets the "quote" field. +func (dcc *DailyContentCreate) SetQuote(s string) *DailyContentCreate { + dcc.mutation.SetQuote(s) + return dcc +} + +// SetDailyID sets the "daily" edge to the Daily entity by ID. +func (dcc *DailyContentCreate) SetDailyID(id string) *DailyContentCreate { + dcc.mutation.SetDailyID(id) + return dcc +} + +// SetNillableDailyID sets the "daily" edge to the Daily entity by ID if the given value is not nil. +func (dcc *DailyContentCreate) SetNillableDailyID(id *string) *DailyContentCreate { + if id != nil { + dcc = dcc.SetDailyID(*id) + } + return dcc +} + +// SetDaily sets the "daily" edge to the Daily entity. +func (dcc *DailyContentCreate) SetDaily(d *Daily) *DailyContentCreate { + return dcc.SetDailyID(d.ID) +} + +// Mutation returns the DailyContentMutation object of the builder. +func (dcc *DailyContentCreate) Mutation() *DailyContentMutation { + return dcc.mutation +} + +// Save creates the DailyContent in the database. +func (dcc *DailyContentCreate) Save(ctx context.Context) (*DailyContent, error) { + return withHooks(ctx, dcc.sqlSave, dcc.mutation, dcc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (dcc *DailyContentCreate) SaveX(ctx context.Context) *DailyContent { + v, err := dcc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dcc *DailyContentCreate) Exec(ctx context.Context) error { + _, err := dcc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dcc *DailyContentCreate) ExecX(ctx context.Context) { + if err := dcc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
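+// Both language_code and quote are required here, so a missing or invalid value
+// aborts the create with a *ValidationError before any SQL is issued.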
+func (dcc *DailyContentCreate) check() error { + if _, ok := dcc.mutation.LanguageCode(); !ok { + return &ValidationError{Name: "language_code", err: errors.New(`ent: missing required field "DailyContent.language_code"`)} + } + if v, ok := dcc.mutation.LanguageCode(); ok { + if err := dailycontent.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "DailyContent.language_code": %w`, err)} + } + } + if _, ok := dcc.mutation.Quote(); !ok { + return &ValidationError{Name: "quote", err: errors.New(`ent: missing required field "DailyContent.quote"`)} + } + if v, ok := dcc.mutation.Quote(); ok { + if err := dailycontent.QuoteValidator(v); err != nil { + return &ValidationError{Name: "quote", err: fmt.Errorf(`ent: validator failed for field "DailyContent.quote": %w`, err)} + } + } + return nil +} + +func (dcc *DailyContentCreate) sqlSave(ctx context.Context) (*DailyContent, error) { + if err := dcc.check(); err != nil { + return nil, err + } + _node, _spec := dcc.createSpec() + if err := sqlgraph.CreateNode(ctx, dcc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + dcc.mutation.id = &_node.ID + dcc.mutation.done = true + return _node, nil +} + +func (dcc *DailyContentCreate) createSpec() (*DailyContent, *sqlgraph.CreateSpec) { + var ( + _node = &DailyContent{config: dcc.config} + _spec = sqlgraph.NewCreateSpec(dailycontent.Table, sqlgraph.NewFieldSpec(dailycontent.FieldID, field.TypeInt)) + ) + if value, ok := dcc.mutation.LanguageCode(); ok { + _spec.SetField(dailycontent.FieldLanguageCode, field.TypeEnum, value) + _node.LanguageCode = value + } + if value, ok := dcc.mutation.Quote(); ok { + _spec.SetField(dailycontent.FieldQuote, field.TypeString, value) + _node.Quote = value + } + if nodes := dcc.mutation.DailyIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: dailycontent.DailyTable, + Columns: []string{dailycontent.DailyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.daily_contents = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// DailyContentCreateBulk is the builder for creating many DailyContent entities in bulk. +type DailyContentCreateBulk struct { + config + err error + builders []*DailyContentCreate +} + +// Save creates the DailyContent entities in the database. 
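+//
+// A minimal sketch (assuming client, ctx, and an existing parent *Daily named d):
+//
+//	contents, err := client.DailyContent.CreateBulk(
+//		client.DailyContent.Create().SetLanguageCode(dailycontent.LanguageCodeEN).SetQuote("…").SetDaily(d),
+//		client.DailyContent.Create().SetLanguageCode(dailycontent.LanguageCodeZH_HANS).SetQuote("…").SetDaily(d),
+//	).Save(ctx)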
+func (dccb *DailyContentCreateBulk) Save(ctx context.Context) ([]*DailyContent, error) { + if dccb.err != nil { + return nil, dccb.err + } + specs := make([]*sqlgraph.CreateSpec, len(dccb.builders)) + nodes := make([]*DailyContent, len(dccb.builders)) + mutators := make([]Mutator, len(dccb.builders)) + for i := range dccb.builders { + func(i int, root context.Context) { + builder := dccb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*DailyContentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, dccb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, dccb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, dccb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (dccb *DailyContentCreateBulk) SaveX(ctx context.Context) []*DailyContent { + v, err := dccb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (dccb *DailyContentCreateBulk) Exec(ctx context.Context) error { + _, err := dccb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dccb *DailyContentCreateBulk) ExecX(ctx context.Context) { + if err := dccb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/dailycontent_delete.go b/backend/ent/dailycontent_delete.go new file mode 100644 index 0000000..f7f7a47 --- /dev/null +++ b/backend/ent/dailycontent_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "tss-rocks-be/ent/dailycontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// DailyContentDelete is the builder for deleting a DailyContent entity. +type DailyContentDelete struct { + config + hooks []Hook + mutation *DailyContentMutation +} + +// Where appends a list predicates to the DailyContentDelete builder. +func (dcd *DailyContentDelete) Where(ps ...predicate.DailyContent) *DailyContentDelete { + dcd.mutation.Where(ps...) + return dcd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (dcd *DailyContentDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, dcd.sqlExec, dcd.mutation, dcd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
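+// In request handlers the non-panicking Exec form is usually preferable, e.g.:
+//
+//	n, err := client.DailyContent.Delete().
+//		Where(dailycontent.LanguageCodeEQ(dailycontent.LanguageCodeEN)).
+//		Exec(ctx)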
+func (dcd *DailyContentDelete) ExecX(ctx context.Context) int { + n, err := dcd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (dcd *DailyContentDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(dailycontent.Table, sqlgraph.NewFieldSpec(dailycontent.FieldID, field.TypeInt)) + if ps := dcd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, dcd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + dcd.mutation.done = true + return affected, err +} + +// DailyContentDeleteOne is the builder for deleting a single DailyContent entity. +type DailyContentDeleteOne struct { + dcd *DailyContentDelete +} + +// Where appends a list predicates to the DailyContentDelete builder. +func (dcdo *DailyContentDeleteOne) Where(ps ...predicate.DailyContent) *DailyContentDeleteOne { + dcdo.dcd.mutation.Where(ps...) + return dcdo +} + +// Exec executes the deletion query. +func (dcdo *DailyContentDeleteOne) Exec(ctx context.Context) error { + n, err := dcdo.dcd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{dailycontent.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (dcdo *DailyContentDeleteOne) ExecX(ctx context.Context) { + if err := dcdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/dailycontent_query.go b/backend/ent/dailycontent_query.go new file mode 100644 index 0000000..91756c5 --- /dev/null +++ b/backend/ent/dailycontent_query.go @@ -0,0 +1,614 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/dailycontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// DailyContentQuery is the builder for querying DailyContent entities. +type DailyContentQuery struct { + config + ctx *QueryContext + order []dailycontent.OrderOption + inters []Interceptor + predicates []predicate.DailyContent + withDaily *DailyQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the DailyContentQuery builder. +func (dcq *DailyContentQuery) Where(ps ...predicate.DailyContent) *DailyContentQuery { + dcq.predicates = append(dcq.predicates, ps...) + return dcq +} + +// Limit the number of records to be returned by this query. +func (dcq *DailyContentQuery) Limit(limit int) *DailyContentQuery { + dcq.ctx.Limit = &limit + return dcq +} + +// Offset to start from. +func (dcq *DailyContentQuery) Offset(offset int) *DailyContentQuery { + dcq.ctx.Offset = &offset + return dcq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (dcq *DailyContentQuery) Unique(unique bool) *DailyContentQuery { + dcq.ctx.Unique = &unique + return dcq +} + +// Order specifies how the records should be ordered. +func (dcq *DailyContentQuery) Order(o ...dailycontent.OrderOption) *DailyContentQuery { + dcq.order = append(dcq.order, o...) + return dcq +} + +// QueryDaily chains the current query on the "daily" edge. 
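+//
+// For example (a sketch resolving a content's parent daily):
+//
+//	d, err := client.DailyContent.Query().
+//		Where(dailycontent.IDEQ(1)).
+//		QueryDaily().
+//		Only(ctx)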
+func (dcq *DailyContentQuery) QueryDaily() *DailyQuery { + query := (&DailyClient{config: dcq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := dcq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := dcq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(dailycontent.Table, dailycontent.FieldID, selector), + sqlgraph.To(daily.Table, daily.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, dailycontent.DailyTable, dailycontent.DailyColumn), + ) + fromU = sqlgraph.SetNeighbors(dcq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first DailyContent entity from the query. +// Returns a *NotFoundError when no DailyContent was found. +func (dcq *DailyContentQuery) First(ctx context.Context) (*DailyContent, error) { + nodes, err := dcq.Limit(1).All(setContextOp(ctx, dcq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{dailycontent.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (dcq *DailyContentQuery) FirstX(ctx context.Context) *DailyContent { + node, err := dcq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first DailyContent ID from the query. +// Returns a *NotFoundError when no DailyContent ID was found. +func (dcq *DailyContentQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = dcq.Limit(1).IDs(setContextOp(ctx, dcq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{dailycontent.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (dcq *DailyContentQuery) FirstIDX(ctx context.Context) int { + id, err := dcq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single DailyContent entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one DailyContent entity is found. +// Returns a *NotFoundError when no DailyContent entities are found. +func (dcq *DailyContentQuery) Only(ctx context.Context) (*DailyContent, error) { + nodes, err := dcq.Limit(2).All(setContextOp(ctx, dcq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{dailycontent.Label} + default: + return nil, &NotSingularError{dailycontent.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (dcq *DailyContentQuery) OnlyX(ctx context.Context) *DailyContent { + node, err := dcq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only DailyContent ID in the query. +// Returns a *NotSingularError when more than one DailyContent ID is found. +// Returns a *NotFoundError when no entities are found. +func (dcq *DailyContentQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = dcq.Limit(2).IDs(setContextOp(ctx, dcq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{dailycontent.Label} + default: + err = &NotSingularError{dailycontent.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. 
+func (dcq *DailyContentQuery) OnlyIDX(ctx context.Context) int { + id, err := dcq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of DailyContents. +func (dcq *DailyContentQuery) All(ctx context.Context) ([]*DailyContent, error) { + ctx = setContextOp(ctx, dcq.ctx, ent.OpQueryAll) + if err := dcq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*DailyContent, *DailyContentQuery]() + return withInterceptors[[]*DailyContent](ctx, dcq, qr, dcq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (dcq *DailyContentQuery) AllX(ctx context.Context) []*DailyContent { + nodes, err := dcq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of DailyContent IDs. +func (dcq *DailyContentQuery) IDs(ctx context.Context) (ids []int, err error) { + if dcq.ctx.Unique == nil && dcq.path != nil { + dcq.Unique(true) + } + ctx = setContextOp(ctx, dcq.ctx, ent.OpQueryIDs) + if err = dcq.Select(dailycontent.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (dcq *DailyContentQuery) IDsX(ctx context.Context) []int { + ids, err := dcq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (dcq *DailyContentQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, dcq.ctx, ent.OpQueryCount) + if err := dcq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, dcq, querierCount[*DailyContentQuery](), dcq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (dcq *DailyContentQuery) CountX(ctx context.Context) int { + count, err := dcq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (dcq *DailyContentQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, dcq.ctx, ent.OpQueryExist) + switch _, err := dcq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (dcq *DailyContentQuery) ExistX(ctx context.Context) bool { + exist, err := dcq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the DailyContentQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (dcq *DailyContentQuery) Clone() *DailyContentQuery { + if dcq == nil { + return nil + } + return &DailyContentQuery{ + config: dcq.config, + ctx: dcq.ctx.Clone(), + order: append([]dailycontent.OrderOption{}, dcq.order...), + inters: append([]Interceptor{}, dcq.inters...), + predicates: append([]predicate.DailyContent{}, dcq.predicates...), + withDaily: dcq.withDaily.Clone(), + // clone intermediate query. + sql: dcq.sql.Clone(), + path: dcq.path, + } +} + +// WithDaily tells the query-builder to eager-load the nodes that are connected to +// the "daily" edge. The optional arguments are used to configure the query builder of the edge. 
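+// For example (illustrative, not generated):
+//
+//	contents, err := client.DailyContent.Query().
+//		WithDaily().
+//		All(ctx)
+//
+// Each returned node then has Edges.Daily populated by a single batched
+// query instead of one query per row.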
+func (dcq *DailyContentQuery) WithDaily(opts ...func(*DailyQuery)) *DailyContentQuery { + query := (&DailyClient{config: dcq.config}).Query() + for _, opt := range opts { + opt(query) + } + dcq.withDaily = query + return dcq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// LanguageCode dailycontent.LanguageCode `json:"language_code,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.DailyContent.Query(). +// GroupBy(dailycontent.FieldLanguageCode). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (dcq *DailyContentQuery) GroupBy(field string, fields ...string) *DailyContentGroupBy { + dcq.ctx.Fields = append([]string{field}, fields...) + grbuild := &DailyContentGroupBy{build: dcq} + grbuild.flds = &dcq.ctx.Fields + grbuild.label = dailycontent.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection of one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// LanguageCode dailycontent.LanguageCode `json:"language_code,omitempty"` +// } +// +// client.DailyContent.Query(). +// Select(dailycontent.FieldLanguageCode). +// Scan(ctx, &v) +func (dcq *DailyContentQuery) Select(fields ...string) *DailyContentSelect { + dcq.ctx.Fields = append(dcq.ctx.Fields, fields...) + sbuild := &DailyContentSelect{DailyContentQuery: dcq} + sbuild.label = dailycontent.Label + sbuild.flds, sbuild.scan = &dcq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a DailyContentSelect configured with the given aggregations. +func (dcq *DailyContentQuery) Aggregate(fns ...AggregateFunc) *DailyContentSelect { + return dcq.Select().Aggregate(fns...) +} + +func (dcq *DailyContentQuery) prepareQuery(ctx context.Context) error { + for _, inter := range dcq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, dcq); err != nil { + return err + } + } + } + for _, f := range dcq.ctx.Fields { + if !dailycontent.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if dcq.path != nil { + prev, err := dcq.path(ctx) + if err != nil { + return err + } + dcq.sql = prev + } + return nil +} + +func (dcq *DailyContentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DailyContent, error) { + var ( + nodes = []*DailyContent{} + withFKs = dcq.withFKs + _spec = dcq.querySpec() + loadedTypes = [1]bool{ + dcq.withDaily != nil, + } + ) + if dcq.withDaily != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, dailycontent.ForeignKeys...)
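+ // the foreign-key columns (e.g. daily_contents) must be scanned as well, so loadDaily below can match each row to its parent Daily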
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*DailyContent).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &DailyContent{config: dcq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, dcq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := dcq.withDaily; query != nil { + if err := dcq.loadDaily(ctx, query, nodes, nil, + func(n *DailyContent, e *Daily) { n.Edges.Daily = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (dcq *DailyContentQuery) loadDaily(ctx context.Context, query *DailyQuery, nodes []*DailyContent, init func(*DailyContent), assign func(*DailyContent, *Daily)) error { + ids := make([]string, 0, len(nodes)) + nodeids := make(map[string][]*DailyContent) + for i := range nodes { + if nodes[i].daily_contents == nil { + continue + } + fk := *nodes[i].daily_contents + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(daily.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "daily_contents" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (dcq *DailyContentQuery) sqlCount(ctx context.Context) (int, error) { + _spec := dcq.querySpec() + _spec.Node.Columns = dcq.ctx.Fields + if len(dcq.ctx.Fields) > 0 { + _spec.Unique = dcq.ctx.Unique != nil && *dcq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, dcq.driver, _spec) +} + +func (dcq *DailyContentQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(dailycontent.Table, dailycontent.Columns, sqlgraph.NewFieldSpec(dailycontent.FieldID, field.TypeInt)) + _spec.From = dcq.sql + if unique := dcq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if dcq.path != nil { + _spec.Unique = true + } + if fields := dcq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, dailycontent.FieldID) + for i := range fields { + if fields[i] != dailycontent.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := dcq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := dcq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := dcq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := dcq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (dcq *DailyContentQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(dcq.driver.Dialect()) + t1 := builder.Table(dailycontent.Table) + columns := dcq.ctx.Fields + if len(columns) == 0 { + columns = dailycontent.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if dcq.sql != nil { + selector = dcq.sql + selector.Select(selector.Columns(columns...)...) 
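+ // the query stems from an edge traversal, so its prepared selector is reused with the column selection re-applied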
+ } + if dcq.ctx.Unique != nil && *dcq.ctx.Unique { + selector.Distinct() + } + for _, p := range dcq.predicates { + p(selector) + } + for _, p := range dcq.order { + p(selector) + } + if offset := dcq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := dcq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// DailyContentGroupBy is the group-by builder for DailyContent entities. +type DailyContentGroupBy struct { + selector + build *DailyContentQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (dcgb *DailyContentGroupBy) Aggregate(fns ...AggregateFunc) *DailyContentGroupBy { + dcgb.fns = append(dcgb.fns, fns...) + return dcgb +} + +// Scan applies the selector query and scans the result into the given value. +func (dcgb *DailyContentGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, dcgb.build.ctx, ent.OpQueryGroupBy) + if err := dcgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DailyContentQuery, *DailyContentGroupBy](ctx, dcgb.build, dcgb, dcgb.build.inters, v) +} + +func (dcgb *DailyContentGroupBy) sqlScan(ctx context.Context, root *DailyContentQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(dcgb.fns)) + for _, fn := range dcgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*dcgb.flds)+len(dcgb.fns)) + for _, f := range *dcgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*dcgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dcgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// DailyContentSelect is the builder for selecting fields of DailyContent entities. +type DailyContentSelect struct { + *DailyContentQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (dcs *DailyContentSelect) Aggregate(fns ...AggregateFunc) *DailyContentSelect { + dcs.fns = append(dcs.fns, fns...) + return dcs +} + +// Scan applies the selector query and scans the result into the given value. +func (dcs *DailyContentSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, dcs.ctx, ent.OpQuerySelect) + if err := dcs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*DailyContentQuery, *DailyContentSelect](ctx, dcs.DailyContentQuery, dcs, dcs.inters, v) +} + +func (dcs *DailyContentSelect) sqlScan(ctx context.Context, root *DailyContentQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(dcs.fns)) + for _, fn := range dcs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*dcs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } + rows := &sql.Rows{} + query, args := selector.Query() + if err := dcs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/dailycontent_update.go b/backend/ent/dailycontent_update.go new file mode 100644 index 0000000..c81d701 --- /dev/null +++ b/backend/ent/dailycontent_update.go @@ -0,0 +1,388 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/dailycontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// DailyContentUpdate is the builder for updating DailyContent entities. +type DailyContentUpdate struct { + config + hooks []Hook + mutation *DailyContentMutation +} + +// Where appends a list of predicates to the DailyContentUpdate builder. +func (dcu *DailyContentUpdate) Where(ps ...predicate.DailyContent) *DailyContentUpdate { + dcu.mutation.Where(ps...) + return dcu +} + +// SetLanguageCode sets the "language_code" field. +func (dcu *DailyContentUpdate) SetLanguageCode(dc dailycontent.LanguageCode) *DailyContentUpdate { + dcu.mutation.SetLanguageCode(dc) + return dcu +} + +// SetNillableLanguageCode sets the "language_code" field if the given value is not nil. +func (dcu *DailyContentUpdate) SetNillableLanguageCode(dc *dailycontent.LanguageCode) *DailyContentUpdate { + if dc != nil { + dcu.SetLanguageCode(*dc) + } + return dcu +} + +// SetQuote sets the "quote" field. +func (dcu *DailyContentUpdate) SetQuote(s string) *DailyContentUpdate { + dcu.mutation.SetQuote(s) + return dcu +} + +// SetNillableQuote sets the "quote" field if the given value is not nil. +func (dcu *DailyContentUpdate) SetNillableQuote(s *string) *DailyContentUpdate { + if s != nil { + dcu.SetQuote(*s) + } + return dcu +} + +// SetDailyID sets the "daily" edge to the Daily entity by ID. +func (dcu *DailyContentUpdate) SetDailyID(id string) *DailyContentUpdate { + dcu.mutation.SetDailyID(id) + return dcu +} + +// SetNillableDailyID sets the "daily" edge to the Daily entity by ID if the given value is not nil. +func (dcu *DailyContentUpdate) SetNillableDailyID(id *string) *DailyContentUpdate { + if id != nil { + dcu = dcu.SetDailyID(*id) + } + return dcu +} + +// SetDaily sets the "daily" edge to the Daily entity. +func (dcu *DailyContentUpdate) SetDaily(d *Daily) *DailyContentUpdate { + return dcu.SetDailyID(d.ID) +} + +// Mutation returns the DailyContentMutation object of the builder. +func (dcu *DailyContentUpdate) Mutation() *DailyContentMutation { + return dcu.mutation +} + +// ClearDaily clears the "daily" edge to the Daily entity. +func (dcu *DailyContentUpdate) ClearDaily() *DailyContentUpdate { + dcu.mutation.ClearDaily() + return dcu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (dcu *DailyContentUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, dcu.sqlSave, dcu.mutation, dcu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (dcu *DailyContentUpdate) SaveX(ctx context.Context) int { + affected, err := dcu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (dcu *DailyContentUpdate) Exec(ctx context.Context) error { + _, err := dcu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs.
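+// For example, a bulk update (illustrative, not generated; assumes client and
+// ctx, and the usual generated string predicates for the "quote" field):
+//
+//	client.DailyContent.Update().
+//		Where(dailycontent.QuoteContains("draft")).
+//		SetQuote("final wording").
+//		ExecX(ctx)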
+func (dcu *DailyContentUpdate) ExecX(ctx context.Context) { + if err := dcu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (dcu *DailyContentUpdate) check() error { + if v, ok := dcu.mutation.LanguageCode(); ok { + if err := dailycontent.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "DailyContent.language_code": %w`, err)} + } + } + if v, ok := dcu.mutation.Quote(); ok { + if err := dailycontent.QuoteValidator(v); err != nil { + return &ValidationError{Name: "quote", err: fmt.Errorf(`ent: validator failed for field "DailyContent.quote": %w`, err)} + } + } + return nil +} + +func (dcu *DailyContentUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := dcu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(dailycontent.Table, dailycontent.Columns, sqlgraph.NewFieldSpec(dailycontent.FieldID, field.TypeInt)) + if ps := dcu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := dcu.mutation.LanguageCode(); ok { + _spec.SetField(dailycontent.FieldLanguageCode, field.TypeEnum, value) + } + if value, ok := dcu.mutation.Quote(); ok { + _spec.SetField(dailycontent.FieldQuote, field.TypeString, value) + } + if dcu.mutation.DailyCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: dailycontent.DailyTable, + Columns: []string{dailycontent.DailyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dcu.mutation.DailyIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: dailycontent.DailyTable, + Columns: []string{dailycontent.DailyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, dcu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{dailycontent.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + dcu.mutation.done = true + return n, nil +} + +// DailyContentUpdateOne is the builder for updating a single DailyContent entity. +type DailyContentUpdateOne struct { + config + fields []string + hooks []Hook + mutation *DailyContentMutation +} + +// SetLanguageCode sets the "language_code" field. +func (dcuo *DailyContentUpdateOne) SetLanguageCode(dc dailycontent.LanguageCode) *DailyContentUpdateOne { + dcuo.mutation.SetLanguageCode(dc) + return dcuo +} + +// SetNillableLanguageCode sets the "language_code" field if the given value is not nil. +func (dcuo *DailyContentUpdateOne) SetNillableLanguageCode(dc *dailycontent.LanguageCode) *DailyContentUpdateOne { + if dc != nil { + dcuo.SetLanguageCode(*dc) + } + return dcuo +} + +// SetQuote sets the "quote" field. +func (dcuo *DailyContentUpdateOne) SetQuote(s string) *DailyContentUpdateOne { + dcuo.mutation.SetQuote(s) + return dcuo +} + +// SetNillableQuote sets the "quote" field if the given value is not nil. 
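+// This suits optional inputs, e.g. (illustrative):
+//
+//	var q *string // nil means "leave the quote unchanged"
+//	dcuo.SetNillableQuote(q)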
+func (dcuo *DailyContentUpdateOne) SetNillableQuote(s *string) *DailyContentUpdateOne { + if s != nil { + dcuo.SetQuote(*s) + } + return dcuo +} + +// SetDailyID sets the "daily" edge to the Daily entity by ID. +func (dcuo *DailyContentUpdateOne) SetDailyID(id string) *DailyContentUpdateOne { + dcuo.mutation.SetDailyID(id) + return dcuo +} + +// SetNillableDailyID sets the "daily" edge to the Daily entity by ID if the given value is not nil. +func (dcuo *DailyContentUpdateOne) SetNillableDailyID(id *string) *DailyContentUpdateOne { + if id != nil { + dcuo = dcuo.SetDailyID(*id) + } + return dcuo +} + +// SetDaily sets the "daily" edge to the Daily entity. +func (dcuo *DailyContentUpdateOne) SetDaily(d *Daily) *DailyContentUpdateOne { + return dcuo.SetDailyID(d.ID) +} + +// Mutation returns the DailyContentMutation object of the builder. +func (dcuo *DailyContentUpdateOne) Mutation() *DailyContentMutation { + return dcuo.mutation +} + +// ClearDaily clears the "daily" edge to the Daily entity. +func (dcuo *DailyContentUpdateOne) ClearDaily() *DailyContentUpdateOne { + dcuo.mutation.ClearDaily() + return dcuo +} + +// Where appends a list of predicates to the DailyContentUpdateOne builder. +func (dcuo *DailyContentUpdateOne) Where(ps ...predicate.DailyContent) *DailyContentUpdateOne { + dcuo.mutation.Where(ps...) + return dcuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (dcuo *DailyContentUpdateOne) Select(field string, fields ...string) *DailyContentUpdateOne { + dcuo.fields = append([]string{field}, fields...) + return dcuo +} + +// Save executes the query and returns the updated DailyContent entity. +func (dcuo *DailyContentUpdateOne) Save(ctx context.Context) (*DailyContent, error) { + return withHooks(ctx, dcuo.sqlSave, dcuo.mutation, dcuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (dcuo *DailyContentUpdateOne) SaveX(ctx context.Context) *DailyContent { + node, err := dcuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (dcuo *DailyContentUpdateOne) Exec(ctx context.Context) error { + _, err := dcuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (dcuo *DailyContentUpdateOne) ExecX(ctx context.Context) { + if err := dcuo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder.
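+// A failing validator surfaces from Save/Exec as a *ValidationError, e.g.
+// (illustrative): if err := dcuo.Exec(ctx); ent.IsValidationError(err) { ... }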
+func (dcuo *DailyContentUpdateOne) check() error { + if v, ok := dcuo.mutation.LanguageCode(); ok { + if err := dailycontent.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "DailyContent.language_code": %w`, err)} + } + } + if v, ok := dcuo.mutation.Quote(); ok { + if err := dailycontent.QuoteValidator(v); err != nil { + return &ValidationError{Name: "quote", err: fmt.Errorf(`ent: validator failed for field "DailyContent.quote": %w`, err)} + } + } + return nil +} + +func (dcuo *DailyContentUpdateOne) sqlSave(ctx context.Context) (_node *DailyContent, err error) { + if err := dcuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(dailycontent.Table, dailycontent.Columns, sqlgraph.NewFieldSpec(dailycontent.FieldID, field.TypeInt)) + id, ok := dcuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DailyContent.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := dcuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, dailycontent.FieldID) + for _, f := range fields { + if !dailycontent.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != dailycontent.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := dcuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := dcuo.mutation.LanguageCode(); ok { + _spec.SetField(dailycontent.FieldLanguageCode, field.TypeEnum, value) + } + if value, ok := dcuo.mutation.Quote(); ok { + _spec.SetField(dailycontent.FieldQuote, field.TypeString, value) + } + if dcuo.mutation.DailyCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: dailycontent.DailyTable, + Columns: []string{dailycontent.DailyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := dcuo.mutation.DailyIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: dailycontent.DailyTable, + Columns: []string{dailycontent.DailyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(daily.FieldID, field.TypeString), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &DailyContent{config: dcuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, dcuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{dailycontent.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + dcuo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/ent.go b/backend/ent/ent.go new file mode 100644 index 0000000..4d61e01 --- /dev/null +++ b/backend/ent/ent.go @@ -0,0 +1,638 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "reflect" + "sync" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/categorycontent" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/contributorrole" + "tss-rocks-be/ent/contributorsociallink" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/dailycategory" + "tss-rocks-be/ent/dailycategorycontent" + "tss-rocks-be/ent/dailycontent" + "tss-rocks-be/ent/media" + "tss-rocks-be/ent/permission" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/postcontent" + "tss-rocks-be/ent/postcontributor" + "tss-rocks-be/ent/role" + "tss-rocks-be/ent/user" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ent aliases to avoid import conflicts in user's code. +type ( + Op = ent.Op + Hook = ent.Hook + Value = ent.Value + Query = ent.Query + QueryContext = ent.QueryContext + Querier = ent.Querier + QuerierFunc = ent.QuerierFunc + Interceptor = ent.Interceptor + InterceptFunc = ent.InterceptFunc + Traverser = ent.Traverser + TraverseFunc = ent.TraverseFunc + Policy = ent.Policy + Mutator = ent.Mutator + Mutation = ent.Mutation + MutateFunc = ent.MutateFunc +) + +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. +func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} + +// OrderFunc applies an ordering on the sql selector. +// Deprecated: Use Asc/Desc functions or the package builders instead. +type OrderFunc func(*sql.Selector) + +var ( + initCheck sync.Once + columnCheck sql.ColumnCheck +) + +// checkColumn checks if the column exists in the given table. +func checkColumn(table, column string) error { + initCheck.Do(func() { + columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ + category.Table: category.ValidColumn, + categorycontent.Table: categorycontent.ValidColumn, + contributor.Table: contributor.ValidColumn, + contributorrole.Table: contributorrole.ValidColumn, + contributorsociallink.Table: contributorsociallink.ValidColumn, + daily.Table: daily.ValidColumn, + dailycategory.Table: dailycategory.ValidColumn, + dailycategorycontent.Table: dailycategorycontent.ValidColumn, + dailycontent.Table: dailycontent.ValidColumn, + media.Table: media.ValidColumn, + permission.Table: permission.ValidColumn, + post.Table: post.ValidColumn, + postcontent.Table: postcontent.ValidColumn, + postcontributor.Table: postcontributor.ValidColumn, + role.Table: role.ValidColumn, + user.Table: user.ValidColumn, + }) + }) + return columnCheck(table, column) +} + +// Asc applies the given fields in ASC order. 
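+// For example (illustrative, not generated):
+//
+//	client.DailyContent.Query().
+//		Order(ent.Asc(dailycontent.FieldLanguageCode)).
+//		AllX(ctx)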
+func Asc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Asc(s.C(f))) + } + } +} + +// Desc applies the given fields in DESC order. +func Desc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Desc(s.C(f))) + } + } +} + +// AggregateFunc applies an aggregation step on the group-by traversal/selector. +type AggregateFunc func(*sql.Selector) string + +// As is a pseudo aggregation function for renaming other aggregation functions with custom names. For example: +// +// GroupBy(field1, field2). +// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), ent.As(ent.Sum(field2), "sum_field2")). +// Scan(ctx, &v) +func As(fn AggregateFunc, end string) AggregateFunc { + return func(s *sql.Selector) string { + return sql.As(fn(s), end) + } +} + +// Count applies the "count" aggregation function on each group. +func Count() AggregateFunc { + return func(s *sql.Selector) string { + return sql.Count("*") + } +} + +// Max applies the "max" aggregation function on the given field of each group. +func Max(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Max(s.C(field)) + } +} + +// Mean applies the "mean" aggregation function on the given field of each group. +func Mean(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Avg(s.C(field)) + } +} + +// Min applies the "min" aggregation function on the given field of each group. +func Min(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Min(s.C(field)) + } +} + +// Sum applies the "sum" aggregation function on the given field of each group. +func Sum(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Sum(s.C(field)) + } +} + +// ValidationError is returned when validating a field or edge fails. +type ValidationError struct { + Name string // Field or edge name. + err error +} + +// Error implements the error interface. +func (e *ValidationError) Error() string { + return e.err.Error() +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ValidationError) Unwrap() error { + return e.err +} + +// IsValidationError returns a boolean indicating whether the error is a validation error. +func IsValidationError(err error) bool { + if err == nil { + return false + } + var e *ValidationError + return errors.As(err, &e) +} + +// NotFoundError is returned when trying to fetch a specific entity and it was not found in the database. +type NotFoundError struct { + label string +} + +// Error implements the error interface.
+func (e *NotFoundError) Error() string { + return "ent: " + e.label + " not found" +} + +// IsNotFound returns a boolean indicating whether the error is a not found error. +func IsNotFound(err error) bool { + if err == nil { + return false + } + var e *NotFoundError + return errors.As(err, &e) +} + +// MaskNotFound masks a not found error. +func MaskNotFound(err error) error { + if IsNotFound(err) { + return nil + } + return err +} + +// NotSingularError is returned when trying to fetch a singular entity and more than one was found in the database. +type NotSingularError struct { + label string +} + +// Error implements the error interface. +func (e *NotSingularError) Error() string { + return "ent: " + e.label + " not singular" +} + +// IsNotSingular returns a boolean indicating whether the error is a not singular error. +func IsNotSingular(err error) bool { + if err == nil { + return false + } + var e *NotSingularError + return errors.As(err, &e) +} + +// NotLoadedError is returned when trying to get a node that was not loaded by the query. +type NotLoadedError struct { + edge string +} + +// Error implements the error interface. +func (e *NotLoadedError) Error() string { + return "ent: " + e.edge + " edge was not loaded" +} + +// IsNotLoaded returns a boolean indicating whether the error is a not loaded error. +func IsNotLoaded(err error) bool { + if err == nil { + return false + } + var e *NotLoadedError + return errors.As(err, &e) +} + +// ConstraintError is returned when trying to create/update one or more entities and +// one or more of their constraints failed. For example, violation of edge or +// field uniqueness. +type ConstraintError struct { + msg string + wrap error +} + +// Error implements the error interface. +func (e ConstraintError) Error() string { + return "ent: constraint failed: " + e.msg +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ConstraintError) Unwrap() error { + return e.wrap +} + +// IsConstraintError returns a boolean indicating whether the error is a constraint failure. +func IsConstraintError(err error) bool { + if err == nil { + return false + } + var e *ConstraintError + return errors.As(err, &e) +} + +// selector embedded by the different Select/GroupBy builders. +type selector struct { + label string + flds *[]string + fns []AggregateFunc + scan func(context.Context, any) error +} + +// ScanX is like Scan, but panics if an error occurs. +func (s *selector) ScanX(ctx context.Context, v any) { + if err := s.scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from a selector. It is only allowed when selecting one field. +func (s *selector) Strings(ctx context.Context) ([]string, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (s *selector) StringsX(ctx context.Context) []string { + v, err := s.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from a selector. It is only allowed when selecting one field.
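+// For example (illustrative, not generated):
+//
+//	quote, err := client.DailyContent.Query().
+//		Where(dailycontent.IDEQ(1)).
+//		Select(dailycontent.FieldQuote).
+//		String(ctx)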
+func (s *selector) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = s.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (s *selector) StringX(ctx context.Context) string { + v, err := s.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from a selector. It is only allowed when selecting one field. +func (s *selector) Ints(ctx context.Context) ([]int, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (s *selector) IntsX(ctx context.Context) []int { + v, err := s.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from a selector. It is only allowed when selecting one field. +func (s *selector) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = s.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (s *selector) IntX(ctx context.Context) int { + v, err := s.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. +func (s *selector) Float64s(ctx context.Context) ([]float64, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (s *selector) Float64sX(ctx context.Context) []float64 { + v, err := s.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. +func (s *selector) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = s.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (s *selector) Float64X(ctx context.Context) float64 { + v, err := s.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from a selector. It is only allowed when selecting one field. +func (s *selector) Bools(ctx context.Context) ([]bool, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. 
+func (s *selector) BoolsX(ctx context.Context) []bool { + v, err := s.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from a selector. It is only allowed when selecting one field. +func (s *selector) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = s.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (s *selector) BoolX(ctx context.Context) bool { + v, err := s.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +// withHooks invokes the builder operation with the given hooks, if any. +func withHooks[V Value, M any, PM interface { + *M + Mutation +}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) { + if len(hooks) == 0 { + return exec(ctx) + } + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutationT, ok := any(m).(PM) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + // Set the mutation to the builder. + *mutation = *mutationT + return exec(ctx) + }) + for i := len(hooks) - 1; i >= 0; i-- { + if hooks[i] == nil { + return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = hooks[i](mut) + } + v, err := mut.Mutate(ctx, mutation) + if err != nil { + return value, err + } + nv, ok := v.(V) + if !ok { + return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation) + } + return nv, nil +} + +// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist. +func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context { + if ent.QueryFromContext(ctx) == nil { + qc.Op = op + ctx = ent.NewQueryContext(ctx, qc) + } + return ctx +} + +func querierAll[V Value, Q interface { + sqlAll(context.Context, ...queryHook) (V, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlAll(ctx) + }) +} + +func querierCount[Q interface { + sqlCount(context.Context) (int, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlCount(ctx) + }) +} + +func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) { + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + rv, err := qr.Query(ctx, q) + if err != nil { + return v, err + } + vt, ok := rv.(V) + if !ok { + return v, fmt.Errorf("unexpected type %T returned from %T. 
expected type: %T", vt, q, v) + } + return vt, nil +} + +func scanWithInterceptors[Q1 ent.Query, Q2 interface { + sqlScan(context.Context, Q1, any) error +}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { + rv := reflect.ValueOf(v) + var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q1) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { + return nil, err + } + if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { + return rv.Elem().Interface(), nil + } + return v, nil + }) + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + vv, err := qr.Query(ctx, rootQuery) + if err != nil { + return err + } + switch rv2 := reflect.ValueOf(vv); { + case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: + case rv.Type() == rv2.Type(): + rv.Elem().Set(rv2.Elem()) + case rv.Elem().Type() == rv2.Type(): + rv.Elem().Set(rv2) + } + return nil +} + +// queryHook describes an internal hook for the different sqlAll methods. +type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/backend/ent/enttest/enttest.go b/backend/ent/enttest/enttest.go new file mode 100644 index 0000000..b6d30ae --- /dev/null +++ b/backend/ent/enttest/enttest.go @@ -0,0 +1,85 @@ +// Code generated by ent, DO NOT EDIT. + +package enttest + +import ( + "context" + + "tss-rocks-be/ent" + // required by schema hooks. + _ "tss-rocks-be/ent/runtime" + + "tss-rocks-be/ent/migrate" + + "entgo.io/ent/dialect/sql/schema" +) + +type ( + // TestingT is the interface that is shared between + // testing.T and testing.B and used by enttest. + TestingT interface { + FailNow() + Error(...any) + } + + // Option configures client creation. + Option func(*options) + + options struct { + opts []ent.Option + migrateOpts []schema.MigrateOption + } +) + +// WithOptions forwards options to client creation. +func WithOptions(opts ...ent.Option) Option { + return func(o *options) { + o.opts = append(o.opts, opts...) + } +} + +// WithMigrateOptions forwards options to auto migration. +func WithMigrateOptions(opts ...schema.MigrateOption) Option { + return func(o *options) { + o.migrateOpts = append(o.migrateOpts, opts...) + } +} + +func newOptions(opts []Option) *options { + o := &options{} + for _, opt := range opts { + opt(o) + } + return o +} + +// Open calls ent.Open and runs the auto migration. +func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client { + o := newOptions(opts) + c, err := ent.Open(driverName, dataSourceName, o.opts...) + if err != nil { + t.Error(err) + t.FailNow() + } + migrateSchema(t, c, o) + return c +} + +// NewClient calls ent.NewClient and runs the auto migration. +func NewClient(t TestingT, opts ...Option) *ent.Client { + o := newOptions(opts) + c := ent.NewClient(o.opts...)
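+ // run the auto migration against the fresh client, mirroring Open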
+ migrateSchema(t, c, o) + return c +} +func migrateSchema(t TestingT, c *ent.Client, o *options) { + tables, err := schema.CopyTables(migrate.Tables) + if err != nil { + t.Error(err) + t.FailNow() + } + if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil { + t.Error(err) + t.FailNow() + } +} diff --git a/backend/ent/generate.go b/backend/ent/generate.go new file mode 100644 index 0000000..8d3fdfd --- /dev/null +++ b/backend/ent/generate.go @@ -0,0 +1,3 @@ +package ent + +//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go new file mode 100644 index 0000000..fc38e78 --- /dev/null +++ b/backend/ent/hook/hook.go @@ -0,0 +1,378 @@ +// Code generated by ent, DO NOT EDIT. + +package hook + +import ( + "context" + "fmt" + "tss-rocks-be/ent" +) + +// The CategoryFunc type is an adapter to allow the use of ordinary +// function as Category mutator. +type CategoryFunc func(context.Context, *ent.CategoryMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f CategoryFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.CategoryMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.CategoryMutation", m) +} + +// The CategoryContentFunc type is an adapter to allow the use of ordinary +// function as CategoryContent mutator. +type CategoryContentFunc func(context.Context, *ent.CategoryContentMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f CategoryContentFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.CategoryContentMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.CategoryContentMutation", m) +} + +// The ContributorFunc type is an adapter to allow the use of ordinary +// function as Contributor mutator. +type ContributorFunc func(context.Context, *ent.ContributorMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f ContributorFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.ContributorMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ContributorMutation", m) +} + +// The ContributorRoleFunc type is an adapter to allow the use of ordinary +// function as ContributorRole mutator. +type ContributorRoleFunc func(context.Context, *ent.ContributorRoleMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f ContributorRoleFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.ContributorRoleMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ContributorRoleMutation", m) +} + +// The ContributorSocialLinkFunc type is an adapter to allow the use of ordinary +// function as ContributorSocialLink mutator. +type ContributorSocialLinkFunc func(context.Context, *ent.ContributorSocialLinkMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f ContributorSocialLinkFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.ContributorSocialLinkMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ContributorSocialLinkMutation", m) +} + +// The DailyFunc type is an adapter to allow the use of ordinary +// function as Daily mutator. 
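+// An illustrative sketch (not generated) of wiring one into a mutation hook,
+// assuming an initialized *ent.Client named client:
+//
+//	client.Daily.Use(func(next ent.Mutator) ent.Mutator {
+//		return hook.DailyFunc(func(ctx context.Context, m *ent.DailyMutation) (ent.Value, error) {
+//			// inspect or adjust the mutation before delegating.
+//			return next.Mutate(ctx, m)
+//		})
+//	})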
+type DailyFunc func(context.Context, *ent.DailyMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f DailyFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.DailyMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DailyMutation", m) +} + +// The DailyCategoryFunc type is an adapter to allow the use of ordinary +// function as DailyCategory mutator. +type DailyCategoryFunc func(context.Context, *ent.DailyCategoryMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f DailyCategoryFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.DailyCategoryMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DailyCategoryMutation", m) +} + +// The DailyCategoryContentFunc type is an adapter to allow the use of ordinary +// function as DailyCategoryContent mutator. +type DailyCategoryContentFunc func(context.Context, *ent.DailyCategoryContentMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f DailyCategoryContentFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.DailyCategoryContentMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DailyCategoryContentMutation", m) +} + +// The DailyContentFunc type is an adapter to allow the use of ordinary +// function as DailyContent mutator. +type DailyContentFunc func(context.Context, *ent.DailyContentMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f DailyContentFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.DailyContentMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DailyContentMutation", m) +} + +// The MediaFunc type is an adapter to allow the use of ordinary +// function as Media mutator. +type MediaFunc func(context.Context, *ent.MediaMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f MediaFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.MediaMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MediaMutation", m) +} + +// The PermissionFunc type is an adapter to allow the use of ordinary +// function as Permission mutator. +type PermissionFunc func(context.Context, *ent.PermissionMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f PermissionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.PermissionMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PermissionMutation", m) +} + +// The PostFunc type is an adapter to allow the use of ordinary +// function as Post mutator. +type PostFunc func(context.Context, *ent.PostMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f PostFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.PostMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PostMutation", m) +} + +// The PostContentFunc type is an adapter to allow the use of ordinary +// function as PostContent mutator. +type PostContentFunc func(context.Context, *ent.PostContentMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). 
+func (f PostContentFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.PostContentMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PostContentMutation", m) +} + +// The PostContributorFunc type is an adapter to allow the use of ordinary +// function as PostContributor mutator. +type PostContributorFunc func(context.Context, *ent.PostContributorMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f PostContributorFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.PostContributorMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PostContributorMutation", m) +} + +// The RoleFunc type is an adapter to allow the use of ordinary +// function as Role mutator. +type RoleFunc func(context.Context, *ent.RoleMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f RoleFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.RoleMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.RoleMutation", m) +} + +// The UserFunc type is an adapter to allow the use of ordinary +// function as User mutator. +type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UserFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UserMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m) +} + +// Condition is a hook condition function. +type Condition func(context.Context, ent.Mutation) bool + +// And groups conditions with the AND operator. +func And(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if !first(ctx, m) || !second(ctx, m) { + return false + } + for _, cond := range rest { + if !cond(ctx, m) { + return false + } + } + return true + } +} + +// Or groups conditions with the OR operator. +func Or(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if first(ctx, m) || second(ctx, m) { + return true + } + for _, cond := range rest { + if cond(ctx, m) { + return true + } + } + return false + } +} + +// Not negates a given condition. +func Not(cond Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + return !cond(ctx, m) + } +} + +// HasOp is a condition testing mutation operation. +func HasOp(op ent.Op) Condition { + return func(_ context.Context, m ent.Mutation) bool { + return m.Op().Is(op) + } +} + +// HasAddedFields is a condition validating `.AddedField` on fields. +func HasAddedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.AddedField(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.AddedField(field); !exists { + return false + } + } + return true + } +} + +// HasClearedFields is a condition validating `.FieldCleared` on fields. 
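+// For example (illustrative; auditHook is a hypothetical ent.Hook):
+//
+//	hook.If(auditHook, hook.And(hook.HasOp(ent.OpUpdateOne), hook.HasClearedFields("quote")))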
+func HasClearedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if exists := m.FieldCleared(field); !exists { + return false + } + for _, field := range fields { + if exists := m.FieldCleared(field); !exists { + return false + } + } + return true + } +} + +// HasFields is a condition validating `.Field` on fields. +func HasFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.Field(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.Field(field); !exists { + return false + } + } + return true + } +} + +// If executes the given hook under condition. +// +// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...))) +func If(hk ent.Hook, cond Condition) ent.Hook { + return func(next ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if cond(ctx, m) { + return hk(next).Mutate(ctx, m) + } + return next.Mutate(ctx, m) + }) + } +} + +// On executes the given hook only for the given operation. +// +// hook.On(Log, ent.Delete|ent.Create) +func On(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, HasOp(op)) +} + +// Unless skips the given hook only for the given operation. +// +// hook.Unless(Log, ent.Update|ent.UpdateOne) +func Unless(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, Not(HasOp(op))) +} + +// FixedError is a hook returning a fixed error. +func FixedError(err error) ent.Hook { + return func(ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) { + return nil, err + }) + } +} + +// Reject returns a hook that rejects all operations that match op. +// +// func (T) Hooks() []ent.Hook { +// return []ent.Hook{ +// Reject(ent.Delete|ent.Update), +// } +// } +func Reject(op ent.Op) ent.Hook { + hk := FixedError(fmt.Errorf("%s operation is not allowed", op)) + return On(hk, op) +} + +// Chain acts as a list of hooks and is effectively immutable. +// Once created, it will always hold the same set of hooks in the same order. +type Chain struct { + hooks []ent.Hook +} + +// NewChain creates a new chain of hooks. +func NewChain(hooks ...ent.Hook) Chain { + return Chain{append([]ent.Hook(nil), hooks...)} +} + +// Hook chains the list of hooks and returns the final hook. +func (c Chain) Hook() ent.Hook { + return func(mutator ent.Mutator) ent.Mutator { + for i := len(c.hooks) - 1; i >= 0; i-- { + mutator = c.hooks[i](mutator) + } + return mutator + } +} + +// Append extends a chain, adding the specified hook +// as the last ones in the mutation flow. +func (c Chain) Append(hooks ...ent.Hook) Chain { + newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks)) + newHooks = append(newHooks, c.hooks...) + newHooks = append(newHooks, hooks...) + return Chain{newHooks} +} + +// Extend extends a chain, adding the specified chain +// as the last ones in the mutation flow. +func (c Chain) Extend(chain Chain) Chain { + return c.Append(chain.hooks...) +} diff --git a/backend/ent/media.go b/backend/ent/media.go new file mode 100644 index 0000000..935643b --- /dev/null +++ b/backend/ent/media.go @@ -0,0 +1,222 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + "tss-rocks-be/ent/media" + "tss-rocks-be/ent/user" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// Media is the model entity for the Media schema. 
+type Media struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // StorageID holds the value of the "storage_id" field. + StorageID string `json:"storage_id,omitempty"` + // OriginalName holds the value of the "original_name" field. + OriginalName string `json:"original_name,omitempty"` + // MimeType holds the value of the "mime_type" field. + MimeType string `json:"mime_type,omitempty"` + // Size holds the value of the "size" field. + Size int64 `json:"size,omitempty"` + // URL holds the value of the "url" field. + URL string `json:"url,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // CreatedBy holds the value of the "created_by" field. + CreatedBy string `json:"created_by,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the MediaQuery when eager-loading is set. + Edges MediaEdges `json:"edges"` + user_media *int + selectValues sql.SelectValues +} + +// MediaEdges holds the relations/edges for other nodes in the graph. +type MediaEdges struct { + // Owner holds the value of the owner edge. + Owner *User `json:"owner,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// OwnerOrErr returns the Owner value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e MediaEdges) OwnerOrErr() (*User, error) { + if e.Owner != nil { + return e.Owner, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "owner"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Media) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case media.FieldID, media.FieldSize: + values[i] = new(sql.NullInt64) + case media.FieldStorageID, media.FieldOriginalName, media.FieldMimeType, media.FieldURL, media.FieldCreatedBy: + values[i] = new(sql.NullString) + case media.FieldCreatedAt, media.FieldUpdatedAt: + values[i] = new(sql.NullTime) + case media.ForeignKeys[0]: // user_media + values[i] = new(sql.NullInt64) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Media fields. 
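+// Both slices are expected to be index-aligned with the column types
+// produced by scanValues above.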
+func (m *Media) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case media.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + m.ID = int(value.Int64) + case media.FieldStorageID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field storage_id", values[i]) + } else if value.Valid { + m.StorageID = value.String + } + case media.FieldOriginalName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field original_name", values[i]) + } else if value.Valid { + m.OriginalName = value.String + } + case media.FieldMimeType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field mime_type", values[i]) + } else if value.Valid { + m.MimeType = value.String + } + case media.FieldSize: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field size", values[i]) + } else if value.Valid { + m.Size = value.Int64 + } + case media.FieldURL: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field url", values[i]) + } else if value.Valid { + m.URL = value.String + } + case media.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + m.CreatedAt = value.Time + } + case media.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + m.UpdatedAt = value.Time + } + case media.FieldCreatedBy: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field created_by", values[i]) + } else if value.Valid { + m.CreatedBy = value.String + } + case media.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field user_media", value) + } else if value.Valid { + m.user_media = new(int) + *m.user_media = int(value.Int64) + } + default: + m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Media. +// This includes values selected through modifiers, order, etc. +func (m *Media) Value(name string) (ent.Value, error) { + return m.selectValues.Get(name) +} + +// QueryOwner queries the "owner" edge of the Media entity. +func (m *Media) QueryOwner() *UserQuery { + return NewMediaClient(m.config).QueryOwner(m) +} + +// Update returns a builder for updating this Media. +// Note that you need to call Media.Unwrap() before calling this method if this Media +// was returned from a transaction, and the transaction was committed or rolled back. +func (m *Media) Update() *MediaUpdateOne { + return NewMediaClient(m.config).UpdateOne(m) +} + +// Unwrap unwraps the Media entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. 
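+// Illustrative usage (assumes tx was created earlier via client.Tx(ctx)):
+//
+//	m, _ := tx.Media.Get(ctx, id)
+//	_ = tx.Commit()
+//	m = m.Unwrap() // detach from the closed transaction before m.Update()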
+func (m *Media) Unwrap() *Media { + _tx, ok := m.config.driver.(*txDriver) + if !ok { + panic("ent: Media is not a transactional entity") + } + m.config.driver = _tx.drv + return m +} + +// String implements the fmt.Stringer. +func (m *Media) String() string { + var builder strings.Builder + builder.WriteString("Media(") + builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) + builder.WriteString("storage_id=") + builder.WriteString(m.StorageID) + builder.WriteString(", ") + builder.WriteString("original_name=") + builder.WriteString(m.OriginalName) + builder.WriteString(", ") + builder.WriteString("mime_type=") + builder.WriteString(m.MimeType) + builder.WriteString(", ") + builder.WriteString("size=") + builder.WriteString(fmt.Sprintf("%v", m.Size)) + builder.WriteString(", ") + builder.WriteString("url=") + builder.WriteString(m.URL) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("created_by=") + builder.WriteString(m.CreatedBy) + builder.WriteByte(')') + return builder.String() +} + +// MediaSlice is a parsable slice of Media. +type MediaSlice []*Media diff --git a/backend/ent/media/media.go b/backend/ent/media/media.go new file mode 100644 index 0000000..30604de --- /dev/null +++ b/backend/ent/media/media.go @@ -0,0 +1,159 @@ +// Code generated by ent, DO NOT EDIT. + +package media + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the media type in the database. + Label = "media" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldStorageID holds the string denoting the storage_id field in the database. + FieldStorageID = "storage_id" + // FieldOriginalName holds the string denoting the original_name field in the database. + FieldOriginalName = "original_name" + // FieldMimeType holds the string denoting the mime_type field in the database. + FieldMimeType = "mime_type" + // FieldSize holds the string denoting the size field in the database. + FieldSize = "size" + // FieldURL holds the string denoting the url field in the database. + FieldURL = "url" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldCreatedBy holds the string denoting the created_by field in the database. + FieldCreatedBy = "created_by" + // EdgeOwner holds the string denoting the owner edge name in mutations. + EdgeOwner = "owner" + // Table holds the table name of the media in the database. + Table = "media" + // OwnerTable is the table that holds the owner relation/edge. + OwnerTable = "media" + // OwnerInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + OwnerInverseTable = "users" + // OwnerColumn is the table column denoting the owner relation/edge. + OwnerColumn = "user_media" +) + +// Columns holds all SQL columns for media fields. 
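+// Edge foreign keys such as user_media are tracked separately in the
+// ForeignKeys slice below.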
+var Columns = []string{ + FieldID, + FieldStorageID, + FieldOriginalName, + FieldMimeType, + FieldSize, + FieldURL, + FieldCreatedAt, + FieldUpdatedAt, + FieldCreatedBy, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "media" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "user_media", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // StorageIDValidator is a validator for the "storage_id" field. It is called by the builders before save. + StorageIDValidator func(string) error + // OriginalNameValidator is a validator for the "original_name" field. It is called by the builders before save. + OriginalNameValidator func(string) error + // MimeTypeValidator is a validator for the "mime_type" field. It is called by the builders before save. + MimeTypeValidator func(string) error + // SizeValidator is a validator for the "size" field. It is called by the builders before save. + SizeValidator func(int64) error + // URLValidator is a validator for the "url" field. It is called by the builders before save. + URLValidator func(string) error + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// OrderOption defines the ordering options for the Media queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByStorageID orders the results by the storage_id field. +func ByStorageID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStorageID, opts...).ToFunc() +} + +// ByOriginalName orders the results by the original_name field. +func ByOriginalName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOriginalName, opts...).ToFunc() +} + +// ByMimeType orders the results by the mime_type field. +func ByMimeType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMimeType, opts...).ToFunc() +} + +// BySize orders the results by the size field. +func BySize(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSize, opts...).ToFunc() +} + +// ByURL orders the results by the url field. +func ByURL(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldURL, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByCreatedBy orders the results by the created_by field. +func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedBy, opts...).ToFunc() +} + +// ByOwnerField orders the results by owner field. 
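+// Illustrative usage, ordering media by a column on the owning user
+// ("username" is assumed to exist on the User schema):
+//
+//	client.Media.Query().
+//		Order(media.ByOwnerField("username")).
+//		All(ctx)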
+func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...)) + } +} +func newOwnerStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) +} diff --git a/backend/ent/media/where.go b/backend/ent/media/where.go new file mode 100644 index 0000000..9de2610 --- /dev/null +++ b/backend/ent/media/where.go @@ -0,0 +1,589 @@ +// Code generated by ent, DO NOT EDIT. + +package media + +import ( + "time" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Media { + return predicate.Media(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Media { + return predicate.Media(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Media { + return predicate.Media(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Media { + return predicate.Media(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Media { + return predicate.Media(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Media { + return predicate.Media(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Media { + return predicate.Media(sql.FieldLTE(FieldID, id)) +} + +// StorageID applies equality check predicate on the "storage_id" field. It's identical to StorageIDEQ. +func StorageID(v string) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldStorageID, v)) +} + +// OriginalName applies equality check predicate on the "original_name" field. It's identical to OriginalNameEQ. +func OriginalName(v string) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldOriginalName, v)) +} + +// MimeType applies equality check predicate on the "mime_type" field. It's identical to MimeTypeEQ. +func MimeType(v string) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldMimeType, v)) +} + +// Size applies equality check predicate on the "size" field. It's identical to SizeEQ. +func Size(v int64) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldSize, v)) +} + +// URL applies equality check predicate on the "url" field. It's identical to URLEQ. +func URL(v string) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldURL, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. 
+func UpdatedAt(v time.Time) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ. +func CreatedBy(v string) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldCreatedBy, v)) +} + +// StorageIDEQ applies the EQ predicate on the "storage_id" field. +func StorageIDEQ(v string) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldStorageID, v)) +} + +// StorageIDNEQ applies the NEQ predicate on the "storage_id" field. +func StorageIDNEQ(v string) predicate.Media { + return predicate.Media(sql.FieldNEQ(FieldStorageID, v)) +} + +// StorageIDIn applies the In predicate on the "storage_id" field. +func StorageIDIn(vs ...string) predicate.Media { + return predicate.Media(sql.FieldIn(FieldStorageID, vs...)) +} + +// StorageIDNotIn applies the NotIn predicate on the "storage_id" field. +func StorageIDNotIn(vs ...string) predicate.Media { + return predicate.Media(sql.FieldNotIn(FieldStorageID, vs...)) +} + +// StorageIDGT applies the GT predicate on the "storage_id" field. +func StorageIDGT(v string) predicate.Media { + return predicate.Media(sql.FieldGT(FieldStorageID, v)) +} + +// StorageIDGTE applies the GTE predicate on the "storage_id" field. +func StorageIDGTE(v string) predicate.Media { + return predicate.Media(sql.FieldGTE(FieldStorageID, v)) +} + +// StorageIDLT applies the LT predicate on the "storage_id" field. +func StorageIDLT(v string) predicate.Media { + return predicate.Media(sql.FieldLT(FieldStorageID, v)) +} + +// StorageIDLTE applies the LTE predicate on the "storage_id" field. +func StorageIDLTE(v string) predicate.Media { + return predicate.Media(sql.FieldLTE(FieldStorageID, v)) +} + +// StorageIDContains applies the Contains predicate on the "storage_id" field. +func StorageIDContains(v string) predicate.Media { + return predicate.Media(sql.FieldContains(FieldStorageID, v)) +} + +// StorageIDHasPrefix applies the HasPrefix predicate on the "storage_id" field. +func StorageIDHasPrefix(v string) predicate.Media { + return predicate.Media(sql.FieldHasPrefix(FieldStorageID, v)) +} + +// StorageIDHasSuffix applies the HasSuffix predicate on the "storage_id" field. +func StorageIDHasSuffix(v string) predicate.Media { + return predicate.Media(sql.FieldHasSuffix(FieldStorageID, v)) +} + +// StorageIDEqualFold applies the EqualFold predicate on the "storage_id" field. +func StorageIDEqualFold(v string) predicate.Media { + return predicate.Media(sql.FieldEqualFold(FieldStorageID, v)) +} + +// StorageIDContainsFold applies the ContainsFold predicate on the "storage_id" field. +func StorageIDContainsFold(v string) predicate.Media { + return predicate.Media(sql.FieldContainsFold(FieldStorageID, v)) +} + +// OriginalNameEQ applies the EQ predicate on the "original_name" field. +func OriginalNameEQ(v string) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldOriginalName, v)) +} + +// OriginalNameNEQ applies the NEQ predicate on the "original_name" field. +func OriginalNameNEQ(v string) predicate.Media { + return predicate.Media(sql.FieldNEQ(FieldOriginalName, v)) +} + +// OriginalNameIn applies the In predicate on the "original_name" field. +func OriginalNameIn(vs ...string) predicate.Media { + return predicate.Media(sql.FieldIn(FieldOriginalName, vs...)) +} + +// OriginalNameNotIn applies the NotIn predicate on the "original_name" field. 
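+// For example, media.OriginalNameNotIn("a.png", "b.png") excludes both names.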
+func OriginalNameNotIn(vs ...string) predicate.Media { + return predicate.Media(sql.FieldNotIn(FieldOriginalName, vs...)) +} + +// OriginalNameGT applies the GT predicate on the "original_name" field. +func OriginalNameGT(v string) predicate.Media { + return predicate.Media(sql.FieldGT(FieldOriginalName, v)) +} + +// OriginalNameGTE applies the GTE predicate on the "original_name" field. +func OriginalNameGTE(v string) predicate.Media { + return predicate.Media(sql.FieldGTE(FieldOriginalName, v)) +} + +// OriginalNameLT applies the LT predicate on the "original_name" field. +func OriginalNameLT(v string) predicate.Media { + return predicate.Media(sql.FieldLT(FieldOriginalName, v)) +} + +// OriginalNameLTE applies the LTE predicate on the "original_name" field. +func OriginalNameLTE(v string) predicate.Media { + return predicate.Media(sql.FieldLTE(FieldOriginalName, v)) +} + +// OriginalNameContains applies the Contains predicate on the "original_name" field. +func OriginalNameContains(v string) predicate.Media { + return predicate.Media(sql.FieldContains(FieldOriginalName, v)) +} + +// OriginalNameHasPrefix applies the HasPrefix predicate on the "original_name" field. +func OriginalNameHasPrefix(v string) predicate.Media { + return predicate.Media(sql.FieldHasPrefix(FieldOriginalName, v)) +} + +// OriginalNameHasSuffix applies the HasSuffix predicate on the "original_name" field. +func OriginalNameHasSuffix(v string) predicate.Media { + return predicate.Media(sql.FieldHasSuffix(FieldOriginalName, v)) +} + +// OriginalNameEqualFold applies the EqualFold predicate on the "original_name" field. +func OriginalNameEqualFold(v string) predicate.Media { + return predicate.Media(sql.FieldEqualFold(FieldOriginalName, v)) +} + +// OriginalNameContainsFold applies the ContainsFold predicate on the "original_name" field. +func OriginalNameContainsFold(v string) predicate.Media { + return predicate.Media(sql.FieldContainsFold(FieldOriginalName, v)) +} + +// MimeTypeEQ applies the EQ predicate on the "mime_type" field. +func MimeTypeEQ(v string) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldMimeType, v)) +} + +// MimeTypeNEQ applies the NEQ predicate on the "mime_type" field. +func MimeTypeNEQ(v string) predicate.Media { + return predicate.Media(sql.FieldNEQ(FieldMimeType, v)) +} + +// MimeTypeIn applies the In predicate on the "mime_type" field. +func MimeTypeIn(vs ...string) predicate.Media { + return predicate.Media(sql.FieldIn(FieldMimeType, vs...)) +} + +// MimeTypeNotIn applies the NotIn predicate on the "mime_type" field. +func MimeTypeNotIn(vs ...string) predicate.Media { + return predicate.Media(sql.FieldNotIn(FieldMimeType, vs...)) +} + +// MimeTypeGT applies the GT predicate on the "mime_type" field. +func MimeTypeGT(v string) predicate.Media { + return predicate.Media(sql.FieldGT(FieldMimeType, v)) +} + +// MimeTypeGTE applies the GTE predicate on the "mime_type" field. +func MimeTypeGTE(v string) predicate.Media { + return predicate.Media(sql.FieldGTE(FieldMimeType, v)) +} + +// MimeTypeLT applies the LT predicate on the "mime_type" field. +func MimeTypeLT(v string) predicate.Media { + return predicate.Media(sql.FieldLT(FieldMimeType, v)) +} + +// MimeTypeLTE applies the LTE predicate on the "mime_type" field. +func MimeTypeLTE(v string) predicate.Media { + return predicate.Media(sql.FieldLTE(FieldMimeType, v)) +} + +// MimeTypeContains applies the Contains predicate on the "mime_type" field. 
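+// For example, media.MimeTypeContains("image") matches "image/png",
+// "image/jpeg", and so on.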
+func MimeTypeContains(v string) predicate.Media { + return predicate.Media(sql.FieldContains(FieldMimeType, v)) +} + +// MimeTypeHasPrefix applies the HasPrefix predicate on the "mime_type" field. +func MimeTypeHasPrefix(v string) predicate.Media { + return predicate.Media(sql.FieldHasPrefix(FieldMimeType, v)) +} + +// MimeTypeHasSuffix applies the HasSuffix predicate on the "mime_type" field. +func MimeTypeHasSuffix(v string) predicate.Media { + return predicate.Media(sql.FieldHasSuffix(FieldMimeType, v)) +} + +// MimeTypeEqualFold applies the EqualFold predicate on the "mime_type" field. +func MimeTypeEqualFold(v string) predicate.Media { + return predicate.Media(sql.FieldEqualFold(FieldMimeType, v)) +} + +// MimeTypeContainsFold applies the ContainsFold predicate on the "mime_type" field. +func MimeTypeContainsFold(v string) predicate.Media { + return predicate.Media(sql.FieldContainsFold(FieldMimeType, v)) +} + +// SizeEQ applies the EQ predicate on the "size" field. +func SizeEQ(v int64) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldSize, v)) +} + +// SizeNEQ applies the NEQ predicate on the "size" field. +func SizeNEQ(v int64) predicate.Media { + return predicate.Media(sql.FieldNEQ(FieldSize, v)) +} + +// SizeIn applies the In predicate on the "size" field. +func SizeIn(vs ...int64) predicate.Media { + return predicate.Media(sql.FieldIn(FieldSize, vs...)) +} + +// SizeNotIn applies the NotIn predicate on the "size" field. +func SizeNotIn(vs ...int64) predicate.Media { + return predicate.Media(sql.FieldNotIn(FieldSize, vs...)) +} + +// SizeGT applies the GT predicate on the "size" field. +func SizeGT(v int64) predicate.Media { + return predicate.Media(sql.FieldGT(FieldSize, v)) +} + +// SizeGTE applies the GTE predicate on the "size" field. +func SizeGTE(v int64) predicate.Media { + return predicate.Media(sql.FieldGTE(FieldSize, v)) +} + +// SizeLT applies the LT predicate on the "size" field. +func SizeLT(v int64) predicate.Media { + return predicate.Media(sql.FieldLT(FieldSize, v)) +} + +// SizeLTE applies the LTE predicate on the "size" field. +func SizeLTE(v int64) predicate.Media { + return predicate.Media(sql.FieldLTE(FieldSize, v)) +} + +// URLEQ applies the EQ predicate on the "url" field. +func URLEQ(v string) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldURL, v)) +} + +// URLNEQ applies the NEQ predicate on the "url" field. +func URLNEQ(v string) predicate.Media { + return predicate.Media(sql.FieldNEQ(FieldURL, v)) +} + +// URLIn applies the In predicate on the "url" field. +func URLIn(vs ...string) predicate.Media { + return predicate.Media(sql.FieldIn(FieldURL, vs...)) +} + +// URLNotIn applies the NotIn predicate on the "url" field. +func URLNotIn(vs ...string) predicate.Media { + return predicate.Media(sql.FieldNotIn(FieldURL, vs...)) +} + +// URLGT applies the GT predicate on the "url" field. +func URLGT(v string) predicate.Media { + return predicate.Media(sql.FieldGT(FieldURL, v)) +} + +// URLGTE applies the GTE predicate on the "url" field. +func URLGTE(v string) predicate.Media { + return predicate.Media(sql.FieldGTE(FieldURL, v)) +} + +// URLLT applies the LT predicate on the "url" field. +func URLLT(v string) predicate.Media { + return predicate.Media(sql.FieldLT(FieldURL, v)) +} + +// URLLTE applies the LTE predicate on the "url" field. +func URLLTE(v string) predicate.Media { + return predicate.Media(sql.FieldLTE(FieldURL, v)) +} + +// URLContains applies the Contains predicate on the "url" field. 
+func URLContains(v string) predicate.Media { + return predicate.Media(sql.FieldContains(FieldURL, v)) +} + +// URLHasPrefix applies the HasPrefix predicate on the "url" field. +func URLHasPrefix(v string) predicate.Media { + return predicate.Media(sql.FieldHasPrefix(FieldURL, v)) +} + +// URLHasSuffix applies the HasSuffix predicate on the "url" field. +func URLHasSuffix(v string) predicate.Media { + return predicate.Media(sql.FieldHasSuffix(FieldURL, v)) +} + +// URLEqualFold applies the EqualFold predicate on the "url" field. +func URLEqualFold(v string) predicate.Media { + return predicate.Media(sql.FieldEqualFold(FieldURL, v)) +} + +// URLContainsFold applies the ContainsFold predicate on the "url" field. +func URLContainsFold(v string) predicate.Media { + return predicate.Media(sql.FieldContainsFold(FieldURL, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Media { + return predicate.Media(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Media { + return predicate.Media(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Media { + return predicate.Media(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Media { + return predicate.Media(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Media { + return predicate.Media(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Media { + return predicate.Media(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Media { + return predicate.Media(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Media { + return predicate.Media(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Media { + return predicate.Media(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Media { + return predicate.Media(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Media { + return predicate.Media(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Media { + return predicate.Media(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. 
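+// Illustrative usage, selecting media not updated within the last day:
+//
+//	media.UpdatedAtLT(time.Now().Add(-24 * time.Hour))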
+func UpdatedAtLT(v time.Time) predicate.Media { + return predicate.Media(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Media { + return predicate.Media(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// CreatedByEQ applies the EQ predicate on the "created_by" field. +func CreatedByEQ(v string) predicate.Media { + return predicate.Media(sql.FieldEQ(FieldCreatedBy, v)) +} + +// CreatedByNEQ applies the NEQ predicate on the "created_by" field. +func CreatedByNEQ(v string) predicate.Media { + return predicate.Media(sql.FieldNEQ(FieldCreatedBy, v)) +} + +// CreatedByIn applies the In predicate on the "created_by" field. +func CreatedByIn(vs ...string) predicate.Media { + return predicate.Media(sql.FieldIn(FieldCreatedBy, vs...)) +} + +// CreatedByNotIn applies the NotIn predicate on the "created_by" field. +func CreatedByNotIn(vs ...string) predicate.Media { + return predicate.Media(sql.FieldNotIn(FieldCreatedBy, vs...)) +} + +// CreatedByGT applies the GT predicate on the "created_by" field. +func CreatedByGT(v string) predicate.Media { + return predicate.Media(sql.FieldGT(FieldCreatedBy, v)) +} + +// CreatedByGTE applies the GTE predicate on the "created_by" field. +func CreatedByGTE(v string) predicate.Media { + return predicate.Media(sql.FieldGTE(FieldCreatedBy, v)) +} + +// CreatedByLT applies the LT predicate on the "created_by" field. +func CreatedByLT(v string) predicate.Media { + return predicate.Media(sql.FieldLT(FieldCreatedBy, v)) +} + +// CreatedByLTE applies the LTE predicate on the "created_by" field. +func CreatedByLTE(v string) predicate.Media { + return predicate.Media(sql.FieldLTE(FieldCreatedBy, v)) +} + +// CreatedByContains applies the Contains predicate on the "created_by" field. +func CreatedByContains(v string) predicate.Media { + return predicate.Media(sql.FieldContains(FieldCreatedBy, v)) +} + +// CreatedByHasPrefix applies the HasPrefix predicate on the "created_by" field. +func CreatedByHasPrefix(v string) predicate.Media { + return predicate.Media(sql.FieldHasPrefix(FieldCreatedBy, v)) +} + +// CreatedByHasSuffix applies the HasSuffix predicate on the "created_by" field. +func CreatedByHasSuffix(v string) predicate.Media { + return predicate.Media(sql.FieldHasSuffix(FieldCreatedBy, v)) +} + +// CreatedByIsNil applies the IsNil predicate on the "created_by" field. +func CreatedByIsNil() predicate.Media { + return predicate.Media(sql.FieldIsNull(FieldCreatedBy)) +} + +// CreatedByNotNil applies the NotNil predicate on the "created_by" field. +func CreatedByNotNil() predicate.Media { + return predicate.Media(sql.FieldNotNull(FieldCreatedBy)) +} + +// CreatedByEqualFold applies the EqualFold predicate on the "created_by" field. +func CreatedByEqualFold(v string) predicate.Media { + return predicate.Media(sql.FieldEqualFold(FieldCreatedBy, v)) +} + +// CreatedByContainsFold applies the ContainsFold predicate on the "created_by" field. +func CreatedByContainsFold(v string) predicate.Media { + return predicate.Media(sql.FieldContainsFold(FieldCreatedBy, v)) +} + +// HasOwner applies the HasEdge predicate on the "owner" edge. 
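+// Illustrative usage, counting media that have an owner:
+//
+//	client.Media.Query().Where(media.HasOwner()).Count(ctx)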
+func HasOwner() predicate.Media { + return predicate.Media(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). +func HasOwnerWith(preds ...predicate.User) predicate.Media { + return predicate.Media(func(s *sql.Selector) { + step := newOwnerStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Media) predicate.Media { + return predicate.Media(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Media) predicate.Media { + return predicate.Media(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Media) predicate.Media { + return predicate.Media(sql.NotPredicates(p)) +} diff --git a/backend/ent/media_create.go b/backend/ent/media_create.go new file mode 100644 index 0000000..a4c4eb1 --- /dev/null +++ b/backend/ent/media_create.go @@ -0,0 +1,372 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/media" + "tss-rocks-be/ent/user" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// MediaCreate is the builder for creating a Media entity. +type MediaCreate struct { + config + mutation *MediaMutation + hooks []Hook +} + +// SetStorageID sets the "storage_id" field. +func (mc *MediaCreate) SetStorageID(s string) *MediaCreate { + mc.mutation.SetStorageID(s) + return mc +} + +// SetOriginalName sets the "original_name" field. +func (mc *MediaCreate) SetOriginalName(s string) *MediaCreate { + mc.mutation.SetOriginalName(s) + return mc +} + +// SetMimeType sets the "mime_type" field. +func (mc *MediaCreate) SetMimeType(s string) *MediaCreate { + mc.mutation.SetMimeType(s) + return mc +} + +// SetSize sets the "size" field. +func (mc *MediaCreate) SetSize(i int64) *MediaCreate { + mc.mutation.SetSize(i) + return mc +} + +// SetURL sets the "url" field. +func (mc *MediaCreate) SetURL(s string) *MediaCreate { + mc.mutation.SetURL(s) + return mc +} + +// SetCreatedAt sets the "created_at" field. +func (mc *MediaCreate) SetCreatedAt(t time.Time) *MediaCreate { + mc.mutation.SetCreatedAt(t) + return mc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (mc *MediaCreate) SetNillableCreatedAt(t *time.Time) *MediaCreate { + if t != nil { + mc.SetCreatedAt(*t) + } + return mc +} + +// SetUpdatedAt sets the "updated_at" field. +func (mc *MediaCreate) SetUpdatedAt(t time.Time) *MediaCreate { + mc.mutation.SetUpdatedAt(t) + return mc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (mc *MediaCreate) SetNillableUpdatedAt(t *time.Time) *MediaCreate { + if t != nil { + mc.SetUpdatedAt(*t) + } + return mc +} + +// SetCreatedBy sets the "created_by" field. +func (mc *MediaCreate) SetCreatedBy(s string) *MediaCreate { + mc.mutation.SetCreatedBy(s) + return mc +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. 
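+// Passing nil leaves created_by unset (the column is nullable) rather than
+// writing an empty string.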
+func (mc *MediaCreate) SetNillableCreatedBy(s *string) *MediaCreate { + if s != nil { + mc.SetCreatedBy(*s) + } + return mc +} + +// SetOwnerID sets the "owner" edge to the User entity by ID. +func (mc *MediaCreate) SetOwnerID(id int) *MediaCreate { + mc.mutation.SetOwnerID(id) + return mc +} + +// SetNillableOwnerID sets the "owner" edge to the User entity by ID if the given value is not nil. +func (mc *MediaCreate) SetNillableOwnerID(id *int) *MediaCreate { + if id != nil { + mc = mc.SetOwnerID(*id) + } + return mc +} + +// SetOwner sets the "owner" edge to the User entity. +func (mc *MediaCreate) SetOwner(u *User) *MediaCreate { + return mc.SetOwnerID(u.ID) +} + +// Mutation returns the MediaMutation object of the builder. +func (mc *MediaCreate) Mutation() *MediaMutation { + return mc.mutation +} + +// Save creates the Media in the database. +func (mc *MediaCreate) Save(ctx context.Context) (*Media, error) { + mc.defaults() + return withHooks(ctx, mc.sqlSave, mc.mutation, mc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (mc *MediaCreate) SaveX(ctx context.Context) *Media { + v, err := mc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (mc *MediaCreate) Exec(ctx context.Context) error { + _, err := mc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mc *MediaCreate) ExecX(ctx context.Context) { + if err := mc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (mc *MediaCreate) defaults() { + if _, ok := mc.mutation.CreatedAt(); !ok { + v := media.DefaultCreatedAt() + mc.mutation.SetCreatedAt(v) + } + if _, ok := mc.mutation.UpdatedAt(); !ok { + v := media.DefaultUpdatedAt() + mc.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
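+// Field validators such as media.StorageIDValidator are wired up from the
+// schema definitions when the package is initialized.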
+func (mc *MediaCreate) check() error { + if _, ok := mc.mutation.StorageID(); !ok { + return &ValidationError{Name: "storage_id", err: errors.New(`ent: missing required field "Media.storage_id"`)} + } + if v, ok := mc.mutation.StorageID(); ok { + if err := media.StorageIDValidator(v); err != nil { + return &ValidationError{Name: "storage_id", err: fmt.Errorf(`ent: validator failed for field "Media.storage_id": %w`, err)} + } + } + if _, ok := mc.mutation.OriginalName(); !ok { + return &ValidationError{Name: "original_name", err: errors.New(`ent: missing required field "Media.original_name"`)} + } + if v, ok := mc.mutation.OriginalName(); ok { + if err := media.OriginalNameValidator(v); err != nil { + return &ValidationError{Name: "original_name", err: fmt.Errorf(`ent: validator failed for field "Media.original_name": %w`, err)} + } + } + if _, ok := mc.mutation.MimeType(); !ok { + return &ValidationError{Name: "mime_type", err: errors.New(`ent: missing required field "Media.mime_type"`)} + } + if v, ok := mc.mutation.MimeType(); ok { + if err := media.MimeTypeValidator(v); err != nil { + return &ValidationError{Name: "mime_type", err: fmt.Errorf(`ent: validator failed for field "Media.mime_type": %w`, err)} + } + } + if _, ok := mc.mutation.Size(); !ok { + return &ValidationError{Name: "size", err: errors.New(`ent: missing required field "Media.size"`)} + } + if v, ok := mc.mutation.Size(); ok { + if err := media.SizeValidator(v); err != nil { + return &ValidationError{Name: "size", err: fmt.Errorf(`ent: validator failed for field "Media.size": %w`, err)} + } + } + if _, ok := mc.mutation.URL(); !ok { + return &ValidationError{Name: "url", err: errors.New(`ent: missing required field "Media.url"`)} + } + if v, ok := mc.mutation.URL(); ok { + if err := media.URLValidator(v); err != nil { + return &ValidationError{Name: "url", err: fmt.Errorf(`ent: validator failed for field "Media.url": %w`, err)} + } + } + if _, ok := mc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Media.created_at"`)} + } + if _, ok := mc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Media.updated_at"`)} + } + return nil +} + +func (mc *MediaCreate) sqlSave(ctx context.Context) (*Media, error) { + if err := mc.check(); err != nil { + return nil, err + } + _node, _spec := mc.createSpec() + if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + mc.mutation.id = &_node.ID + mc.mutation.done = true + return _node, nil +} + +func (mc *MediaCreate) createSpec() (*Media, *sqlgraph.CreateSpec) { + var ( + _node = &Media{config: mc.config} + _spec = sqlgraph.NewCreateSpec(media.Table, sqlgraph.NewFieldSpec(media.FieldID, field.TypeInt)) + ) + if value, ok := mc.mutation.StorageID(); ok { + _spec.SetField(media.FieldStorageID, field.TypeString, value) + _node.StorageID = value + } + if value, ok := mc.mutation.OriginalName(); ok { + _spec.SetField(media.FieldOriginalName, field.TypeString, value) + _node.OriginalName = value + } + if value, ok := mc.mutation.MimeType(); ok { + _spec.SetField(media.FieldMimeType, field.TypeString, value) + _node.MimeType = value + } + if value, ok := mc.mutation.Size(); ok { + _spec.SetField(media.FieldSize, field.TypeInt64, value) + _node.Size = value + } + if value, ok 
:= mc.mutation.URL(); ok { + _spec.SetField(media.FieldURL, field.TypeString, value) + _node.URL = value + } + if value, ok := mc.mutation.CreatedAt(); ok { + _spec.SetField(media.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := mc.mutation.UpdatedAt(); ok { + _spec.SetField(media.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := mc.mutation.CreatedBy(); ok { + _spec.SetField(media.FieldCreatedBy, field.TypeString, value) + _node.CreatedBy = value + } + if nodes := mc.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: media.OwnerTable, + Columns: []string{media.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.user_media = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// MediaCreateBulk is the builder for creating many Media entities in bulk. +type MediaCreateBulk struct { + config + err error + builders []*MediaCreate +} + +// Save creates the Media entities in the database. +func (mcb *MediaCreateBulk) Save(ctx context.Context) ([]*Media, error) { + if mcb.err != nil { + return nil, mcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(mcb.builders)) + nodes := make([]*Media, len(mcb.builders)) + mutators := make([]Mutator, len(mcb.builders)) + for i := range mcb.builders { + func(i int, root context.Context) { + builder := mcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MediaMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, mcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, mcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (mcb *MediaCreateBulk) SaveX(ctx context.Context) []*Media { + v, err := mcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (mcb *MediaCreateBulk) Exec(ctx context.Context) error { + _, err := mcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
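+// Illustrative bulk usage (all field values are placeholders):
+//
+//	client.Media.CreateBulk(
+//		client.Media.Create().
+//			SetStorageID("sid-1").
+//			SetOriginalName("a.png").
+//			SetMimeType("image/png").
+//			SetSize(123).
+//			SetURL("/m/a.png"),
+//	).ExecX(ctx)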
+func (mcb *MediaCreateBulk) ExecX(ctx context.Context) { + if err := mcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/media_delete.go b/backend/ent/media_delete.go new file mode 100644 index 0000000..1e83398 --- /dev/null +++ b/backend/ent/media_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "tss-rocks-be/ent/media" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// MediaDelete is the builder for deleting a Media entity. +type MediaDelete struct { + config + hooks []Hook + mutation *MediaMutation +} + +// Where appends a list predicates to the MediaDelete builder. +func (md *MediaDelete) Where(ps ...predicate.Media) *MediaDelete { + md.mutation.Where(ps...) + return md +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (md *MediaDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, md.sqlExec, md.mutation, md.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (md *MediaDelete) ExecX(ctx context.Context) int { + n, err := md.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (md *MediaDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(media.Table, sqlgraph.NewFieldSpec(media.FieldID, field.TypeInt)) + if ps := md.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, md.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + md.mutation.done = true + return affected, err +} + +// MediaDeleteOne is the builder for deleting a single Media entity. +type MediaDeleteOne struct { + md *MediaDelete +} + +// Where appends a list predicates to the MediaDelete builder. +func (mdo *MediaDeleteOne) Where(ps ...predicate.Media) *MediaDeleteOne { + mdo.md.mutation.Where(ps...) + return mdo +} + +// Exec executes the deletion query. +func (mdo *MediaDeleteOne) Exec(ctx context.Context) error { + n, err := mdo.md.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{media.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (mdo *MediaDeleteOne) ExecX(ctx context.Context) { + if err := mdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/media_query.go b/backend/ent/media_query.go new file mode 100644 index 0000000..767d987 --- /dev/null +++ b/backend/ent/media_query.go @@ -0,0 +1,614 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + "tss-rocks-be/ent/media" + "tss-rocks-be/ent/predicate" + "tss-rocks-be/ent/user" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// MediaQuery is the builder for querying Media entities. +type MediaQuery struct { + config + ctx *QueryContext + order []media.OrderOption + inters []Interceptor + predicates []predicate.Media + withOwner *UserQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the MediaQuery builder. +func (mq *MediaQuery) Where(ps ...predicate.Media) *MediaQuery { + mq.predicates = append(mq.predicates, ps...) 
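+ // Predicates passed across multiple Where calls accumulate and are
+ // AND-ed together when the query is executed.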
+ return mq +} + +// Limit the number of records to be returned by this query. +func (mq *MediaQuery) Limit(limit int) *MediaQuery { + mq.ctx.Limit = &limit + return mq +} + +// Offset to start from. +func (mq *MediaQuery) Offset(offset int) *MediaQuery { + mq.ctx.Offset = &offset + return mq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (mq *MediaQuery) Unique(unique bool) *MediaQuery { + mq.ctx.Unique = &unique + return mq +} + +// Order specifies how the records should be ordered. +func (mq *MediaQuery) Order(o ...media.OrderOption) *MediaQuery { + mq.order = append(mq.order, o...) + return mq +} + +// QueryOwner chains the current query on the "owner" edge. +func (mq *MediaQuery) QueryOwner() *UserQuery { + query := (&UserClient{config: mq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := mq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(media.Table, media.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, media.OwnerTable, media.OwnerColumn), + ) + fromU = sqlgraph.SetNeighbors(mq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Media entity from the query. +// Returns a *NotFoundError when no Media was found. +func (mq *MediaQuery) First(ctx context.Context) (*Media, error) { + nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{media.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (mq *MediaQuery) FirstX(ctx context.Context) *Media { + node, err := mq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Media ID from the query. +// Returns a *NotFoundError when no Media ID was found. +func (mq *MediaQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{media.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (mq *MediaQuery) FirstIDX(ctx context.Context) int { + id, err := mq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Media entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Media entity is found. +// Returns a *NotFoundError when no Media entities are found. +func (mq *MediaQuery) Only(ctx context.Context) (*Media, error) { + nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{media.Label} + default: + return nil, &NotSingularError{media.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (mq *MediaQuery) OnlyX(ctx context.Context) *Media { + node, err := mq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Media ID in the query. 
+// Returns a *NotSingularError when more than one Media ID is found. +// Returns a *NotFoundError when no entities are found. +func (mq *MediaQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{media.Label} + default: + err = &NotSingularError{media.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (mq *MediaQuery) OnlyIDX(ctx context.Context) int { + id, err := mq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of MediaSlice. +func (mq *MediaQuery) All(ctx context.Context) ([]*Media, error) { + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryAll) + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Media, *MediaQuery]() + return withInterceptors[[]*Media](ctx, mq, qr, mq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (mq *MediaQuery) AllX(ctx context.Context) []*Media { + nodes, err := mq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Media IDs. +func (mq *MediaQuery) IDs(ctx context.Context) (ids []int, err error) { + if mq.ctx.Unique == nil && mq.path != nil { + mq.Unique(true) + } + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryIDs) + if err = mq.Select(media.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (mq *MediaQuery) IDsX(ctx context.Context) []int { + ids, err := mq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (mq *MediaQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryCount) + if err := mq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, mq, querierCount[*MediaQuery](), mq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (mq *MediaQuery) CountX(ctx context.Context) int { + count, err := mq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (mq *MediaQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryExist) + switch _, err := mq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (mq *MediaQuery) ExistX(ctx context.Context) bool { + exist, err := mq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the MediaQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (mq *MediaQuery) Clone() *MediaQuery { + if mq == nil { + return nil + } + return &MediaQuery{ + config: mq.config, + ctx: mq.ctx.Clone(), + order: append([]media.OrderOption{}, mq.order...), + inters: append([]Interceptor{}, mq.inters...), + predicates: append([]predicate.Media{}, mq.predicates...), + withOwner: mq.withOwner.Clone(), + // clone intermediate query. 
+ sql: mq.sql.Clone(), + path: mq.path, + } +} + +// WithOwner tells the query-builder to eager-load the nodes that are connected to +// the "owner" edge. The optional arguments are used to configure the query builder of the edge. +func (mq *MediaQuery) WithOwner(opts ...func(*UserQuery)) *MediaQuery { + query := (&UserClient{config: mq.config}).Query() + for _, opt := range opts { + opt(query) + } + mq.withOwner = query + return mq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// StorageID string `json:"storage_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Media.Query(). +// GroupBy(media.FieldStorageID). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (mq *MediaQuery) GroupBy(field string, fields ...string) *MediaGroupBy { + mq.ctx.Fields = append([]string{field}, fields...) + grbuild := &MediaGroupBy{build: mq} + grbuild.flds = &mq.ctx.Fields + grbuild.label = media.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// StorageID string `json:"storage_id,omitempty"` +// } +// +// client.Media.Query(). +// Select(media.FieldStorageID). +// Scan(ctx, &v) +func (mq *MediaQuery) Select(fields ...string) *MediaSelect { + mq.ctx.Fields = append(mq.ctx.Fields, fields...) + sbuild := &MediaSelect{MediaQuery: mq} + sbuild.label = media.Label + sbuild.flds, sbuild.scan = &mq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a MediaSelect configured with the given aggregations. +func (mq *MediaQuery) Aggregate(fns ...AggregateFunc) *MediaSelect { + return mq.Select().Aggregate(fns...) +} + +func (mq *MediaQuery) prepareQuery(ctx context.Context) error { + for _, inter := range mq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, mq); err != nil { + return err + } + } + } + for _, f := range mq.ctx.Fields { + if !media.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if mq.path != nil { + prev, err := mq.path(ctx) + if err != nil { + return err + } + mq.sql = prev + } + return nil +} + +func (mq *MediaQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Media, error) { + var ( + nodes = []*Media{} + withFKs = mq.withFKs + _spec = mq.querySpec() + loadedTypes = [1]bool{ + mq.withOwner != nil, + } + ) + if mq.withOwner != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, media.ForeignKeys...) 
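+ // user_media is not part of media.Columns, so the foreign-key column is
+ // selected explicitly whenever the owner edge is eager-loaded.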
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Media).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Media{config: mq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, mq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := mq.withOwner; query != nil { + if err := mq.loadOwner(ctx, query, nodes, nil, + func(n *Media, e *User) { n.Edges.Owner = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (mq *MediaQuery) loadOwner(ctx context.Context, query *UserQuery, nodes []*Media, init func(*Media), assign func(*Media, *User)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*Media) + for i := range nodes { + if nodes[i].user_media == nil { + continue + } + fk := *nodes[i].user_media + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_media" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (mq *MediaQuery) sqlCount(ctx context.Context) (int, error) { + _spec := mq.querySpec() + _spec.Node.Columns = mq.ctx.Fields + if len(mq.ctx.Fields) > 0 { + _spec.Unique = mq.ctx.Unique != nil && *mq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, mq.driver, _spec) +} + +func (mq *MediaQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(media.Table, media.Columns, sqlgraph.NewFieldSpec(media.FieldID, field.TypeInt)) + _spec.From = mq.sql + if unique := mq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if mq.path != nil { + _spec.Unique = true + } + if fields := mq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, media.FieldID) + for i := range fields { + if fields[i] != media.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := mq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := mq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := mq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := mq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (mq *MediaQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(mq.driver.Dialect()) + t1 := builder.Table(media.Table) + columns := mq.ctx.Fields + if len(columns) == 0 { + columns = media.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if mq.sql != nil { + selector = mq.sql + selector.Select(selector.Columns(columns...)...) + } + if mq.ctx.Unique != nil && *mq.ctx.Unique { + selector.Distinct() + } + for _, p := range mq.predicates { + p(selector) + } + for _, p := range mq.order { + p(selector) + } + if offset := mq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. 
We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := mq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// MediaGroupBy is the group-by builder for Media entities. +type MediaGroupBy struct { + selector + build *MediaQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (mgb *MediaGroupBy) Aggregate(fns ...AggregateFunc) *MediaGroupBy { + mgb.fns = append(mgb.fns, fns...) + return mgb +} + +// Scan applies the selector query and scans the result into the given value. +func (mgb *MediaGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, mgb.build.ctx, ent.OpQueryGroupBy) + if err := mgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*MediaQuery, *MediaGroupBy](ctx, mgb.build, mgb, mgb.build.inters, v) +} + +func (mgb *MediaGroupBy) sqlScan(ctx context.Context, root *MediaQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(mgb.fns)) + for _, fn := range mgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*mgb.flds)+len(mgb.fns)) + for _, f := range *mgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*mgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := mgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// MediaSelect is the builder for selecting fields of Media entities. +type MediaSelect struct { + *MediaQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ms *MediaSelect) Aggregate(fns ...AggregateFunc) *MediaSelect { + ms.fns = append(ms.fns, fns...) + return ms +} + +// Scan applies the selector query and scans the result into the given value. +func (ms *MediaSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ms.ctx, ent.OpQuerySelect) + if err := ms.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*MediaQuery, *MediaSelect](ctx, ms.MediaQuery, ms, ms.inters, v) +} + +func (ms *MediaSelect) sqlScan(ctx context.Context, root *MediaQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ms.fns)) + for _, fn := range ms.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ms.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ms.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/media_update.go b/backend/ent/media_update.go new file mode 100644 index 0000000..610d3e9 --- /dev/null +++ b/backend/ent/media_update.go @@ -0,0 +1,629 @@ +// Code generated by ent, DO NOT EDIT. 
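+// Example usage of the update builders defined below (illustrative sketch, not
+// generated output; it assumes an initialized *ent.Client named "client" and a
+// context.Context named "ctx", neither of which is defined in this file):
+//
+//	// Bulk update: stamp created_by on every image row.
+//	n, err := client.Media.Update().
+//		Where(media.MimeTypeHasPrefix("image/")).
+//		SetCreatedBy("importer").
+//		Save(ctx) // n is the number of rows affected
+//
+//	// Single-row update by ID, returning the updated entity.
+//	item, err := client.Media.UpdateOneID(1).
+//		SetURL("https://example.invalid/media/1.png").
+//		Save(ctx)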
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/media" + "tss-rocks-be/ent/predicate" + "tss-rocks-be/ent/user" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// MediaUpdate is the builder for updating Media entities. +type MediaUpdate struct { + config + hooks []Hook + mutation *MediaMutation +} + +// Where appends a list of predicates to the MediaUpdate builder. +func (mu *MediaUpdate) Where(ps ...predicate.Media) *MediaUpdate { + mu.mutation.Where(ps...) + return mu +} + +// SetStorageID sets the "storage_id" field. +func (mu *MediaUpdate) SetStorageID(s string) *MediaUpdate { + mu.mutation.SetStorageID(s) + return mu +} + +// SetNillableStorageID sets the "storage_id" field if the given value is not nil. +func (mu *MediaUpdate) SetNillableStorageID(s *string) *MediaUpdate { + if s != nil { + mu.SetStorageID(*s) + } + return mu +} + +// SetOriginalName sets the "original_name" field. +func (mu *MediaUpdate) SetOriginalName(s string) *MediaUpdate { + mu.mutation.SetOriginalName(s) + return mu +} + +// SetNillableOriginalName sets the "original_name" field if the given value is not nil. +func (mu *MediaUpdate) SetNillableOriginalName(s *string) *MediaUpdate { + if s != nil { + mu.SetOriginalName(*s) + } + return mu +} + +// SetMimeType sets the "mime_type" field. +func (mu *MediaUpdate) SetMimeType(s string) *MediaUpdate { + mu.mutation.SetMimeType(s) + return mu +} + +// SetNillableMimeType sets the "mime_type" field if the given value is not nil. +func (mu *MediaUpdate) SetNillableMimeType(s *string) *MediaUpdate { + if s != nil { + mu.SetMimeType(*s) + } + return mu +} + +// SetSize sets the "size" field. +func (mu *MediaUpdate) SetSize(i int64) *MediaUpdate { + mu.mutation.ResetSize() + mu.mutation.SetSize(i) + return mu +} + +// SetNillableSize sets the "size" field if the given value is not nil. +func (mu *MediaUpdate) SetNillableSize(i *int64) *MediaUpdate { + if i != nil { + mu.SetSize(*i) + } + return mu +} + +// AddSize adds i to the "size" field. +func (mu *MediaUpdate) AddSize(i int64) *MediaUpdate { + mu.mutation.AddSize(i) + return mu +} + +// SetURL sets the "url" field. +func (mu *MediaUpdate) SetURL(s string) *MediaUpdate { + mu.mutation.SetURL(s) + return mu +} + +// SetNillableURL sets the "url" field if the given value is not nil. +func (mu *MediaUpdate) SetNillableURL(s *string) *MediaUpdate { + if s != nil { + mu.SetURL(*s) + } + return mu +} + +// SetUpdatedAt sets the "updated_at" field. +func (mu *MediaUpdate) SetUpdatedAt(t time.Time) *MediaUpdate { + mu.mutation.SetUpdatedAt(t) + return mu +} + +// SetCreatedBy sets the "created_by" field. +func (mu *MediaUpdate) SetCreatedBy(s string) *MediaUpdate { + mu.mutation.SetCreatedBy(s) + return mu +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (mu *MediaUpdate) SetNillableCreatedBy(s *string) *MediaUpdate { + if s != nil { + mu.SetCreatedBy(*s) + } + return mu +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (mu *MediaUpdate) ClearCreatedBy() *MediaUpdate { + mu.mutation.ClearCreatedBy() + return mu +} + +// SetOwnerID sets the "owner" edge to the User entity by ID. +func (mu *MediaUpdate) SetOwnerID(id int) *MediaUpdate { + mu.mutation.SetOwnerID(id) + return mu +} + +// SetNillableOwnerID sets the "owner" edge to the User entity by ID if the given value is not nil.
+func (mu *MediaUpdate) SetNillableOwnerID(id *int) *MediaUpdate { + if id != nil { + mu = mu.SetOwnerID(*id) + } + return mu +} + +// SetOwner sets the "owner" edge to the User entity. +func (mu *MediaUpdate) SetOwner(u *User) *MediaUpdate { + return mu.SetOwnerID(u.ID) +} + +// Mutation returns the MediaMutation object of the builder. +func (mu *MediaUpdate) Mutation() *MediaMutation { + return mu.mutation +} + +// ClearOwner clears the "owner" edge to the User entity. +func (mu *MediaUpdate) ClearOwner() *MediaUpdate { + mu.mutation.ClearOwner() + return mu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (mu *MediaUpdate) Save(ctx context.Context) (int, error) { + mu.defaults() + return withHooks(ctx, mu.sqlSave, mu.mutation, mu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (mu *MediaUpdate) SaveX(ctx context.Context) int { + affected, err := mu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (mu *MediaUpdate) Exec(ctx context.Context) error { + _, err := mu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mu *MediaUpdate) ExecX(ctx context.Context) { + if err := mu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (mu *MediaUpdate) defaults() { + if _, ok := mu.mutation.UpdatedAt(); !ok { + v := media.UpdateDefaultUpdatedAt() + mu.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (mu *MediaUpdate) check() error { + if v, ok := mu.mutation.StorageID(); ok { + if err := media.StorageIDValidator(v); err != nil { + return &ValidationError{Name: "storage_id", err: fmt.Errorf(`ent: validator failed for field "Media.storage_id": %w`, err)} + } + } + if v, ok := mu.mutation.OriginalName(); ok { + if err := media.OriginalNameValidator(v); err != nil { + return &ValidationError{Name: "original_name", err: fmt.Errorf(`ent: validator failed for field "Media.original_name": %w`, err)} + } + } + if v, ok := mu.mutation.MimeType(); ok { + if err := media.MimeTypeValidator(v); err != nil { + return &ValidationError{Name: "mime_type", err: fmt.Errorf(`ent: validator failed for field "Media.mime_type": %w`, err)} + } + } + if v, ok := mu.mutation.Size(); ok { + if err := media.SizeValidator(v); err != nil { + return &ValidationError{Name: "size", err: fmt.Errorf(`ent: validator failed for field "Media.size": %w`, err)} + } + } + if v, ok := mu.mutation.URL(); ok { + if err := media.URLValidator(v); err != nil { + return &ValidationError{Name: "url", err: fmt.Errorf(`ent: validator failed for field "Media.url": %w`, err)} + } + } + return nil +} + +func (mu *MediaUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := mu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(media.Table, media.Columns, sqlgraph.NewFieldSpec(media.FieldID, field.TypeInt)) + if ps := mu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := mu.mutation.StorageID(); ok { + _spec.SetField(media.FieldStorageID, field.TypeString, value) + } + if value, ok := mu.mutation.OriginalName(); ok { + _spec.SetField(media.FieldOriginalName, field.TypeString, value) + } + if value, ok := mu.mutation.MimeType(); ok { + _spec.SetField(media.FieldMimeType, field.TypeString, value) + } + if value, ok := 
mu.mutation.Size(); ok { + _spec.SetField(media.FieldSize, field.TypeInt64, value) + } + if value, ok := mu.mutation.AddedSize(); ok { + _spec.AddField(media.FieldSize, field.TypeInt64, value) + } + if value, ok := mu.mutation.URL(); ok { + _spec.SetField(media.FieldURL, field.TypeString, value) + } + if value, ok := mu.mutation.UpdatedAt(); ok { + _spec.SetField(media.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := mu.mutation.CreatedBy(); ok { + _spec.SetField(media.FieldCreatedBy, field.TypeString, value) + } + if mu.mutation.CreatedByCleared() { + _spec.ClearField(media.FieldCreatedBy, field.TypeString) + } + if mu.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: media.OwnerTable, + Columns: []string{media.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := mu.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: media.OwnerTable, + Columns: []string{media.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{media.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + mu.mutation.done = true + return n, nil +} + +// MediaUpdateOne is the builder for updating a single Media entity. +type MediaUpdateOne struct { + config + fields []string + hooks []Hook + mutation *MediaMutation +} + +// SetStorageID sets the "storage_id" field. +func (muo *MediaUpdateOne) SetStorageID(s string) *MediaUpdateOne { + muo.mutation.SetStorageID(s) + return muo +} + +// SetNillableStorageID sets the "storage_id" field if the given value is not nil. +func (muo *MediaUpdateOne) SetNillableStorageID(s *string) *MediaUpdateOne { + if s != nil { + muo.SetStorageID(*s) + } + return muo +} + +// SetOriginalName sets the "original_name" field. +func (muo *MediaUpdateOne) SetOriginalName(s string) *MediaUpdateOne { + muo.mutation.SetOriginalName(s) + return muo +} + +// SetNillableOriginalName sets the "original_name" field if the given value is not nil. +func (muo *MediaUpdateOne) SetNillableOriginalName(s *string) *MediaUpdateOne { + if s != nil { + muo.SetOriginalName(*s) + } + return muo +} + +// SetMimeType sets the "mime_type" field. +func (muo *MediaUpdateOne) SetMimeType(s string) *MediaUpdateOne { + muo.mutation.SetMimeType(s) + return muo +} + +// SetNillableMimeType sets the "mime_type" field if the given value is not nil. +func (muo *MediaUpdateOne) SetNillableMimeType(s *string) *MediaUpdateOne { + if s != nil { + muo.SetMimeType(*s) + } + return muo +} + +// SetSize sets the "size" field. +func (muo *MediaUpdateOne) SetSize(i int64) *MediaUpdateOne { + muo.mutation.ResetSize() + muo.mutation.SetSize(i) + return muo +} + +// SetNillableSize sets the "size" field if the given value is not nil. +func (muo *MediaUpdateOne) SetNillableSize(i *int64) *MediaUpdateOne { + if i != nil { + muo.SetSize(*i) + } + return muo +} + +// AddSize adds i to the "size" field. 
+func (muo *MediaUpdateOne) AddSize(i int64) *MediaUpdateOne { + muo.mutation.AddSize(i) + return muo +} + +// SetURL sets the "url" field. +func (muo *MediaUpdateOne) SetURL(s string) *MediaUpdateOne { + muo.mutation.SetURL(s) + return muo +} + +// SetNillableURL sets the "url" field if the given value is not nil. +func (muo *MediaUpdateOne) SetNillableURL(s *string) *MediaUpdateOne { + if s != nil { + muo.SetURL(*s) + } + return muo +} + +// SetUpdatedAt sets the "updated_at" field. +func (muo *MediaUpdateOne) SetUpdatedAt(t time.Time) *MediaUpdateOne { + muo.mutation.SetUpdatedAt(t) + return muo +} + +// SetCreatedBy sets the "created_by" field. +func (muo *MediaUpdateOne) SetCreatedBy(s string) *MediaUpdateOne { + muo.mutation.SetCreatedBy(s) + return muo +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (muo *MediaUpdateOne) SetNillableCreatedBy(s *string) *MediaUpdateOne { + if s != nil { + muo.SetCreatedBy(*s) + } + return muo +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (muo *MediaUpdateOne) ClearCreatedBy() *MediaUpdateOne { + muo.mutation.ClearCreatedBy() + return muo +} + +// SetOwnerID sets the "owner" edge to the User entity by ID. +func (muo *MediaUpdateOne) SetOwnerID(id int) *MediaUpdateOne { + muo.mutation.SetOwnerID(id) + return muo +} + +// SetNillableOwnerID sets the "owner" edge to the User entity by ID if the given value is not nil. +func (muo *MediaUpdateOne) SetNillableOwnerID(id *int) *MediaUpdateOne { + if id != nil { + muo = muo.SetOwnerID(*id) + } + return muo +} + +// SetOwner sets the "owner" edge to the User entity. +func (muo *MediaUpdateOne) SetOwner(u *User) *MediaUpdateOne { + return muo.SetOwnerID(u.ID) +} + +// Mutation returns the MediaMutation object of the builder. +func (muo *MediaUpdateOne) Mutation() *MediaMutation { + return muo.mutation +} + +// ClearOwner clears the "owner" edge to the User entity. +func (muo *MediaUpdateOne) ClearOwner() *MediaUpdateOne { + muo.mutation.ClearOwner() + return muo +} + +// Where appends a list of predicates to the MediaUpdateOne builder. +func (muo *MediaUpdateOne) Where(ps ...predicate.Media) *MediaUpdateOne { + muo.mutation.Where(ps...) + return muo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (muo *MediaUpdateOne) Select(field string, fields ...string) *MediaUpdateOne { + muo.fields = append([]string{field}, fields...) + return muo +} + +// Save executes the query and returns the updated Media entity. +func (muo *MediaUpdateOne) Save(ctx context.Context) (*Media, error) { + muo.defaults() + return withHooks(ctx, muo.sqlSave, muo.mutation, muo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (muo *MediaUpdateOne) SaveX(ctx context.Context) *Media { + node, err := muo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (muo *MediaUpdateOne) Exec(ctx context.Context) error { + _, err := muo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (muo *MediaUpdateOne) ExecX(ctx context.Context) { + if err := muo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save.
+func (muo *MediaUpdateOne) defaults() { + if _, ok := muo.mutation.UpdatedAt(); !ok { + v := media.UpdateDefaultUpdatedAt() + muo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (muo *MediaUpdateOne) check() error { + if v, ok := muo.mutation.StorageID(); ok { + if err := media.StorageIDValidator(v); err != nil { + return &ValidationError{Name: "storage_id", err: fmt.Errorf(`ent: validator failed for field "Media.storage_id": %w`, err)} + } + } + if v, ok := muo.mutation.OriginalName(); ok { + if err := media.OriginalNameValidator(v); err != nil { + return &ValidationError{Name: "original_name", err: fmt.Errorf(`ent: validator failed for field "Media.original_name": %w`, err)} + } + } + if v, ok := muo.mutation.MimeType(); ok { + if err := media.MimeTypeValidator(v); err != nil { + return &ValidationError{Name: "mime_type", err: fmt.Errorf(`ent: validator failed for field "Media.mime_type": %w`, err)} + } + } + if v, ok := muo.mutation.Size(); ok { + if err := media.SizeValidator(v); err != nil { + return &ValidationError{Name: "size", err: fmt.Errorf(`ent: validator failed for field "Media.size": %w`, err)} + } + } + if v, ok := muo.mutation.URL(); ok { + if err := media.URLValidator(v); err != nil { + return &ValidationError{Name: "url", err: fmt.Errorf(`ent: validator failed for field "Media.url": %w`, err)} + } + } + return nil +} + +func (muo *MediaUpdateOne) sqlSave(ctx context.Context) (_node *Media, err error) { + if err := muo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(media.Table, media.Columns, sqlgraph.NewFieldSpec(media.FieldID, field.TypeInt)) + id, ok := muo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Media.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := muo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, media.FieldID) + for _, f := range fields { + if !media.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != media.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := muo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := muo.mutation.StorageID(); ok { + _spec.SetField(media.FieldStorageID, field.TypeString, value) + } + if value, ok := muo.mutation.OriginalName(); ok { + _spec.SetField(media.FieldOriginalName, field.TypeString, value) + } + if value, ok := muo.mutation.MimeType(); ok { + _spec.SetField(media.FieldMimeType, field.TypeString, value) + } + if value, ok := muo.mutation.Size(); ok { + _spec.SetField(media.FieldSize, field.TypeInt64, value) + } + if value, ok := muo.mutation.AddedSize(); ok { + _spec.AddField(media.FieldSize, field.TypeInt64, value) + } + if value, ok := muo.mutation.URL(); ok { + _spec.SetField(media.FieldURL, field.TypeString, value) + } + if value, ok := muo.mutation.UpdatedAt(); ok { + _spec.SetField(media.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := muo.mutation.CreatedBy(); ok { + _spec.SetField(media.FieldCreatedBy, field.TypeString, value) + } + if muo.mutation.CreatedByCleared() { + _spec.ClearField(media.FieldCreatedBy, field.TypeString) + } + if muo.mutation.OwnerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: 
media.OwnerTable, + Columns: []string{media.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := muo.mutation.OwnerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: media.OwnerTable, + Columns: []string{media.OwnerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Media{config: muo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{media.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + muo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/migrate/migrate.go b/backend/ent/migrate/migrate.go new file mode 100644 index 0000000..1956a6b --- /dev/null +++ b/backend/ent/migrate/migrate.go @@ -0,0 +1,64 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "context" + "fmt" + "io" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql/schema" +) + +var ( + // WithGlobalUniqueID sets the universal ids options to the migration. + // If this option is enabled, ent migration will allocate a 1<<32 range + // for the ids of each entity (table). + // Note that this option cannot be applied on tables that already exist. + WithGlobalUniqueID = schema.WithGlobalUniqueID + // WithDropColumn sets the drop column option to the migration. + // If this option is enabled, ent migration will drop old columns + // that were used for both fields and edges. This defaults to false. + WithDropColumn = schema.WithDropColumn + // WithDropIndex sets the drop index option to the migration. + // If this option is enabled, ent migration will drop old indexes + // that were defined in the schema. This defaults to false. + // Note that unique constraints are defined using `UNIQUE INDEX`, + // and therefore, it's recommended to enable this option to get more + // flexibility in the schema changes. + WithDropIndex = schema.WithDropIndex + // WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true. + WithForeignKeys = schema.WithForeignKeys +) + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv dialect.Driver +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { + return Create(ctx, s, Tables, opts...) +} + +// Create creates all table resources using the given schema driver. +func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Create(ctx, tables...) +} + +// WriteTo writes the schema changes to w instead of running them against the database. 
+// +// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { +// log.Fatal(err) +// } +func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { + return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...) +} diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go new file mode 100644 index 0000000..bb6da6d --- /dev/null +++ b/backend/ent/migrate/schema.go @@ -0,0 +1,479 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "entgo.io/ent/dialect/sql/schema" + "entgo.io/ent/schema/field" +) + +var ( + // CategoriesColumns holds the columns for the "categories" table. + CategoriesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + } + // CategoriesTable holds the schema information for the "categories" table. + CategoriesTable = &schema.Table{ + Name: "categories", + Columns: CategoriesColumns, + PrimaryKey: []*schema.Column{CategoriesColumns[0]}, + } + // CategoryContentsColumns holds the columns for the "category_contents" table. + CategoryContentsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "language_code", Type: field.TypeEnum, Enums: []string{"en", "zh-Hans", "zh-Hant"}}, + {Name: "name", Type: field.TypeString}, + {Name: "description", Type: field.TypeString, Nullable: true}, + {Name: "slug", Type: field.TypeString}, + {Name: "category_contents", Type: field.TypeInt, Nullable: true}, + } + // CategoryContentsTable holds the schema information for the "category_contents" table. + CategoryContentsTable = &schema.Table{ + Name: "category_contents", + Columns: CategoryContentsColumns, + PrimaryKey: []*schema.Column{CategoryContentsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "category_contents_categories_contents", + Columns: []*schema.Column{CategoryContentsColumns[5]}, + RefColumns: []*schema.Column{CategoriesColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "categorycontent_language_code_slug", + Unique: true, + Columns: []*schema.Column{CategoryContentsColumns[1], CategoryContentsColumns[4]}, + }, + { + Name: "categorycontent_language_code_category_contents", + Unique: true, + Columns: []*schema.Column{CategoryContentsColumns[1], CategoryContentsColumns[5]}, + }, + }, + } + // ContributorsColumns holds the columns for the "contributors" table. + ContributorsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "name", Type: field.TypeString}, + {Name: "avatar_url", Type: field.TypeString, Nullable: true}, + {Name: "bio", Type: field.TypeString, Nullable: true, Size: 2147483647}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "user_contributors", Type: field.TypeInt, Nullable: true}, + } + // ContributorsTable holds the schema information for the "contributors" table. + ContributorsTable = &schema.Table{ + Name: "contributors", + Columns: ContributorsColumns, + PrimaryKey: []*schema.Column{ContributorsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "contributors_users_contributors", + Columns: []*schema.Column{ContributorsColumns[6]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // ContributorRolesColumns holds the columns for the "contributor_roles" table. 
+ ContributorRolesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "name", Type: field.TypeString, Unique: true}, + } + // ContributorRolesTable holds the schema information for the "contributor_roles" table. + ContributorRolesTable = &schema.Table{ + Name: "contributor_roles", + Columns: ContributorRolesColumns, + PrimaryKey: []*schema.Column{ContributorRolesColumns[0]}, + } + // ContributorSocialLinksColumns holds the columns for the "contributor_social_links" table. + ContributorSocialLinksColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "type", Type: field.TypeEnum, Enums: []string{"twitter", "facebook", "instagram", "linkedin", "github", "website"}}, + {Name: "name", Type: field.TypeString, Nullable: true}, + {Name: "value", Type: field.TypeString}, + {Name: "contributor_social_links", Type: field.TypeInt, Nullable: true}, + } + // ContributorSocialLinksTable holds the schema information for the "contributor_social_links" table. + ContributorSocialLinksTable = &schema.Table{ + Name: "contributor_social_links", + Columns: ContributorSocialLinksColumns, + PrimaryKey: []*schema.Column{ContributorSocialLinksColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "contributor_social_links_contributors_social_links", + Columns: []*schema.Column{ContributorSocialLinksColumns[4]}, + RefColumns: []*schema.Column{ContributorsColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // DailiesColumns holds the columns for the "dailies" table. + DailiesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString, Unique: true, Size: 6}, + {Name: "image_url", Type: field.TypeString}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "category_daily_items", Type: field.TypeInt}, + {Name: "daily_category_daily_items", Type: field.TypeInt, Nullable: true}, + } + // DailiesTable holds the schema information for the "dailies" table. + DailiesTable = &schema.Table{ + Name: "dailies", + Columns: DailiesColumns, + PrimaryKey: []*schema.Column{DailiesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "dailies_categories_daily_items", + Columns: []*schema.Column{DailiesColumns[4]}, + RefColumns: []*schema.Column{CategoriesColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "dailies_daily_categories_daily_items", + Columns: []*schema.Column{DailiesColumns[5]}, + RefColumns: []*schema.Column{DailyCategoriesColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // DailyCategoriesColumns holds the columns for the "daily_categories" table. + DailyCategoriesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + } + // DailyCategoriesTable holds the schema information for the "daily_categories" table. + DailyCategoriesTable = &schema.Table{ + Name: "daily_categories", + Columns: DailyCategoriesColumns, + PrimaryKey: []*schema.Column{DailyCategoriesColumns[0]}, + } + // DailyCategoryContentsColumns holds the columns for the "daily_category_contents" table. 
+ DailyCategoryContentsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "language_code", Type: field.TypeEnum, Enums: []string{"en", "zh-Hans", "zh-Hant"}}, + {Name: "name", Type: field.TypeString}, + {Name: "daily_category_contents", Type: field.TypeInt, Nullable: true}, + } + // DailyCategoryContentsTable holds the schema information for the "daily_category_contents" table. + DailyCategoryContentsTable = &schema.Table{ + Name: "daily_category_contents", + Columns: DailyCategoryContentsColumns, + PrimaryKey: []*schema.Column{DailyCategoryContentsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "daily_category_contents_daily_categories_contents", + Columns: []*schema.Column{DailyCategoryContentsColumns[3]}, + RefColumns: []*schema.Column{DailyCategoriesColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "dailycategorycontent_language_code_daily_category_contents", + Unique: true, + Columns: []*schema.Column{DailyCategoryContentsColumns[1], DailyCategoryContentsColumns[3]}, + }, + }, + } + // DailyContentsColumns holds the columns for the "daily_contents" table. + DailyContentsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "language_code", Type: field.TypeEnum, Enums: []string{"en", "zh-Hans", "zh-Hant"}}, + {Name: "quote", Type: field.TypeString, Size: 2147483647}, + {Name: "daily_contents", Type: field.TypeString, Nullable: true, Size: 6}, + } + // DailyContentsTable holds the schema information for the "daily_contents" table. + DailyContentsTable = &schema.Table{ + Name: "daily_contents", + Columns: DailyContentsColumns, + PrimaryKey: []*schema.Column{DailyContentsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "daily_contents_dailies_contents", + Columns: []*schema.Column{DailyContentsColumns[3]}, + RefColumns: []*schema.Column{DailiesColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "dailycontent_language_code_daily_contents", + Unique: true, + Columns: []*schema.Column{DailyContentsColumns[1], DailyContentsColumns[3]}, + }, + }, + } + // MediaColumns holds the columns for the "media" table. + MediaColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "storage_id", Type: field.TypeString, Unique: true}, + {Name: "original_name", Type: field.TypeString}, + {Name: "mime_type", Type: field.TypeString}, + {Name: "size", Type: field.TypeInt64}, + {Name: "url", Type: field.TypeString}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "created_by", Type: field.TypeString, Nullable: true}, + {Name: "user_media", Type: field.TypeInt, Nullable: true}, + } + // MediaTable holds the schema information for the "media" table. + MediaTable = &schema.Table{ + Name: "media", + Columns: MediaColumns, + PrimaryKey: []*schema.Column{MediaColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "media_users_media", + Columns: []*schema.Column{MediaColumns[9]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // PermissionsColumns holds the columns for the "permissions" table. 
+ PermissionsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "resource", Type: field.TypeString}, + {Name: "action", Type: field.TypeString}, + {Name: "description", Type: field.TypeString, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + } + // PermissionsTable holds the schema information for the "permissions" table. + PermissionsTable = &schema.Table{ + Name: "permissions", + Columns: PermissionsColumns, + PrimaryKey: []*schema.Column{PermissionsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "permission_resource_action", + Unique: true, + Columns: []*schema.Column{PermissionsColumns[1], PermissionsColumns[2]}, + }, + }, + } + // PostsColumns holds the columns for the "posts" table. + PostsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "status", Type: field.TypeEnum, Enums: []string{"draft", "published", "archived"}, Default: "draft"}, + {Name: "slug", Type: field.TypeString, Unique: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "category_posts", Type: field.TypeInt, Nullable: true}, + } + // PostsTable holds the schema information for the "posts" table. + PostsTable = &schema.Table{ + Name: "posts", + Columns: PostsColumns, + PrimaryKey: []*schema.Column{PostsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "posts_categories_posts", + Columns: []*schema.Column{PostsColumns[5]}, + RefColumns: []*schema.Column{CategoriesColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // PostContentsColumns holds the columns for the "post_contents" table. + PostContentsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "language_code", Type: field.TypeEnum, Enums: []string{"en", "zh-Hans", "zh-Hant"}}, + {Name: "title", Type: field.TypeString}, + {Name: "content_markdown", Type: field.TypeString, Size: 2147483647}, + {Name: "summary", Type: field.TypeString}, + {Name: "meta_keywords", Type: field.TypeString, Nullable: true}, + {Name: "meta_description", Type: field.TypeString, Nullable: true}, + {Name: "slug", Type: field.TypeString}, + {Name: "post_contents", Type: field.TypeInt, Nullable: true}, + } + // PostContentsTable holds the schema information for the "post_contents" table. + PostContentsTable = &schema.Table{ + Name: "post_contents", + Columns: PostContentsColumns, + PrimaryKey: []*schema.Column{PostContentsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "post_contents_posts_contents", + Columns: []*schema.Column{PostContentsColumns[8]}, + RefColumns: []*schema.Column{PostsColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "postcontent_language_code_post_contents", + Unique: true, + Columns: []*schema.Column{PostContentsColumns[1], PostContentsColumns[8]}, + }, + }, + } + // PostContributorsColumns holds the columns for the "post_contributors" table. 
+ PostContributorsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "language_code", Type: field.TypeEnum, Nullable: true, Enums: []string{"en", "zh-Hans", "zh-Hant"}}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "contributor_posts", Type: field.TypeInt, Nullable: true}, + {Name: "contributor_role_post_contributors", Type: field.TypeInt, Nullable: true}, + {Name: "post_contributors", Type: field.TypeInt, Nullable: true}, + } + // PostContributorsTable holds the schema information for the "post_contributors" table. + PostContributorsTable = &schema.Table{ + Name: "post_contributors", + Columns: PostContributorsColumns, + PrimaryKey: []*schema.Column{PostContributorsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "post_contributors_contributors_posts", + Columns: []*schema.Column{PostContributorsColumns[3]}, + RefColumns: []*schema.Column{ContributorsColumns[0]}, + OnDelete: schema.SetNull, + }, + { + Symbol: "post_contributors_contributor_roles_post_contributors", + Columns: []*schema.Column{PostContributorsColumns[4]}, + RefColumns: []*schema.Column{ContributorRolesColumns[0]}, + OnDelete: schema.SetNull, + }, + { + Symbol: "post_contributors_posts_contributors", + Columns: []*schema.Column{PostContributorsColumns[5]}, + RefColumns: []*schema.Column{PostsColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "postcontributor_language_code_post_contributors_contributor_posts_contributor_role_post_contributors", + Unique: true, + Columns: []*schema.Column{PostContributorsColumns[1], PostContributorsColumns[5], PostContributorsColumns[3], PostContributorsColumns[4]}, + }, + }, + } + // RolesColumns holds the columns for the "roles" table. + RolesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "name", Type: field.TypeString, Unique: true}, + {Name: "description", Type: field.TypeString, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + } + // RolesTable holds the schema information for the "roles" table. + RolesTable = &schema.Table{ + Name: "roles", + Columns: RolesColumns, + PrimaryKey: []*schema.Column{RolesColumns[0]}, + } + // UsersColumns holds the columns for the "users" table. + UsersColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "email", Type: field.TypeString, Unique: true}, + {Name: "password_hash", Type: field.TypeString}, + {Name: "status", Type: field.TypeEnum, Enums: []string{"active", "inactive", "banned"}, Default: "active"}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + } + // UsersTable holds the schema information for the "users" table. + UsersTable = &schema.Table{ + Name: "users", + Columns: UsersColumns, + PrimaryKey: []*schema.Column{UsersColumns[0]}, + } + // RolePermissionsColumns holds the columns for the "role_permissions" table. + RolePermissionsColumns = []*schema.Column{ + {Name: "role_id", Type: field.TypeInt}, + {Name: "permission_id", Type: field.TypeInt}, + } + // RolePermissionsTable holds the schema information for the "role_permissions" table. 
+ RolePermissionsTable = &schema.Table{ + Name: "role_permissions", + Columns: RolePermissionsColumns, + PrimaryKey: []*schema.Column{RolePermissionsColumns[0], RolePermissionsColumns[1]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "role_permissions_role_id", + Columns: []*schema.Column{RolePermissionsColumns[0]}, + RefColumns: []*schema.Column{RolesColumns[0]}, + OnDelete: schema.Cascade, + }, + { + Symbol: "role_permissions_permission_id", + Columns: []*schema.Column{RolePermissionsColumns[1]}, + RefColumns: []*schema.Column{PermissionsColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + } + // UserRolesColumns holds the columns for the "user_roles" table. + UserRolesColumns = []*schema.Column{ + {Name: "user_id", Type: field.TypeInt}, + {Name: "role_id", Type: field.TypeInt}, + } + // UserRolesTable holds the schema information for the "user_roles" table. + UserRolesTable = &schema.Table{ + Name: "user_roles", + Columns: UserRolesColumns, + PrimaryKey: []*schema.Column{UserRolesColumns[0], UserRolesColumns[1]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "user_roles_user_id", + Columns: []*schema.Column{UserRolesColumns[0]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.Cascade, + }, + { + Symbol: "user_roles_role_id", + Columns: []*schema.Column{UserRolesColumns[1]}, + RefColumns: []*schema.Column{RolesColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + } + // Tables holds all the tables in the schema. + Tables = []*schema.Table{ + CategoriesTable, + CategoryContentsTable, + ContributorsTable, + ContributorRolesTable, + ContributorSocialLinksTable, + DailiesTable, + DailyCategoriesTable, + DailyCategoryContentsTable, + DailyContentsTable, + MediaTable, + PermissionsTable, + PostsTable, + PostContentsTable, + PostContributorsTable, + RolesTable, + UsersTable, + RolePermissionsTable, + UserRolesTable, + } +) + +func init() { + CategoryContentsTable.ForeignKeys[0].RefTable = CategoriesTable + ContributorsTable.ForeignKeys[0].RefTable = UsersTable + ContributorSocialLinksTable.ForeignKeys[0].RefTable = ContributorsTable + DailiesTable.ForeignKeys[0].RefTable = CategoriesTable + DailiesTable.ForeignKeys[1].RefTable = DailyCategoriesTable + DailyCategoryContentsTable.ForeignKeys[0].RefTable = DailyCategoriesTable + DailyContentsTable.ForeignKeys[0].RefTable = DailiesTable + MediaTable.ForeignKeys[0].RefTable = UsersTable + PostsTable.ForeignKeys[0].RefTable = CategoriesTable + PostContentsTable.ForeignKeys[0].RefTable = PostsTable + PostContributorsTable.ForeignKeys[0].RefTable = ContributorsTable + PostContributorsTable.ForeignKeys[1].RefTable = ContributorRolesTable + PostContributorsTable.ForeignKeys[2].RefTable = PostsTable + RolePermissionsTable.ForeignKeys[0].RefTable = RolesTable + RolePermissionsTable.ForeignKeys[1].RefTable = PermissionsTable + UserRolesTable.ForeignKeys[0].RefTable = UsersTable + UserRolesTable.ForeignKeys[1].RefTable = RolesTable +} diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go new file mode 100644 index 0000000..26cb34e --- /dev/null +++ b/backend/ent/mutation.go @@ -0,0 +1,10117 @@ +// Code generated by ent, DO NOT EDIT. 
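+// Example of consuming the mutation types defined below through a hook
+// (illustrative sketch, not generated output; "client" is an assumed
+// *ent.Client, and "hook" is the package generated under ent/hook):
+//
+//	client.Category.Use(func(next ent.Mutator) ent.Mutator {
+//		return hook.CategoryFunc(func(ctx context.Context, m *ent.CategoryMutation) (ent.Value, error) {
+//			// Touch updated_at on every Category write before it executes.
+//			m.SetUpdatedAt(time.Now())
+//			return next.Mutate(ctx, m)
+//		})
+//	})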
+ +package ent + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/categorycontent" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/contributorrole" + "tss-rocks-be/ent/contributorsociallink" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/dailycategory" + "tss-rocks-be/ent/dailycategorycontent" + "tss-rocks-be/ent/dailycontent" + "tss-rocks-be/ent/media" + "tss-rocks-be/ent/permission" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/postcontent" + "tss-rocks-be/ent/postcontributor" + "tss-rocks-be/ent/predicate" + "tss-rocks-be/ent/role" + "tss-rocks-be/ent/user" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +const ( + // Operation types. + OpCreate = ent.OpCreate + OpDelete = ent.OpDelete + OpDeleteOne = ent.OpDeleteOne + OpUpdate = ent.OpUpdate + OpUpdateOne = ent.OpUpdateOne + + // Node types. + TypeCategory = "Category" + TypeCategoryContent = "CategoryContent" + TypeContributor = "Contributor" + TypeContributorRole = "ContributorRole" + TypeContributorSocialLink = "ContributorSocialLink" + TypeDaily = "Daily" + TypeDailyCategory = "DailyCategory" + TypeDailyCategoryContent = "DailyCategoryContent" + TypeDailyContent = "DailyContent" + TypeMedia = "Media" + TypePermission = "Permission" + TypePost = "Post" + TypePostContent = "PostContent" + TypePostContributor = "PostContributor" + TypeRole = "Role" + TypeUser = "User" +) + +// CategoryMutation represents an operation that mutates the Category nodes in the graph. +type CategoryMutation struct { + config + op Op + typ string + id *int + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + contents map[int]struct{} + removedcontents map[int]struct{} + clearedcontents bool + posts map[int]struct{} + removedposts map[int]struct{} + clearedposts bool + daily_items map[string]struct{} + removeddaily_items map[string]struct{} + cleareddaily_items bool + done bool + oldValue func(context.Context) (*Category, error) + predicates []predicate.Category +} + +var _ ent.Mutation = (*CategoryMutation)(nil) + +// categoryOption allows management of the mutation configuration using functional options. +type categoryOption func(*CategoryMutation) + +// newCategoryMutation creates new mutation for the Category entity. +func newCategoryMutation(c config, op Op, opts ...categoryOption) *CategoryMutation { + m := &CategoryMutation{ + config: c, + op: op, + typ: TypeCategory, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withCategoryID sets the ID field of the mutation. +func withCategoryID(id int) categoryOption { + return func(m *CategoryMutation) { + var ( + err error + once sync.Once + value *Category + ) + m.oldValue = func(ctx context.Context) (*Category, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Category.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withCategory sets the old Category of the mutation. +func withCategory(node *Category) categoryOption { + return func(m *CategoryMutation) { + m.oldValue = func(context.Context) (*Category, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. 
+func (m CategoryMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m CategoryMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *CategoryMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be, +// or were, updated by the mutation. +func (m *CategoryMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Category.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *CategoryMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *CategoryMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Category entity. +// If the Category object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *CategoryMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *CategoryMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *CategoryMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *CategoryMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Category entity. +// If the Category object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *CategoryMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *CategoryMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// AddContentIDs adds the "contents" edge to the CategoryContent entity by ids. +func (m *CategoryMutation) AddContentIDs(ids ...int) { + if m.contents == nil { + m.contents = make(map[int]struct{}) + } + for i := range ids { + m.contents[ids[i]] = struct{}{} + } +} + +// ClearContents clears the "contents" edge to the CategoryContent entity. +func (m *CategoryMutation) ClearContents() { + m.clearedcontents = true +} + +// ContentsCleared reports if the "contents" edge to the CategoryContent entity was cleared. +func (m *CategoryMutation) ContentsCleared() bool { + return m.clearedcontents +} + +// RemoveContentIDs removes the "contents" edge to the CategoryContent entity by IDs. +func (m *CategoryMutation) RemoveContentIDs(ids ...int) { + if m.removedcontents == nil { + m.removedcontents = make(map[int]struct{}) + } + for i := range ids { + delete(m.contents, ids[i]) + m.removedcontents[ids[i]] = struct{}{} + } +} + +// RemovedContentsIDs returns the removed IDs of the "contents" edge to the CategoryContent entity. +func (m *CategoryMutation) RemovedContentsIDs() (ids []int) { + for id := range m.removedcontents { + ids = append(ids, id) + } + return +} + +// ContentsIDs returns the "contents" edge IDs in the mutation. +func (m *CategoryMutation) ContentsIDs() (ids []int) { + for id := range m.contents { + ids = append(ids, id) + } + return +} + +// ResetContents resets all changes to the "contents" edge. +func (m *CategoryMutation) ResetContents() { + m.contents = nil + m.clearedcontents = false + m.removedcontents = nil +} + +// AddPostIDs adds the "posts" edge to the Post entity by ids. +func (m *CategoryMutation) AddPostIDs(ids ...int) { + if m.posts == nil { + m.posts = make(map[int]struct{}) + } + for i := range ids { + m.posts[ids[i]] = struct{}{} + } +} + +// ClearPosts clears the "posts" edge to the Post entity. +func (m *CategoryMutation) ClearPosts() { + m.clearedposts = true +} + +// PostsCleared reports if the "posts" edge to the Post entity was cleared. +func (m *CategoryMutation) PostsCleared() bool { + return m.clearedposts +} + +// RemovePostIDs removes the "posts" edge to the Post entity by IDs. +func (m *CategoryMutation) RemovePostIDs(ids ...int) { + if m.removedposts == nil { + m.removedposts = make(map[int]struct{}) + } + for i := range ids { + delete(m.posts, ids[i]) + m.removedposts[ids[i]] = struct{}{} + } +} + +// RemovedPostsIDs returns the removed IDs of the "posts" edge to the Post entity. +func (m *CategoryMutation) RemovedPostsIDs() (ids []int) { + for id := range m.removedposts { + ids = append(ids, id) + } + return +} + +// PostsIDs returns the "posts" edge IDs in the mutation. +func (m *CategoryMutation) PostsIDs() (ids []int) { + for id := range m.posts { + ids = append(ids, id) + } + return +} + +// ResetPosts resets all changes to the "posts" edge.
+func (m *CategoryMutation) ResetPosts() { + m.posts = nil + m.clearedposts = false + m.removedposts = nil +} + +// AddDailyItemIDs adds the "daily_items" edge to the Daily entity by ids. +func (m *CategoryMutation) AddDailyItemIDs(ids ...string) { + if m.daily_items == nil { + m.daily_items = make(map[string]struct{}) + } + for i := range ids { + m.daily_items[ids[i]] = struct{}{} + } +} + +// ClearDailyItems clears the "daily_items" edge to the Daily entity. +func (m *CategoryMutation) ClearDailyItems() { + m.cleareddaily_items = true +} + +// DailyItemsCleared reports if the "daily_items" edge to the Daily entity was cleared. +func (m *CategoryMutation) DailyItemsCleared() bool { + return m.cleareddaily_items +} + +// RemoveDailyItemIDs removes the "daily_items" edge to the Daily entity by IDs. +func (m *CategoryMutation) RemoveDailyItemIDs(ids ...string) { + if m.removeddaily_items == nil { + m.removeddaily_items = make(map[string]struct{}) + } + for i := range ids { + delete(m.daily_items, ids[i]) + m.removeddaily_items[ids[i]] = struct{}{} + } +} + +// RemovedDailyItems returns the removed IDs of the "daily_items" edge to the Daily entity. +func (m *CategoryMutation) RemovedDailyItemsIDs() (ids []string) { + for id := range m.removeddaily_items { + ids = append(ids, id) + } + return +} + +// DailyItemsIDs returns the "daily_items" edge IDs in the mutation. +func (m *CategoryMutation) DailyItemsIDs() (ids []string) { + for id := range m.daily_items { + ids = append(ids, id) + } + return +} + +// ResetDailyItems resets all changes to the "daily_items" edge. +func (m *CategoryMutation) ResetDailyItems() { + m.daily_items = nil + m.cleareddaily_items = false + m.removeddaily_items = nil +} + +// Where appends a list predicates to the CategoryMutation builder. +func (m *CategoryMutation) Where(ps ...predicate.Category) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the CategoryMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *CategoryMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Category, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *CategoryMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *CategoryMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Category). +func (m *CategoryMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *CategoryMutation) Fields() []string { + fields := make([]string, 0, 2) + if m.created_at != nil { + fields = append(fields, category.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, category.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *CategoryMutation) Field(name string) (ent.Value, bool) { + switch name { + case category.FieldCreatedAt: + return m.CreatedAt() + case category.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. 
An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *CategoryMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case category.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case category.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown Category field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *CategoryMutation) SetField(name string, value ent.Value) error { + switch name { + case category.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case category.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown Category field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *CategoryMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *CategoryMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *CategoryMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Category numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *CategoryMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *CategoryMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *CategoryMutation) ClearField(name string) error { + return fmt.Errorf("unknown Category nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *CategoryMutation) ResetField(name string) error { + switch name { + case category.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case category.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown Category field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *CategoryMutation) AddedEdges() []string { + edges := make([]string, 0, 3) + if m.contents != nil { + edges = append(edges, category.EdgeContents) + } + if m.posts != nil { + edges = append(edges, category.EdgePosts) + } + if m.daily_items != nil { + edges = append(edges, category.EdgeDailyItems) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
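The Fields/Field/OldField/SetField family above is the generic surface that ent hooks program against: a hook can enumerate whatever changed on any mutation without importing the per-entity packages. A minimal sketch of a schema-agnostic audit hook, assuming a hypothetical hooks package and module path (neither is part of this patch):

package hooks

import (
	"context"
	"log"

	"example.com/backend/ent" // assumed module path of the generated package
)

// AuditFields logs every staged field and, on update-one operations, its
// previous value, using only the generic ent.Mutation interface.
func AuditFields(next ent.Mutator) ent.Mutator {
	return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
		for _, name := range m.Fields() {
			newv, _ := m.Field(name)
			if m.Op().Is(ent.OpUpdateOne) {
				// OldField is only legal on UpdateOne; it queries the
				// database at most once per mutation via the memoized
				// oldValue closure set up by the with*ID options.
				oldv, err := m.OldField(ctx, name)
				if err != nil {
					return nil, err
				}
				log.Printf("%s.%s: %v -> %v", m.Type(), name, oldv, newv)
			} else {
				log.Printf("%s.%s: set to %v", m.Type(), name, newv)
			}
		}
		return next.Mutate(ctx, m)
	})
}

Registered once with client.Use(hooks.AuditFields), this applies to CategoryMutation and every other mutation type in this file.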
+func (m *CategoryMutation) AddedIDs(name string) []ent.Value { + switch name { + case category.EdgeContents: + ids := make([]ent.Value, 0, len(m.contents)) + for id := range m.contents { + ids = append(ids, id) + } + return ids + case category.EdgePosts: + ids := make([]ent.Value, 0, len(m.posts)) + for id := range m.posts { + ids = append(ids, id) + } + return ids + case category.EdgeDailyItems: + ids := make([]ent.Value, 0, len(m.daily_items)) + for id := range m.daily_items { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *CategoryMutation) RemovedEdges() []string { + edges := make([]string, 0, 3) + if m.removedcontents != nil { + edges = append(edges, category.EdgeContents) + } + if m.removedposts != nil { + edges = append(edges, category.EdgePosts) + } + if m.removeddaily_items != nil { + edges = append(edges, category.EdgeDailyItems) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *CategoryMutation) RemovedIDs(name string) []ent.Value { + switch name { + case category.EdgeContents: + ids := make([]ent.Value, 0, len(m.removedcontents)) + for id := range m.removedcontents { + ids = append(ids, id) + } + return ids + case category.EdgePosts: + ids := make([]ent.Value, 0, len(m.removedposts)) + for id := range m.removedposts { + ids = append(ids, id) + } + return ids + case category.EdgeDailyItems: + ids := make([]ent.Value, 0, len(m.removeddaily_items)) + for id := range m.removeddaily_items { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *CategoryMutation) ClearedEdges() []string { + edges := make([]string, 0, 3) + if m.clearedcontents { + edges = append(edges, category.EdgeContents) + } + if m.clearedposts { + edges = append(edges, category.EdgePosts) + } + if m.cleareddaily_items { + edges = append(edges, category.EdgeDailyItems) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *CategoryMutation) EdgeCleared(name string) bool { + switch name { + case category.EdgeContents: + return m.clearedcontents + case category.EdgePosts: + return m.clearedposts + case category.EdgeDailyItems: + return m.cleareddaily_items + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *CategoryMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown Category unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *CategoryMutation) ResetEdge(name string) error { + switch name { + case category.EdgeContents: + m.ResetContents() + return nil + case category.EdgePosts: + m.ResetPosts() + return nil + case category.EdgeDailyItems: + m.ResetDailyItems() + return nil + } + return fmt.Errorf("unknown Category edge %s", name) +} + +// CategoryContentMutation represents an operation that mutates the CategoryContent nodes in the graph. 
+type CategoryContentMutation struct {
+	config
+	op              Op
+	typ             string
+	id              *int
+	language_code   *categorycontent.LanguageCode
+	name            *string
+	description     *string
+	slug            *string
+	clearedFields   map[string]struct{}
+	category        *int
+	clearedcategory bool
+	done            bool
+	oldValue        func(context.Context) (*CategoryContent, error)
+	predicates      []predicate.CategoryContent
+}
+
+var _ ent.Mutation = (*CategoryContentMutation)(nil)
+
+// categorycontentOption allows management of the mutation configuration using functional options.
+type categorycontentOption func(*CategoryContentMutation)
+
+// newCategoryContentMutation creates new mutation for the CategoryContent entity.
+func newCategoryContentMutation(c config, op Op, opts ...categorycontentOption) *CategoryContentMutation {
+	m := &CategoryContentMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeCategoryContent,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withCategoryContentID sets the ID field of the mutation.
+func withCategoryContentID(id int) categorycontentOption {
+	return func(m *CategoryContentMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *CategoryContent
+		)
+		m.oldValue = func(ctx context.Context) (*CategoryContent, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().CategoryContent.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withCategoryContent sets the old CategoryContent of the mutation.
+func withCategoryContent(node *CategoryContent) categorycontentOption {
+	return func(m *CategoryContentMutation) {
+		m.oldValue = func(context.Context) (*CategoryContent, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m CategoryContentMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m CategoryContentMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *CategoryContentMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *CategoryContentMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().CategoryContent.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetLanguageCode sets the "language_code" field.
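The withCategoryContentID option above wraps the old-value fetch in sync.Once, so no matter how many Old* accessors a hook calls, the database is hit at most once and the same value/error pair is replayed to every caller. The same memoization pattern in isolation, as a standalone sketch (not code from this patch):

package main

import (
	"context"
	"fmt"
	"sync"
)

// lazy returns a loader that runs fetch at most once and replays the
// cached result (value and error) to every caller, mirroring how the
// mutation's oldValue closure behaves.
func lazy[T any](fetch func(context.Context) (T, error)) func(context.Context) (T, error) {
	var (
		once sync.Once
		val  T
		err  error
	)
	return func(ctx context.Context) (T, error) {
		once.Do(func() { val, err = fetch(ctx) })
		return val, err
	}
}

func main() {
	calls := 0
	load := lazy(func(context.Context) (string, error) {
		calls++
		return "cached", nil
	})
	load(context.Background())
	v, _ := load(context.Background())
	fmt.Println(v, calls) // prints: cached 1
}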
+func (m *CategoryContentMutation) SetLanguageCode(cc categorycontent.LanguageCode) { + m.language_code = &cc +} + +// LanguageCode returns the value of the "language_code" field in the mutation. +func (m *CategoryContentMutation) LanguageCode() (r categorycontent.LanguageCode, exists bool) { + v := m.language_code + if v == nil { + return + } + return *v, true +} + +// OldLanguageCode returns the old "language_code" field's value of the CategoryContent entity. +// If the CategoryContent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *CategoryContentMutation) OldLanguageCode(ctx context.Context) (v categorycontent.LanguageCode, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLanguageCode is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLanguageCode requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLanguageCode: %w", err) + } + return oldValue.LanguageCode, nil +} + +// ResetLanguageCode resets all changes to the "language_code" field. +func (m *CategoryContentMutation) ResetLanguageCode() { + m.language_code = nil +} + +// SetName sets the "name" field. +func (m *CategoryContentMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *CategoryContentMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the CategoryContent entity. +// If the CategoryContent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *CategoryContentMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *CategoryContentMutation) ResetName() { + m.name = nil +} + +// SetDescription sets the "description" field. +func (m *CategoryContentMutation) SetDescription(s string) { + m.description = &s +} + +// Description returns the value of the "description" field in the mutation. +func (m *CategoryContentMutation) Description() (r string, exists bool) { + v := m.description + if v == nil { + return + } + return *v, true +} + +// OldDescription returns the old "description" field's value of the CategoryContent entity. +// If the CategoryContent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *CategoryContentMutation) OldDescription(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDescription: %w", err) + } + return oldValue.Description, nil +} + +// ClearDescription clears the value of the "description" field. +func (m *CategoryContentMutation) ClearDescription() { + m.description = nil + m.clearedFields[categorycontent.FieldDescription] = struct{}{} +} + +// DescriptionCleared returns if the "description" field was cleared in this mutation. +func (m *CategoryContentMutation) DescriptionCleared() bool { + _, ok := m.clearedFields[categorycontent.FieldDescription] + return ok +} + +// ResetDescription resets all changes to the "description" field. +func (m *CategoryContentMutation) ResetDescription() { + m.description = nil + delete(m.clearedFields, categorycontent.FieldDescription) +} + +// SetSlug sets the "slug" field. +func (m *CategoryContentMutation) SetSlug(s string) { + m.slug = &s +} + +// Slug returns the value of the "slug" field in the mutation. +func (m *CategoryContentMutation) Slug() (r string, exists bool) { + v := m.slug + if v == nil { + return + } + return *v, true +} + +// OldSlug returns the old "slug" field's value of the CategoryContent entity. +// If the CategoryContent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *CategoryContentMutation) OldSlug(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSlug is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSlug requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSlug: %w", err) + } + return oldValue.Slug, nil +} + +// ResetSlug resets all changes to the "slug" field. +func (m *CategoryContentMutation) ResetSlug() { + m.slug = nil +} + +// SetCategoryID sets the "category" edge to the Category entity by id. +func (m *CategoryContentMutation) SetCategoryID(id int) { + m.category = &id +} + +// ClearCategory clears the "category" edge to the Category entity. +func (m *CategoryContentMutation) ClearCategory() { + m.clearedcategory = true +} + +// CategoryCleared reports if the "category" edge to the Category entity was cleared. +func (m *CategoryContentMutation) CategoryCleared() bool { + return m.clearedcategory +} + +// CategoryID returns the "category" edge ID in the mutation. +func (m *CategoryContentMutation) CategoryID() (id int, exists bool) { + if m.category != nil { + return *m.category, true + } + return +} + +// CategoryIDs returns the "category" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// CategoryID instead. It exists only for internal usage by the builders. +func (m *CategoryContentMutation) CategoryIDs() (ids []int) { + if id := m.category; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetCategory resets all changes to the "category" edge. 
+func (m *CategoryContentMutation) ResetCategory() {
+	m.category = nil
+	m.clearedcategory = false
+}
+
+// Where appends a list of predicates to the CategoryContentMutation builder.
+func (m *CategoryContentMutation) Where(ps ...predicate.CategoryContent) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the CategoryContentMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *CategoryContentMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.CategoryContent, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *CategoryContentMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *CategoryContentMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (CategoryContent).
+func (m *CategoryContentMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *CategoryContentMutation) Fields() []string {
+	fields := make([]string, 0, 4)
+	if m.language_code != nil {
+		fields = append(fields, categorycontent.FieldLanguageCode)
+	}
+	if m.name != nil {
+		fields = append(fields, categorycontent.FieldName)
+	}
+	if m.description != nil {
+		fields = append(fields, categorycontent.FieldDescription)
+	}
+	if m.slug != nil {
+		fields = append(fields, categorycontent.FieldSlug)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *CategoryContentMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case categorycontent.FieldLanguageCode:
+		return m.LanguageCode()
+	case categorycontent.FieldName:
+		return m.Name()
+	case categorycontent.FieldDescription:
+		return m.Description()
+	case categorycontent.FieldSlug:
+		return m.Slug()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *CategoryContentMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+	switch name {
+	case categorycontent.FieldLanguageCode:
+		return m.OldLanguageCode(ctx)
+	case categorycontent.FieldName:
+		return m.OldName(ctx)
+	case categorycontent.FieldDescription:
+		return m.OldDescription(ctx)
+	case categorycontent.FieldSlug:
+		return m.OldSlug(ctx)
+	}
+	return nil, fmt.Errorf("unknown CategoryContent field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
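SetField, implemented next, is the write-side counterpart of Field: generic code can set a field by name and gets a type check for free. A sketch of a hook that normalizes the "slug" field through this interface (the hard-coded field name matches categorycontent.FieldSlug here, but treating it as a plain string is an illustrative assumption):

package hooks

import (
	"context"
	"strings"

	"example.com/backend/ent" // assumed module path of the generated package
)

// NormalizeSlug lower-cases the "slug" field on any mutation that stages
// one, relying only on the generic Field/SetField accessors.
func NormalizeSlug(next ent.Mutator) ent.Mutator {
	return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
		if v, ok := m.Field("slug"); ok {
			if s, ok := v.(string); ok {
				if err := m.SetField("slug", strings.ToLower(s)); err != nil {
					return nil, err
				}
			}
		}
		return next.Mutate(ctx, m)
	})
}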
+func (m *CategoryContentMutation) SetField(name string, value ent.Value) error { + switch name { + case categorycontent.FieldLanguageCode: + v, ok := value.(categorycontent.LanguageCode) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLanguageCode(v) + return nil + case categorycontent.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case categorycontent.FieldDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDescription(v) + return nil + case categorycontent.FieldSlug: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSlug(v) + return nil + } + return fmt.Errorf("unknown CategoryContent field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *CategoryContentMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *CategoryContentMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *CategoryContentMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown CategoryContent numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *CategoryContentMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(categorycontent.FieldDescription) { + fields = append(fields, categorycontent.FieldDescription) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *CategoryContentMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *CategoryContentMutation) ClearField(name string) error { + switch name { + case categorycontent.FieldDescription: + m.ClearDescription() + return nil + } + return fmt.Errorf("unknown CategoryContent nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *CategoryContentMutation) ResetField(name string) error { + switch name { + case categorycontent.FieldLanguageCode: + m.ResetLanguageCode() + return nil + case categorycontent.FieldName: + m.ResetName() + return nil + case categorycontent.FieldDescription: + m.ResetDescription() + return nil + case categorycontent.FieldSlug: + m.ResetSlug() + return nil + } + return fmt.Errorf("unknown CategoryContent field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. 
+func (m *CategoryContentMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.category != nil { + edges = append(edges, categorycontent.EdgeCategory) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *CategoryContentMutation) AddedIDs(name string) []ent.Value { + switch name { + case categorycontent.EdgeCategory: + if id := m.category; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *CategoryContentMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *CategoryContentMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *CategoryContentMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedcategory { + edges = append(edges, categorycontent.EdgeCategory) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *CategoryContentMutation) EdgeCleared(name string) bool { + switch name { + case categorycontent.EdgeCategory: + return m.clearedcategory + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *CategoryContentMutation) ClearEdge(name string) error { + switch name { + case categorycontent.EdgeCategory: + m.ClearCategory() + return nil + } + return fmt.Errorf("unknown CategoryContent unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *CategoryContentMutation) ResetEdge(name string) error { + switch name { + case categorycontent.EdgeCategory: + m.ResetCategory() + return nil + } + return fmt.Errorf("unknown CategoryContent edge %s", name) +} + +// ContributorMutation represents an operation that mutates the Contributor nodes in the graph. +type ContributorMutation struct { + config + op Op + typ string + id *int + name *string + avatar_url *string + bio *string + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + user *int + cleareduser bool + social_links map[int]struct{} + removedsocial_links map[int]struct{} + clearedsocial_links bool + posts map[int]struct{} + removedposts map[int]struct{} + clearedposts bool + done bool + oldValue func(context.Context) (*Contributor, error) + predicates []predicate.Contributor +} + +var _ ent.Mutation = (*ContributorMutation)(nil) + +// contributorOption allows management of the mutation configuration using functional options. +type contributorOption func(*ContributorMutation) + +// newContributorMutation creates new mutation for the Contributor entity. +func newContributorMutation(c config, op Op, opts ...contributorOption) *ContributorMutation { + m := &ContributorMutation{ + config: c, + op: op, + typ: TypeContributor, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withContributorID sets the ID field of the mutation. 
+func withContributorID(id int) contributorOption {
+	return func(m *ContributorMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *Contributor
+		)
+		m.oldValue = func(ctx context.Context) (*Contributor, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().Contributor.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withContributor sets the old Contributor of the mutation.
+func withContributor(node *Contributor) contributorOption {
+	return func(m *ContributorMutation) {
+		m.oldValue = func(context.Context) (*Contributor, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m ContributorMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m ContributorMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *ContributorMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *ContributorMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().Contributor.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetName sets the "name" field.
+func (m *ContributorMutation) SetName(s string) {
+	m.name = &s
+}
+
+// Name returns the value of the "name" field in the mutation.
+func (m *ContributorMutation) Name() (r string, exists bool) {
+	v := m.name
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldName returns the old "name" field's value of the Contributor entity.
+// If the Contributor object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ContributorMutation) OldName(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldName is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldName requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldName: %w", err)
+	}
+	return oldValue.Name, nil
+}
+
+// ResetName resets all changes to the "name" field.
+func (m *ContributorMutation) ResetName() {
+	m.name = nil
+}
+
+// SetAvatarURL sets the "avatar_url" field.
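Old accessors such as OldName above make before/after comparisons cheap inside hooks. A sketch using the generated typed helper from backend/ent/hook (the module path and log destination are assumptions):

package hooks

import (
	"context"
	"log"

	"example.com/backend/ent"      // assumed module path
	"example.com/backend/ent/hook" // generated typed hook helpers
)

// LogRenames reports contributor renames by comparing the staged "name"
// against its previous value. OldName is only legal on UpdateOne, hence
// the operation check.
func LogRenames(next ent.Mutator) ent.Mutator {
	return hook.ContributorFunc(func(ctx context.Context, m *ent.ContributorMutation) (ent.Value, error) {
		if newName, ok := m.Name(); ok && m.Op().Is(ent.OpUpdateOne) {
			oldName, err := m.OldName(ctx)
			if err != nil {
				return nil, err
			}
			if oldName != newName {
				log.Printf("contributor rename: %q -> %q", oldName, newName)
			}
		}
		return next.Mutate(ctx, m)
	})
}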
+func (m *ContributorMutation) SetAvatarURL(s string) { + m.avatar_url = &s +} + +// AvatarURL returns the value of the "avatar_url" field in the mutation. +func (m *ContributorMutation) AvatarURL() (r string, exists bool) { + v := m.avatar_url + if v == nil { + return + } + return *v, true +} + +// OldAvatarURL returns the old "avatar_url" field's value of the Contributor entity. +// If the Contributor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ContributorMutation) OldAvatarURL(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAvatarURL is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAvatarURL requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAvatarURL: %w", err) + } + return oldValue.AvatarURL, nil +} + +// ClearAvatarURL clears the value of the "avatar_url" field. +func (m *ContributorMutation) ClearAvatarURL() { + m.avatar_url = nil + m.clearedFields[contributor.FieldAvatarURL] = struct{}{} +} + +// AvatarURLCleared returns if the "avatar_url" field was cleared in this mutation. +func (m *ContributorMutation) AvatarURLCleared() bool { + _, ok := m.clearedFields[contributor.FieldAvatarURL] + return ok +} + +// ResetAvatarURL resets all changes to the "avatar_url" field. +func (m *ContributorMutation) ResetAvatarURL() { + m.avatar_url = nil + delete(m.clearedFields, contributor.FieldAvatarURL) +} + +// SetBio sets the "bio" field. +func (m *ContributorMutation) SetBio(s string) { + m.bio = &s +} + +// Bio returns the value of the "bio" field in the mutation. +func (m *ContributorMutation) Bio() (r string, exists bool) { + v := m.bio + if v == nil { + return + } + return *v, true +} + +// OldBio returns the old "bio" field's value of the Contributor entity. +// If the Contributor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ContributorMutation) OldBio(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBio is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBio requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBio: %w", err) + } + return oldValue.Bio, nil +} + +// ClearBio clears the value of the "bio" field. +func (m *ContributorMutation) ClearBio() { + m.bio = nil + m.clearedFields[contributor.FieldBio] = struct{}{} +} + +// BioCleared returns if the "bio" field was cleared in this mutation. +func (m *ContributorMutation) BioCleared() bool { + _, ok := m.clearedFields[contributor.FieldBio] + return ok +} + +// ResetBio resets all changes to the "bio" field. +func (m *ContributorMutation) ResetBio() { + m.bio = nil + delete(m.clearedFields, contributor.FieldBio) +} + +// SetCreatedAt sets the "created_at" field. +func (m *ContributorMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. 
+func (m *ContributorMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Contributor entity. +// If the Contributor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ContributorMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *ContributorMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *ContributorMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *ContributorMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Contributor entity. +// If the Contributor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ContributorMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *ContributorMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetUserID sets the "user" edge to the User entity by id. +func (m *ContributorMutation) SetUserID(id int) { + m.user = &id +} + +// ClearUser clears the "user" edge to the User entity. +func (m *ContributorMutation) ClearUser() { + m.cleareduser = true +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *ContributorMutation) UserCleared() bool { + return m.cleareduser +} + +// UserID returns the "user" edge ID in the mutation. +func (m *ContributorMutation) UserID() (id int, exists bool) { + if m.user != nil { + return *m.user, true + } + return +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *ContributorMutation) UserIDs() (ids []int) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *ContributorMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// AddSocialLinkIDs adds the "social_links" edge to the ContributorSocialLink entity by ids. 
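The unique "user" edge above (SetUserID/ClearUser/UserID) is normally driven through the generated update builders rather than called directly. Detaching a contributor from its user account, for instance, is one builder chain (a sketch; the function name is illustrative):

package main

import (
	"context"

	"example.com/backend/ent" // assumed module path
)

// detachUser clears the unique "user" edge of one contributor; the builder
// forwards to ContributorMutation.ClearUser and persists on Exec.
func detachUser(ctx context.Context, client *ent.Client, contributorID int) error {
	return client.Contributor.
		UpdateOneID(contributorID).
		ClearUser().
		Exec(ctx)
}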
+func (m *ContributorMutation) AddSocialLinkIDs(ids ...int) {
+	if m.social_links == nil {
+		m.social_links = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.social_links[ids[i]] = struct{}{}
+	}
+}
+
+// ClearSocialLinks clears the "social_links" edge to the ContributorSocialLink entity.
+func (m *ContributorMutation) ClearSocialLinks() {
+	m.clearedsocial_links = true
+}
+
+// SocialLinksCleared reports if the "social_links" edge to the ContributorSocialLink entity was cleared.
+func (m *ContributorMutation) SocialLinksCleared() bool {
+	return m.clearedsocial_links
+}
+
+// RemoveSocialLinkIDs removes the "social_links" edge to the ContributorSocialLink entity by IDs.
+func (m *ContributorMutation) RemoveSocialLinkIDs(ids ...int) {
+	if m.removedsocial_links == nil {
+		m.removedsocial_links = make(map[int]struct{})
+	}
+	for i := range ids {
+		delete(m.social_links, ids[i])
+		m.removedsocial_links[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedSocialLinksIDs returns the removed IDs of the "social_links" edge to the ContributorSocialLink entity.
+func (m *ContributorMutation) RemovedSocialLinksIDs() (ids []int) {
+	for id := range m.removedsocial_links {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// SocialLinksIDs returns the "social_links" edge IDs in the mutation.
+func (m *ContributorMutation) SocialLinksIDs() (ids []int) {
+	for id := range m.social_links {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetSocialLinks resets all changes to the "social_links" edge.
+func (m *ContributorMutation) ResetSocialLinks() {
+	m.social_links = nil
+	m.clearedsocial_links = false
+	m.removedsocial_links = nil
+}
+
+// AddPostIDs adds the "posts" edge to the PostContributor entity by ids.
+func (m *ContributorMutation) AddPostIDs(ids ...int) {
+	if m.posts == nil {
+		m.posts = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.posts[ids[i]] = struct{}{}
+	}
+}
+
+// ClearPosts clears the "posts" edge to the PostContributor entity.
+func (m *ContributorMutation) ClearPosts() {
+	m.clearedposts = true
+}
+
+// PostsCleared reports if the "posts" edge to the PostContributor entity was cleared.
+func (m *ContributorMutation) PostsCleared() bool {
+	return m.clearedposts
+}
+
+// RemovePostIDs removes the "posts" edge to the PostContributor entity by IDs.
+func (m *ContributorMutation) RemovePostIDs(ids ...int) {
+	if m.removedposts == nil {
+		m.removedposts = make(map[int]struct{})
+	}
+	for i := range ids {
+		delete(m.posts, ids[i])
+		m.removedposts[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedPostsIDs returns the removed IDs of the "posts" edge to the PostContributor entity.
+func (m *ContributorMutation) RemovedPostsIDs() (ids []int) {
+	for id := range m.removedposts {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// PostsIDs returns the "posts" edge IDs in the mutation.
+func (m *ContributorMutation) PostsIDs() (ids []int) {
+	for id := range m.posts {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetPosts resets all changes to the "posts" edge.
+func (m *ContributorMutation) ResetPosts() {
+	m.posts = nil
+	m.clearedposts = false
+	m.removedposts = nil
+}
+
+// Where appends a list of predicates to the ContributorMutation builder.
+func (m *ContributorMutation) Where(ps ...predicate.Contributor) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the ContributorMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
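WhereP, documented above and implemented next, accepts raw selector functions and wraps each one in a typed predicate. That is the escape hatch for conditions the generated contributor package does not cover, e.g. (a sketch; the empty-name condition and placeholder bio are purely illustrative):

package main

import (
	"context"

	"entgo.io/ent/dialect/sql"

	"example.com/backend/ent" // assumed module path
)

// fillMissingBios sets a placeholder bio on contributors whose name is
// empty, attaching a storage-level predicate through the mutation.
func fillMissingBios(ctx context.Context, client *ent.Client) (int, error) {
	upd := client.Contributor.Update().SetBio("bio pending")
	upd.Mutation().WhereP(func(s *sql.Selector) {
		s.Where(sql.EQ(s.C("name"), ""))
	})
	return upd.Save(ctx)
}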
+func (m *ContributorMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Contributor, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *ContributorMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *ContributorMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Contributor). +func (m *ContributorMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *ContributorMutation) Fields() []string { + fields := make([]string, 0, 5) + if m.name != nil { + fields = append(fields, contributor.FieldName) + } + if m.avatar_url != nil { + fields = append(fields, contributor.FieldAvatarURL) + } + if m.bio != nil { + fields = append(fields, contributor.FieldBio) + } + if m.created_at != nil { + fields = append(fields, contributor.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, contributor.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *ContributorMutation) Field(name string) (ent.Value, bool) { + switch name { + case contributor.FieldName: + return m.Name() + case contributor.FieldAvatarURL: + return m.AvatarURL() + case contributor.FieldBio: + return m.Bio() + case contributor.FieldCreatedAt: + return m.CreatedAt() + case contributor.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *ContributorMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case contributor.FieldName: + return m.OldName(ctx) + case contributor.FieldAvatarURL: + return m.OldAvatarURL(ctx) + case contributor.FieldBio: + return m.OldBio(ctx) + case contributor.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case contributor.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown Contributor field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *ContributorMutation) SetField(name string, value ent.Value) error { + switch name { + case contributor.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case contributor.FieldAvatarURL: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAvatarURL(v) + return nil + case contributor.FieldBio: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBio(v) + return nil + case contributor.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case contributor.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown Contributor field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *ContributorMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *ContributorMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *ContributorMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Contributor numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *ContributorMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(contributor.FieldAvatarURL) { + fields = append(fields, contributor.FieldAvatarURL) + } + if m.FieldCleared(contributor.FieldBio) { + fields = append(fields, contributor.FieldBio) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *ContributorMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *ContributorMutation) ClearField(name string) error { + switch name { + case contributor.FieldAvatarURL: + m.ClearAvatarURL() + return nil + case contributor.FieldBio: + m.ClearBio() + return nil + } + return fmt.Errorf("unknown Contributor nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *ContributorMutation) ResetField(name string) error { + switch name { + case contributor.FieldName: + m.ResetName() + return nil + case contributor.FieldAvatarURL: + m.ResetAvatarURL() + return nil + case contributor.FieldBio: + m.ResetBio() + return nil + case contributor.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case contributor.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown Contributor field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *ContributorMutation) AddedEdges() []string { + edges := make([]string, 0, 3) + if m.user != nil { + edges = append(edges, contributor.EdgeUser) + } + if m.social_links != nil { + edges = append(edges, contributor.EdgeSocialLinks) + } + if m.posts != nil { + edges = append(edges, contributor.EdgePosts) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *ContributorMutation) AddedIDs(name string) []ent.Value { + switch name { + case contributor.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + case contributor.EdgeSocialLinks: + ids := make([]ent.Value, 0, len(m.social_links)) + for id := range m.social_links { + ids = append(ids, id) + } + return ids + case contributor.EdgePosts: + ids := make([]ent.Value, 0, len(m.posts)) + for id := range m.posts { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *ContributorMutation) RemovedEdges() []string { + edges := make([]string, 0, 3) + if m.removedsocial_links != nil { + edges = append(edges, contributor.EdgeSocialLinks) + } + if m.removedposts != nil { + edges = append(edges, contributor.EdgePosts) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *ContributorMutation) RemovedIDs(name string) []ent.Value { + switch name { + case contributor.EdgeSocialLinks: + ids := make([]ent.Value, 0, len(m.removedsocial_links)) + for id := range m.removedsocial_links { + ids = append(ids, id) + } + return ids + case contributor.EdgePosts: + ids := make([]ent.Value, 0, len(m.removedposts)) + for id := range m.removedposts { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *ContributorMutation) ClearedEdges() []string { + edges := make([]string, 0, 3) + if m.cleareduser { + edges = append(edges, contributor.EdgeUser) + } + if m.clearedsocial_links { + edges = append(edges, contributor.EdgeSocialLinks) + } + if m.clearedposts { + edges = append(edges, contributor.EdgePosts) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *ContributorMutation) EdgeCleared(name string) bool { + switch name { + case contributor.EdgeUser: + return m.cleareduser + case contributor.EdgeSocialLinks: + return m.clearedsocial_links + case contributor.EdgePosts: + return m.clearedposts + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. 
+func (m *ContributorMutation) ClearEdge(name string) error { + switch name { + case contributor.EdgeUser: + m.ClearUser() + return nil + } + return fmt.Errorf("unknown Contributor unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *ContributorMutation) ResetEdge(name string) error { + switch name { + case contributor.EdgeUser: + m.ResetUser() + return nil + case contributor.EdgeSocialLinks: + m.ResetSocialLinks() + return nil + case contributor.EdgePosts: + m.ResetPosts() + return nil + } + return fmt.Errorf("unknown Contributor edge %s", name) +} + +// ContributorRoleMutation represents an operation that mutates the ContributorRole nodes in the graph. +type ContributorRoleMutation struct { + config + op Op + typ string + id *int + name *string + clearedFields map[string]struct{} + post_contributors map[int]struct{} + removedpost_contributors map[int]struct{} + clearedpost_contributors bool + done bool + oldValue func(context.Context) (*ContributorRole, error) + predicates []predicate.ContributorRole +} + +var _ ent.Mutation = (*ContributorRoleMutation)(nil) + +// contributorroleOption allows management of the mutation configuration using functional options. +type contributorroleOption func(*ContributorRoleMutation) + +// newContributorRoleMutation creates new mutation for the ContributorRole entity. +func newContributorRoleMutation(c config, op Op, opts ...contributorroleOption) *ContributorRoleMutation { + m := &ContributorRoleMutation{ + config: c, + op: op, + typ: TypeContributorRole, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withContributorRoleID sets the ID field of the mutation. +func withContributorRoleID(id int) contributorroleOption { + return func(m *ContributorRoleMutation) { + var ( + err error + once sync.Once + value *ContributorRole + ) + m.oldValue = func(ctx context.Context) (*ContributorRole, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().ContributorRole.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withContributorRole sets the old ContributorRole of the mutation. +func withContributorRole(node *ContributorRole) contributorroleOption { + return func(m *ContributorRoleMutation) { + m.oldValue = func(context.Context) (*ContributorRole, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m ContributorRoleMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m ContributorRoleMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. 
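ContributorRoleMutation follows the same shape as the mutations before it; its ID/IDs helpers come next. IDs is what lets a hook see which rows a bulk operation would touch, for example to veto wide deletes (a sketch; the single-delete policy and the hooks package are assumptions):

package hooks

import (
	"context"
	"fmt"

	"example.com/backend/ent"      // assumed module path
	"example.com/backend/ent/hook" // generated typed hook helpers
)

// SingleRoleDelete rejects deletes that would remove more than one
// contributor role, resolving the affected rows via Mutation.IDs.
func SingleRoleDelete(next ent.Mutator) ent.Mutator {
	return hook.ContributorRoleFunc(func(ctx context.Context, m *ent.ContributorRoleMutation) (ent.Value, error) {
		if m.Op().Is(ent.OpDelete | ent.OpDeleteOne) {
			ids, err := m.IDs(ctx)
			if err != nil {
				return nil, err
			}
			if len(ids) > 1 {
				return nil, fmt.Errorf("refusing to delete %d roles in one mutation", len(ids))
			}
		}
		return next.Mutate(ctx, m)
	})
}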
+func (m *ContributorRoleMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *ContributorRoleMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().ContributorRole.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetName sets the "name" field. +func (m *ContributorRoleMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *ContributorRoleMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the ContributorRole entity. +// If the ContributorRole object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ContributorRoleMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *ContributorRoleMutation) ResetName() { + m.name = nil +} + +// AddPostContributorIDs adds the "post_contributors" edge to the PostContributor entity by ids. +func (m *ContributorRoleMutation) AddPostContributorIDs(ids ...int) { + if m.post_contributors == nil { + m.post_contributors = make(map[int]struct{}) + } + for i := range ids { + m.post_contributors[ids[i]] = struct{}{} + } +} + +// ClearPostContributors clears the "post_contributors" edge to the PostContributor entity. +func (m *ContributorRoleMutation) ClearPostContributors() { + m.clearedpost_contributors = true +} + +// PostContributorsCleared reports if the "post_contributors" edge to the PostContributor entity was cleared. +func (m *ContributorRoleMutation) PostContributorsCleared() bool { + return m.clearedpost_contributors +} + +// RemovePostContributorIDs removes the "post_contributors" edge to the PostContributor entity by IDs. +func (m *ContributorRoleMutation) RemovePostContributorIDs(ids ...int) { + if m.removedpost_contributors == nil { + m.removedpost_contributors = make(map[int]struct{}) + } + for i := range ids { + delete(m.post_contributors, ids[i]) + m.removedpost_contributors[ids[i]] = struct{}{} + } +} + +// RemovedPostContributors returns the removed IDs of the "post_contributors" edge to the PostContributor entity. +func (m *ContributorRoleMutation) RemovedPostContributorsIDs() (ids []int) { + for id := range m.removedpost_contributors { + ids = append(ids, id) + } + return +} + +// PostContributorsIDs returns the "post_contributors" edge IDs in the mutation. 
+func (m *ContributorRoleMutation) PostContributorsIDs() (ids []int) { + for id := range m.post_contributors { + ids = append(ids, id) + } + return +} + +// ResetPostContributors resets all changes to the "post_contributors" edge. +func (m *ContributorRoleMutation) ResetPostContributors() { + m.post_contributors = nil + m.clearedpost_contributors = false + m.removedpost_contributors = nil +} + +// Where appends a list predicates to the ContributorRoleMutation builder. +func (m *ContributorRoleMutation) Where(ps ...predicate.ContributorRole) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the ContributorRoleMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ContributorRoleMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.ContributorRole, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *ContributorRoleMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *ContributorRoleMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (ContributorRole). +func (m *ContributorRoleMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *ContributorRoleMutation) Fields() []string { + fields := make([]string, 0, 1) + if m.name != nil { + fields = append(fields, contributorrole.FieldName) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *ContributorRoleMutation) Field(name string) (ent.Value, bool) { + switch name { + case contributorrole.FieldName: + return m.Name() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *ContributorRoleMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case contributorrole.FieldName: + return m.OldName(ctx) + } + return nil, fmt.Errorf("unknown ContributorRole field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *ContributorRoleMutation) SetField(name string, value ent.Value) error { + switch name { + case contributorrole.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + } + return fmt.Errorf("unknown ContributorRole field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *ContributorRoleMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *ContributorRoleMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. 
It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *ContributorRoleMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown ContributorRole numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *ContributorRoleMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *ContributorRoleMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *ContributorRoleMutation) ClearField(name string) error { + return fmt.Errorf("unknown ContributorRole nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *ContributorRoleMutation) ResetField(name string) error { + switch name { + case contributorrole.FieldName: + m.ResetName() + return nil + } + return fmt.Errorf("unknown ContributorRole field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *ContributorRoleMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.post_contributors != nil { + edges = append(edges, contributorrole.EdgePostContributors) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *ContributorRoleMutation) AddedIDs(name string) []ent.Value { + switch name { + case contributorrole.EdgePostContributors: + ids := make([]ent.Value, 0, len(m.post_contributors)) + for id := range m.post_contributors { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *ContributorRoleMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedpost_contributors != nil { + edges = append(edges, contributorrole.EdgePostContributors) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *ContributorRoleMutation) RemovedIDs(name string) []ent.Value { + switch name { + case contributorrole.EdgePostContributors: + ids := make([]ent.Value, 0, len(m.removedpost_contributors)) + for id := range m.removedpost_contributors { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *ContributorRoleMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedpost_contributors { + edges = append(edges, contributorrole.EdgePostContributors) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *ContributorRoleMutation) EdgeCleared(name string) bool { + switch name { + case contributorrole.EdgePostContributors: + return m.clearedpost_contributors + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. 
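+//
+// ContributorRole declares no unique edges, so the switch below is empty and
+// every name is rejected; for example (illustrative only):
+//
+//	err := m.ClearEdge("post_contributors")
+//	// err: unknown ContributorRole unique edge post_contributors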
+func (m *ContributorRoleMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown ContributorRole unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *ContributorRoleMutation) ResetEdge(name string) error { + switch name { + case contributorrole.EdgePostContributors: + m.ResetPostContributors() + return nil + } + return fmt.Errorf("unknown ContributorRole edge %s", name) +} + +// ContributorSocialLinkMutation represents an operation that mutates the ContributorSocialLink nodes in the graph. +type ContributorSocialLinkMutation struct { + config + op Op + typ string + id *int + _type *contributorsociallink.Type + name *string + value *string + clearedFields map[string]struct{} + contributor *int + clearedcontributor bool + done bool + oldValue func(context.Context) (*ContributorSocialLink, error) + predicates []predicate.ContributorSocialLink +} + +var _ ent.Mutation = (*ContributorSocialLinkMutation)(nil) + +// contributorsociallinkOption allows management of the mutation configuration using functional options. +type contributorsociallinkOption func(*ContributorSocialLinkMutation) + +// newContributorSocialLinkMutation creates new mutation for the ContributorSocialLink entity. +func newContributorSocialLinkMutation(c config, op Op, opts ...contributorsociallinkOption) *ContributorSocialLinkMutation { + m := &ContributorSocialLinkMutation{ + config: c, + op: op, + typ: TypeContributorSocialLink, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withContributorSocialLinkID sets the ID field of the mutation. +func withContributorSocialLinkID(id int) contributorsociallinkOption { + return func(m *ContributorSocialLinkMutation) { + var ( + err error + once sync.Once + value *ContributorSocialLink + ) + m.oldValue = func(ctx context.Context) (*ContributorSocialLink, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().ContributorSocialLink.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withContributorSocialLink sets the old ContributorSocialLink of the mutation. +func withContributorSocialLink(node *ContributorSocialLink) contributorsociallinkOption { + return func(m *ContributorSocialLinkMutation) { + m.oldValue = func(context.Context) (*ContributorSocialLink, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m ContributorSocialLinkMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m ContributorSocialLinkMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. 
+func (m *ContributorSocialLinkMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *ContributorSocialLinkMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().ContributorSocialLink.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetType sets the "type" field. +func (m *ContributorSocialLinkMutation) SetType(c contributorsociallink.Type) { + m._type = &c +} + +// GetType returns the value of the "type" field in the mutation. +func (m *ContributorSocialLinkMutation) GetType() (r contributorsociallink.Type, exists bool) { + v := m._type + if v == nil { + return + } + return *v, true +} + +// OldType returns the old "type" field's value of the ContributorSocialLink entity. +// If the ContributorSocialLink object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ContributorSocialLinkMutation) OldType(ctx context.Context) (v contributorsociallink.Type, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldType: %w", err) + } + return oldValue.Type, nil +} + +// ResetType resets all changes to the "type" field. +func (m *ContributorSocialLinkMutation) ResetType() { + m._type = nil +} + +// SetName sets the "name" field. +func (m *ContributorSocialLinkMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *ContributorSocialLinkMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the ContributorSocialLink entity. +// If the ContributorSocialLink object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ContributorSocialLinkMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ClearName clears the value of the "name" field. +func (m *ContributorSocialLinkMutation) ClearName() { + m.name = nil + m.clearedFields[contributorsociallink.FieldName] = struct{}{} +} + +// NameCleared returns if the "name" field was cleared in this mutation. 
+func (m *ContributorSocialLinkMutation) NameCleared() bool { + _, ok := m.clearedFields[contributorsociallink.FieldName] + return ok +} + +// ResetName resets all changes to the "name" field. +func (m *ContributorSocialLinkMutation) ResetName() { + m.name = nil + delete(m.clearedFields, contributorsociallink.FieldName) +} + +// SetValue sets the "value" field. +func (m *ContributorSocialLinkMutation) SetValue(s string) { + m.value = &s +} + +// Value returns the value of the "value" field in the mutation. +func (m *ContributorSocialLinkMutation) Value() (r string, exists bool) { + v := m.value + if v == nil { + return + } + return *v, true +} + +// OldValue returns the old "value" field's value of the ContributorSocialLink entity. +// If the ContributorSocialLink object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ContributorSocialLinkMutation) OldValue(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldValue is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldValue requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValue: %w", err) + } + return oldValue.Value, nil +} + +// ResetValue resets all changes to the "value" field. +func (m *ContributorSocialLinkMutation) ResetValue() { + m.value = nil +} + +// SetContributorID sets the "contributor" edge to the Contributor entity by id. +func (m *ContributorSocialLinkMutation) SetContributorID(id int) { + m.contributor = &id +} + +// ClearContributor clears the "contributor" edge to the Contributor entity. +func (m *ContributorSocialLinkMutation) ClearContributor() { + m.clearedcontributor = true +} + +// ContributorCleared reports if the "contributor" edge to the Contributor entity was cleared. +func (m *ContributorSocialLinkMutation) ContributorCleared() bool { + return m.clearedcontributor +} + +// ContributorID returns the "contributor" edge ID in the mutation. +func (m *ContributorSocialLinkMutation) ContributorID() (id int, exists bool) { + if m.contributor != nil { + return *m.contributor, true + } + return +} + +// ContributorIDs returns the "contributor" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// ContributorID instead. It exists only for internal usage by the builders. +func (m *ContributorSocialLinkMutation) ContributorIDs() (ids []int) { + if id := m.contributor; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetContributor resets all changes to the "contributor" edge. +func (m *ContributorSocialLinkMutation) ResetContributor() { + m.contributor = nil + m.clearedcontributor = false +} + +// Where appends a list predicates to the ContributorSocialLinkMutation builder. +func (m *ContributorSocialLinkMutation) Where(ps ...predicate.ContributorSocialLink) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the ContributorSocialLinkMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ContributorSocialLinkMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.ContributorSocialLink, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) 
+} + +// Op returns the operation name. +func (m *ContributorSocialLinkMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *ContributorSocialLinkMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (ContributorSocialLink). +func (m *ContributorSocialLinkMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *ContributorSocialLinkMutation) Fields() []string { + fields := make([]string, 0, 3) + if m._type != nil { + fields = append(fields, contributorsociallink.FieldType) + } + if m.name != nil { + fields = append(fields, contributorsociallink.FieldName) + } + if m.value != nil { + fields = append(fields, contributorsociallink.FieldValue) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *ContributorSocialLinkMutation) Field(name string) (ent.Value, bool) { + switch name { + case contributorsociallink.FieldType: + return m.GetType() + case contributorsociallink.FieldName: + return m.Name() + case contributorsociallink.FieldValue: + return m.Value() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *ContributorSocialLinkMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case contributorsociallink.FieldType: + return m.OldType(ctx) + case contributorsociallink.FieldName: + return m.OldName(ctx) + case contributorsociallink.FieldValue: + return m.OldValue(ctx) + } + return nil, fmt.Errorf("unknown ContributorSocialLink field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *ContributorSocialLinkMutation) SetField(name string, value ent.Value) error { + switch name { + case contributorsociallink.FieldType: + v, ok := value.(contributorsociallink.Type) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetType(v) + return nil + case contributorsociallink.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case contributorsociallink.FieldValue: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValue(v) + return nil + } + return fmt.Errorf("unknown ContributorSocialLink field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *ContributorSocialLinkMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *ContributorSocialLinkMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. 
It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *ContributorSocialLinkMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown ContributorSocialLink numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *ContributorSocialLinkMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(contributorsociallink.FieldName) { + fields = append(fields, contributorsociallink.FieldName) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *ContributorSocialLinkMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *ContributorSocialLinkMutation) ClearField(name string) error { + switch name { + case contributorsociallink.FieldName: + m.ClearName() + return nil + } + return fmt.Errorf("unknown ContributorSocialLink nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *ContributorSocialLinkMutation) ResetField(name string) error { + switch name { + case contributorsociallink.FieldType: + m.ResetType() + return nil + case contributorsociallink.FieldName: + m.ResetName() + return nil + case contributorsociallink.FieldValue: + m.ResetValue() + return nil + } + return fmt.Errorf("unknown ContributorSocialLink field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *ContributorSocialLinkMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.contributor != nil { + edges = append(edges, contributorsociallink.EdgeContributor) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *ContributorSocialLinkMutation) AddedIDs(name string) []ent.Value { + switch name { + case contributorsociallink.EdgeContributor: + if id := m.contributor; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *ContributorSocialLinkMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *ContributorSocialLinkMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *ContributorSocialLinkMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedcontributor { + edges = append(edges, contributorsociallink.EdgeContributor) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *ContributorSocialLinkMutation) EdgeCleared(name string) bool { + switch name { + case contributorsociallink.EdgeContributor: + return m.clearedcontributor + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. 
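+//
+// An illustrative, hand-written sketch: generic code such as a hook can clear
+// the unique "contributor" edge by name:
+//
+//	if err := m.ClearEdge(contributorsociallink.EdgeContributor); err != nil {
+//		return nil, err // the name did not match a unique edge
+//	}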
+func (m *ContributorSocialLinkMutation) ClearEdge(name string) error { + switch name { + case contributorsociallink.EdgeContributor: + m.ClearContributor() + return nil + } + return fmt.Errorf("unknown ContributorSocialLink unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *ContributorSocialLinkMutation) ResetEdge(name string) error { + switch name { + case contributorsociallink.EdgeContributor: + m.ResetContributor() + return nil + } + return fmt.Errorf("unknown ContributorSocialLink edge %s", name) +} + +// DailyMutation represents an operation that mutates the Daily nodes in the graph. +type DailyMutation struct { + config + op Op + typ string + id *string + image_url *string + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + category *int + clearedcategory bool + contents map[int]struct{} + removedcontents map[int]struct{} + clearedcontents bool + done bool + oldValue func(context.Context) (*Daily, error) + predicates []predicate.Daily +} + +var _ ent.Mutation = (*DailyMutation)(nil) + +// dailyOption allows management of the mutation configuration using functional options. +type dailyOption func(*DailyMutation) + +// newDailyMutation creates new mutation for the Daily entity. +func newDailyMutation(c config, op Op, opts ...dailyOption) *DailyMutation { + m := &DailyMutation{ + config: c, + op: op, + typ: TypeDaily, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withDailyID sets the ID field of the mutation. +func withDailyID(id string) dailyOption { + return func(m *DailyMutation) { + var ( + err error + once sync.Once + value *Daily + ) + m.oldValue = func(ctx context.Context) (*Daily, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Daily.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withDaily sets the old Daily of the mutation. +func withDaily(node *Daily) dailyOption { + return func(m *DailyMutation) { + m.oldValue = func(context.Context) (*Daily, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m DailyMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m DailyMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of Daily entities. +func (m *DailyMutation) SetID(id string) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *DailyMutation) ID() (id string, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. 
+// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *DailyMutation) IDs(ctx context.Context) ([]string, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []string{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Daily.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetImageURL sets the "image_url" field. +func (m *DailyMutation) SetImageURL(s string) { + m.image_url = &s +} + +// ImageURL returns the value of the "image_url" field in the mutation. +func (m *DailyMutation) ImageURL() (r string, exists bool) { + v := m.image_url + if v == nil { + return + } + return *v, true +} + +// OldImageURL returns the old "image_url" field's value of the Daily entity. +// If the Daily object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DailyMutation) OldImageURL(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldImageURL is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldImageURL requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldImageURL: %w", err) + } + return oldValue.ImageURL, nil +} + +// ResetImageURL resets all changes to the "image_url" field. +func (m *DailyMutation) ResetImageURL() { + m.image_url = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *DailyMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *DailyMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Daily entity. +// If the Daily object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DailyMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *DailyMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *DailyMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *DailyMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Daily entity. 
+// If the Daily object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DailyMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *DailyMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetCategoryID sets the "category" edge to the Category entity by id. +func (m *DailyMutation) SetCategoryID(id int) { + m.category = &id +} + +// ClearCategory clears the "category" edge to the Category entity. +func (m *DailyMutation) ClearCategory() { + m.clearedcategory = true +} + +// CategoryCleared reports if the "category" edge to the Category entity was cleared. +func (m *DailyMutation) CategoryCleared() bool { + return m.clearedcategory +} + +// CategoryID returns the "category" edge ID in the mutation. +func (m *DailyMutation) CategoryID() (id int, exists bool) { + if m.category != nil { + return *m.category, true + } + return +} + +// CategoryIDs returns the "category" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// CategoryID instead. It exists only for internal usage by the builders. +func (m *DailyMutation) CategoryIDs() (ids []int) { + if id := m.category; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetCategory resets all changes to the "category" edge. +func (m *DailyMutation) ResetCategory() { + m.category = nil + m.clearedcategory = false +} + +// AddContentIDs adds the "contents" edge to the DailyContent entity by ids. +func (m *DailyMutation) AddContentIDs(ids ...int) { + if m.contents == nil { + m.contents = make(map[int]struct{}) + } + for i := range ids { + m.contents[ids[i]] = struct{}{} + } +} + +// ClearContents clears the "contents" edge to the DailyContent entity. +func (m *DailyMutation) ClearContents() { + m.clearedcontents = true +} + +// ContentsCleared reports if the "contents" edge to the DailyContent entity was cleared. +func (m *DailyMutation) ContentsCleared() bool { + return m.clearedcontents +} + +// RemoveContentIDs removes the "contents" edge to the DailyContent entity by IDs. +func (m *DailyMutation) RemoveContentIDs(ids ...int) { + if m.removedcontents == nil { + m.removedcontents = make(map[int]struct{}) + } + for i := range ids { + delete(m.contents, ids[i]) + m.removedcontents[ids[i]] = struct{}{} + } +} + +// RemovedContents returns the removed IDs of the "contents" edge to the DailyContent entity. +func (m *DailyMutation) RemovedContentsIDs() (ids []int) { + for id := range m.removedcontents { + ids = append(ids, id) + } + return +} + +// ContentsIDs returns the "contents" edge IDs in the mutation. +func (m *DailyMutation) ContentsIDs() (ids []int) { + for id := range m.contents { + ids = append(ids, id) + } + return +} + +// ResetContents resets all changes to the "contents" edge. 
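+//
+// Resetting discards the added ID set, the removed ID set, and the cleared
+// flag together, as the body below shows: afterwards ContentsIDs returns nil
+// and ContentsCleared reports false.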
+func (m *DailyMutation) ResetContents() { + m.contents = nil + m.clearedcontents = false + m.removedcontents = nil +} + +// Where appends a list predicates to the DailyMutation builder. +func (m *DailyMutation) Where(ps ...predicate.Daily) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the DailyMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *DailyMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Daily, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *DailyMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *DailyMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Daily). +func (m *DailyMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *DailyMutation) Fields() []string { + fields := make([]string, 0, 3) + if m.image_url != nil { + fields = append(fields, daily.FieldImageURL) + } + if m.created_at != nil { + fields = append(fields, daily.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, daily.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *DailyMutation) Field(name string) (ent.Value, bool) { + switch name { + case daily.FieldImageURL: + return m.ImageURL() + case daily.FieldCreatedAt: + return m.CreatedAt() + case daily.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *DailyMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case daily.FieldImageURL: + return m.OldImageURL(ctx) + case daily.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case daily.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown Daily field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DailyMutation) SetField(name string, value ent.Value) error { + switch name { + case daily.FieldImageURL: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetImageURL(v) + return nil + case daily.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case daily.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown Daily field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *DailyMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. 
The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *DailyMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DailyMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Daily numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *DailyMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *DailyMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *DailyMutation) ClearField(name string) error { + return fmt.Errorf("unknown Daily nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *DailyMutation) ResetField(name string) error { + switch name { + case daily.FieldImageURL: + m.ResetImageURL() + return nil + case daily.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case daily.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown Daily field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *DailyMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.category != nil { + edges = append(edges, daily.EdgeCategory) + } + if m.contents != nil { + edges = append(edges, daily.EdgeContents) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *DailyMutation) AddedIDs(name string) []ent.Value { + switch name { + case daily.EdgeCategory: + if id := m.category; id != nil { + return []ent.Value{*id} + } + case daily.EdgeContents: + ids := make([]ent.Value, 0, len(m.contents)) + for id := range m.contents { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *DailyMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + if m.removedcontents != nil { + edges = append(edges, daily.EdgeContents) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *DailyMutation) RemovedIDs(name string) []ent.Value { + switch name { + case daily.EdgeContents: + ids := make([]ent.Value, 0, len(m.removedcontents)) + for id := range m.removedcontents { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *DailyMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.clearedcategory { + edges = append(edges, daily.EdgeCategory) + } + if m.clearedcontents { + edges = append(edges, daily.EdgeContents) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. 
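+//
+// A hand-written example (not generated): a hook can branch on a cleared edge,
+// assuming m is the *DailyMutation passed to it:
+//
+//	if m.EdgeCleared(daily.EdgeCategory) {
+//		// the category association is being detached by this mutation
+//	}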
+func (m *DailyMutation) EdgeCleared(name string) bool { + switch name { + case daily.EdgeCategory: + return m.clearedcategory + case daily.EdgeContents: + return m.clearedcontents + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *DailyMutation) ClearEdge(name string) error { + switch name { + case daily.EdgeCategory: + m.ClearCategory() + return nil + } + return fmt.Errorf("unknown Daily unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *DailyMutation) ResetEdge(name string) error { + switch name { + case daily.EdgeCategory: + m.ResetCategory() + return nil + case daily.EdgeContents: + m.ResetContents() + return nil + } + return fmt.Errorf("unknown Daily edge %s", name) +} + +// DailyCategoryMutation represents an operation that mutates the DailyCategory nodes in the graph. +type DailyCategoryMutation struct { + config + op Op + typ string + id *int + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + contents map[int]struct{} + removedcontents map[int]struct{} + clearedcontents bool + daily_items map[string]struct{} + removeddaily_items map[string]struct{} + cleareddaily_items bool + done bool + oldValue func(context.Context) (*DailyCategory, error) + predicates []predicate.DailyCategory +} + +var _ ent.Mutation = (*DailyCategoryMutation)(nil) + +// dailycategoryOption allows management of the mutation configuration using functional options. +type dailycategoryOption func(*DailyCategoryMutation) + +// newDailyCategoryMutation creates new mutation for the DailyCategory entity. +func newDailyCategoryMutation(c config, op Op, opts ...dailycategoryOption) *DailyCategoryMutation { + m := &DailyCategoryMutation{ + config: c, + op: op, + typ: TypeDailyCategory, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withDailyCategoryID sets the ID field of the mutation. +func withDailyCategoryID(id int) dailycategoryOption { + return func(m *DailyCategoryMutation) { + var ( + err error + once sync.Once + value *DailyCategory + ) + m.oldValue = func(ctx context.Context) (*DailyCategory, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().DailyCategory.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withDailyCategory sets the old DailyCategory of the mutation. +func withDailyCategory(node *DailyCategory) dailycategoryOption { + return func(m *DailyCategoryMutation) { + m.oldValue = func(context.Context) (*DailyCategory, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m DailyCategoryMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m DailyCategoryMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. 
Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *DailyCategoryMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *DailyCategoryMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().DailyCategory.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *DailyCategoryMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *DailyCategoryMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the DailyCategory entity. +// If the DailyCategory object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DailyCategoryMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *DailyCategoryMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *DailyCategoryMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *DailyCategoryMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the DailyCategory entity. +// If the DailyCategory object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *DailyCategoryMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. 
+func (m *DailyCategoryMutation) ResetUpdatedAt() {
+	m.updated_at = nil
+}
+
+// AddContentIDs adds the "contents" edge to the DailyCategoryContent entity by ids.
+func (m *DailyCategoryMutation) AddContentIDs(ids ...int) {
+	if m.contents == nil {
+		m.contents = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.contents[ids[i]] = struct{}{}
+	}
+}
+
+// ClearContents clears the "contents" edge to the DailyCategoryContent entity.
+func (m *DailyCategoryMutation) ClearContents() {
+	m.clearedcontents = true
+}
+
+// ContentsCleared reports if the "contents" edge to the DailyCategoryContent entity was cleared.
+func (m *DailyCategoryMutation) ContentsCleared() bool {
+	return m.clearedcontents
+}
+
+// RemoveContentIDs removes the "contents" edge to the DailyCategoryContent entity by IDs.
+func (m *DailyCategoryMutation) RemoveContentIDs(ids ...int) {
+	if m.removedcontents == nil {
+		m.removedcontents = make(map[int]struct{})
+	}
+	for i := range ids {
+		delete(m.contents, ids[i])
+		m.removedcontents[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedContentsIDs returns the removed IDs of the "contents" edge to the DailyCategoryContent entity.
+func (m *DailyCategoryMutation) RemovedContentsIDs() (ids []int) {
+	for id := range m.removedcontents {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ContentsIDs returns the "contents" edge IDs in the mutation.
+func (m *DailyCategoryMutation) ContentsIDs() (ids []int) {
+	for id := range m.contents {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetContents resets all changes to the "contents" edge.
+func (m *DailyCategoryMutation) ResetContents() {
+	m.contents = nil
+	m.clearedcontents = false
+	m.removedcontents = nil
+}
+
+// AddDailyItemIDs adds the "daily_items" edge to the Daily entity by ids.
+func (m *DailyCategoryMutation) AddDailyItemIDs(ids ...string) {
+	if m.daily_items == nil {
+		m.daily_items = make(map[string]struct{})
+	}
+	for i := range ids {
+		m.daily_items[ids[i]] = struct{}{}
+	}
+}
+
+// ClearDailyItems clears the "daily_items" edge to the Daily entity.
+func (m *DailyCategoryMutation) ClearDailyItems() {
+	m.cleareddaily_items = true
+}
+
+// DailyItemsCleared reports if the "daily_items" edge to the Daily entity was cleared.
+func (m *DailyCategoryMutation) DailyItemsCleared() bool {
+	return m.cleareddaily_items
+}
+
+// RemoveDailyItemIDs removes the "daily_items" edge to the Daily entity by IDs.
+func (m *DailyCategoryMutation) RemoveDailyItemIDs(ids ...string) {
+	if m.removeddaily_items == nil {
+		m.removeddaily_items = make(map[string]struct{})
+	}
+	for i := range ids {
+		delete(m.daily_items, ids[i])
+		m.removeddaily_items[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedDailyItemsIDs returns the removed IDs of the "daily_items" edge to the Daily entity.
+func (m *DailyCategoryMutation) RemovedDailyItemsIDs() (ids []string) {
+	for id := range m.removeddaily_items {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// DailyItemsIDs returns the "daily_items" edge IDs in the mutation.
+func (m *DailyCategoryMutation) DailyItemsIDs() (ids []string) {
+	for id := range m.daily_items {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetDailyItems resets all changes to the "daily_items" edge.
+func (m *DailyCategoryMutation) ResetDailyItems() {
+	m.daily_items = nil
+	m.cleareddaily_items = false
+	m.removeddaily_items = nil
+}
+
+// Where appends a list of predicates to the DailyCategoryMutation builder.
+func (m *DailyCategoryMutation) Where(ps ...predicate.DailyCategory) {
+	m.predicates = append(m.predicates, ps...)
+} + +// WhereP appends storage-level predicates to the DailyCategoryMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *DailyCategoryMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.DailyCategory, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *DailyCategoryMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *DailyCategoryMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (DailyCategory). +func (m *DailyCategoryMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *DailyCategoryMutation) Fields() []string { + fields := make([]string, 0, 2) + if m.created_at != nil { + fields = append(fields, dailycategory.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, dailycategory.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *DailyCategoryMutation) Field(name string) (ent.Value, bool) { + switch name { + case dailycategory.FieldCreatedAt: + return m.CreatedAt() + case dailycategory.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *DailyCategoryMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case dailycategory.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case dailycategory.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown DailyCategory field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DailyCategoryMutation) SetField(name string, value ent.Value) error { + switch name { + case dailycategory.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case dailycategory.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown DailyCategory field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *DailyCategoryMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *DailyCategoryMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
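+//
+// DailyCategory has no numeric fields, so the switch below is empty and every
+// call fails; an illustrative (hand-written) example, assuming the field
+// constant resolves to "created_at":
+//
+//	err := m.AddField(dailycategory.FieldCreatedAt, 1)
+//	// err: unknown DailyCategory numeric field created_at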
+func (m *DailyCategoryMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown DailyCategory numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *DailyCategoryMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *DailyCategoryMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *DailyCategoryMutation) ClearField(name string) error { + return fmt.Errorf("unknown DailyCategory nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *DailyCategoryMutation) ResetField(name string) error { + switch name { + case dailycategory.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case dailycategory.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown DailyCategory field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *DailyCategoryMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.contents != nil { + edges = append(edges, dailycategory.EdgeContents) + } + if m.daily_items != nil { + edges = append(edges, dailycategory.EdgeDailyItems) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *DailyCategoryMutation) AddedIDs(name string) []ent.Value { + switch name { + case dailycategory.EdgeContents: + ids := make([]ent.Value, 0, len(m.contents)) + for id := range m.contents { + ids = append(ids, id) + } + return ids + case dailycategory.EdgeDailyItems: + ids := make([]ent.Value, 0, len(m.daily_items)) + for id := range m.daily_items { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *DailyCategoryMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + if m.removedcontents != nil { + edges = append(edges, dailycategory.EdgeContents) + } + if m.removeddaily_items != nil { + edges = append(edges, dailycategory.EdgeDailyItems) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *DailyCategoryMutation) RemovedIDs(name string) []ent.Value { + switch name { + case dailycategory.EdgeContents: + ids := make([]ent.Value, 0, len(m.removedcontents)) + for id := range m.removedcontents { + ids = append(ids, id) + } + return ids + case dailycategory.EdgeDailyItems: + ids := make([]ent.Value, 0, len(m.removeddaily_items)) + for id := range m.removeddaily_items { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *DailyCategoryMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.clearedcontents { + edges = append(edges, dailycategory.EdgeContents) + } + if m.cleareddaily_items { + edges = append(edges, dailycategory.EdgeDailyItems) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *DailyCategoryMutation) EdgeCleared(name string) bool { + switch name { + case dailycategory.EdgeContents: + return m.clearedcontents + case dailycategory.EdgeDailyItems: + return m.cleareddaily_items + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *DailyCategoryMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown DailyCategory unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *DailyCategoryMutation) ResetEdge(name string) error { + switch name { + case dailycategory.EdgeContents: + m.ResetContents() + return nil + case dailycategory.EdgeDailyItems: + m.ResetDailyItems() + return nil + } + return fmt.Errorf("unknown DailyCategory edge %s", name) +} + +// DailyCategoryContentMutation represents an operation that mutates the DailyCategoryContent nodes in the graph. +type DailyCategoryContentMutation struct { + config + op Op + typ string + id *int + language_code *dailycategorycontent.LanguageCode + name *string + clearedFields map[string]struct{} + category *int + clearedcategory bool + done bool + oldValue func(context.Context) (*DailyCategoryContent, error) + predicates []predicate.DailyCategoryContent +} + +var _ ent.Mutation = (*DailyCategoryContentMutation)(nil) + +// dailycategorycontentOption allows management of the mutation configuration using functional options. +type dailycategorycontentOption func(*DailyCategoryContentMutation) + +// newDailyCategoryContentMutation creates new mutation for the DailyCategoryContent entity. +func newDailyCategoryContentMutation(c config, op Op, opts ...dailycategorycontentOption) *DailyCategoryContentMutation { + m := &DailyCategoryContentMutation{ + config: c, + op: op, + typ: TypeDailyCategoryContent, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withDailyCategoryContentID sets the ID field of the mutation. +func withDailyCategoryContentID(id int) dailycategorycontentOption { + return func(m *DailyCategoryContentMutation) { + var ( + err error + once sync.Once + value *DailyCategoryContent + ) + m.oldValue = func(ctx context.Context) (*DailyCategoryContent, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().DailyCategoryContent.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withDailyCategoryContent sets the old DailyCategoryContent of the mutation. +func withDailyCategoryContent(node *DailyCategoryContent) dailycategorycontentOption { + return func(m *DailyCategoryContentMutation) { + m.oldValue = func(context.Context) (*DailyCategoryContent, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. 
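+//
+// Inside a hook this allows reading back into the graph, e.g. (a sketch;
+// categoryID is hypothetical):
+//
+//	cat, err := m.Client().DailyCategory.Get(ctx, categoryID)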
+func (m DailyCategoryContentMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m DailyCategoryContentMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *DailyCategoryContentMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *DailyCategoryContentMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().DailyCategoryContent.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetLanguageCode sets the "language_code" field.
+func (m *DailyCategoryContentMutation) SetLanguageCode(dc dailycategorycontent.LanguageCode) {
+	m.language_code = &dc
+}
+
+// LanguageCode returns the value of the "language_code" field in the mutation.
+func (m *DailyCategoryContentMutation) LanguageCode() (r dailycategorycontent.LanguageCode, exists bool) {
+	v := m.language_code
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldLanguageCode returns the old "language_code" field's value of the DailyCategoryContent entity.
+// If the DailyCategoryContent object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DailyCategoryContentMutation) OldLanguageCode(ctx context.Context) (v dailycategorycontent.LanguageCode, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldLanguageCode is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldLanguageCode requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldLanguageCode: %w", err)
+	}
+	return oldValue.LanguageCode, nil
+}
+
+// ResetLanguageCode resets all changes to the "language_code" field.
+func (m *DailyCategoryContentMutation) ResetLanguageCode() {
+	m.language_code = nil
+}
+
+// SetName sets the "name" field.
+func (m *DailyCategoryContentMutation) SetName(s string) {
+	m.name = &s
+}
+
+// Name returns the value of the "name" field in the mutation.
+func (m *DailyCategoryContentMutation) Name() (r string, exists bool) {
+	v := m.name
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldName returns the old "name" field's value of the DailyCategoryContent entity.
+// If the DailyCategoryContent object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DailyCategoryContentMutation) OldName(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldName is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldName requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldName: %w", err)
+	}
+	return oldValue.Name, nil
+}
+
+// ResetName resets all changes to the "name" field.
+func (m *DailyCategoryContentMutation) ResetName() {
+	m.name = nil
+}
+
+// SetCategoryID sets the "category" edge to the DailyCategory entity by id.
+func (m *DailyCategoryContentMutation) SetCategoryID(id int) {
+	m.category = &id
+}
+
+// ClearCategory clears the "category" edge to the DailyCategory entity.
+func (m *DailyCategoryContentMutation) ClearCategory() {
+	m.clearedcategory = true
+}
+
+// CategoryCleared reports if the "category" edge to the DailyCategory entity was cleared.
+func (m *DailyCategoryContentMutation) CategoryCleared() bool {
+	return m.clearedcategory
+}
+
+// CategoryID returns the "category" edge ID in the mutation.
+func (m *DailyCategoryContentMutation) CategoryID() (id int, exists bool) {
+	if m.category != nil {
+		return *m.category, true
+	}
+	return
+}
+
+// CategoryIDs returns the "category" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// CategoryID instead. It exists only for internal usage by the builders.
+func (m *DailyCategoryContentMutation) CategoryIDs() (ids []int) {
+	if id := m.category; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetCategory resets all changes to the "category" edge.
+func (m *DailyCategoryContentMutation) ResetCategory() {
+	m.category = nil
+	m.clearedcategory = false
+}
+
+// Where appends a list of predicates to the DailyCategoryContentMutation builder.
+func (m *DailyCategoryContentMutation) Where(ps ...predicate.DailyCategoryContent) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the DailyCategoryContentMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *DailyCategoryContentMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.DailyCategoryContent, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *DailyCategoryContentMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *DailyCategoryContentMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (DailyCategoryContent).
+func (m *DailyCategoryContentMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *DailyCategoryContentMutation) Fields() []string {
+	fields := make([]string, 0, 2)
+	if m.language_code != nil {
+		fields = append(fields, dailycategorycontent.FieldLanguageCode)
+	}
+	if m.name != nil {
+		fields = append(fields, dailycategorycontent.FieldName)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *DailyCategoryContentMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case dailycategorycontent.FieldLanguageCode:
+		return m.LanguageCode()
+	case dailycategorycontent.FieldName:
+		return m.Name()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *DailyCategoryContentMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+	switch name {
+	case dailycategorycontent.FieldLanguageCode:
+		return m.OldLanguageCode(ctx)
+	case dailycategorycontent.FieldName:
+		return m.OldName(ctx)
+	}
+	return nil, fmt.Errorf("unknown DailyCategoryContent field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *DailyCategoryContentMutation) SetField(name string, value ent.Value) error {
+	switch name {
+	case dailycategorycontent.FieldLanguageCode:
+		v, ok := value.(dailycategorycontent.LanguageCode)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetLanguageCode(v)
+		return nil
+	case dailycategorycontent.FieldName:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetName(v)
+		return nil
+	}
+	return fmt.Errorf("unknown DailyCategoryContent field %s", name)
+}
+
+// AddedFields returns all numeric fields that were incremented/decremented during
+// this mutation.
+func (m *DailyCategoryContentMutation) AddedFields() []string {
+	return nil
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *DailyCategoryContentMutation) AddedField(name string) (ent.Value, bool) {
+	return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *DailyCategoryContentMutation) AddField(name string, value ent.Value) error {
+	switch name {
+	}
+	return fmt.Errorf("unknown DailyCategoryContent numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation.
+func (m *DailyCategoryContentMutation) ClearedFields() []string {
+	return nil
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *DailyCategoryContentMutation) FieldCleared(name string) bool {
+	_, ok := m.clearedFields[name]
+	return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *DailyCategoryContentMutation) ClearField(name string) error {
+	return fmt.Errorf("unknown DailyCategoryContent nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
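+//
+// For example (a sketch), a hook can discard a staged change by name:
+//
+//	_ = m.ResetField(dailycategorycontent.FieldName) // forget any staged name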
+func (m *DailyCategoryContentMutation) ResetField(name string) error { + switch name { + case dailycategorycontent.FieldLanguageCode: + m.ResetLanguageCode() + return nil + case dailycategorycontent.FieldName: + m.ResetName() + return nil + } + return fmt.Errorf("unknown DailyCategoryContent field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *DailyCategoryContentMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.category != nil { + edges = append(edges, dailycategorycontent.EdgeCategory) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *DailyCategoryContentMutation) AddedIDs(name string) []ent.Value { + switch name { + case dailycategorycontent.EdgeCategory: + if id := m.category; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *DailyCategoryContentMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *DailyCategoryContentMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *DailyCategoryContentMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedcategory { + edges = append(edges, dailycategorycontent.EdgeCategory) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *DailyCategoryContentMutation) EdgeCleared(name string) bool { + switch name { + case dailycategorycontent.EdgeCategory: + return m.clearedcategory + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *DailyCategoryContentMutation) ClearEdge(name string) error { + switch name { + case dailycategorycontent.EdgeCategory: + m.ClearCategory() + return nil + } + return fmt.Errorf("unknown DailyCategoryContent unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *DailyCategoryContentMutation) ResetEdge(name string) error { + switch name { + case dailycategorycontent.EdgeCategory: + m.ResetCategory() + return nil + } + return fmt.Errorf("unknown DailyCategoryContent edge %s", name) +} + +// DailyContentMutation represents an operation that mutates the DailyContent nodes in the graph. +type DailyContentMutation struct { + config + op Op + typ string + id *int + language_code *dailycontent.LanguageCode + quote *string + clearedFields map[string]struct{} + daily *string + cleareddaily bool + done bool + oldValue func(context.Context) (*DailyContent, error) + predicates []predicate.DailyContent +} + +var _ ent.Mutation = (*DailyContentMutation)(nil) + +// dailycontentOption allows management of the mutation configuration using functional options. +type dailycontentOption func(*DailyContentMutation) + +// newDailyContentMutation creates new mutation for the DailyContent entity. 
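+//
+// Builders construct it with functional options, e.g. (illustrative):
+//
+//	m := newDailyContentMutation(c, OpUpdateOne, withDailyContentID(id))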
+func newDailyContentMutation(c config, op Op, opts ...dailycontentOption) *DailyContentMutation {
+	m := &DailyContentMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeDailyContent,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withDailyContentID sets the ID field of the mutation.
+func withDailyContentID(id int) dailycontentOption {
+	return func(m *DailyContentMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *DailyContent
+		)
+		m.oldValue = func(ctx context.Context) (*DailyContent, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().DailyContent.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withDailyContent sets the old DailyContent of the mutation.
+func withDailyContent(node *DailyContent) dailycontentOption {
+	return func(m *DailyContentMutation) {
+		m.oldValue = func(context.Context) (*DailyContent, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m DailyContentMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m DailyContentMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *DailyContentMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *DailyContentMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().DailyContent.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetLanguageCode sets the "language_code" field.
+func (m *DailyContentMutation) SetLanguageCode(dc dailycontent.LanguageCode) {
+	m.language_code = &dc
+}
+
+// LanguageCode returns the value of the "language_code" field in the mutation.
+func (m *DailyContentMutation) LanguageCode() (r dailycontent.LanguageCode, exists bool) {
+	v := m.language_code
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldLanguageCode returns the old "language_code" field's value of the DailyContent entity.
+// If the DailyContent object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
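+//
+// For example (a sketch), an UpdateOne hook can diff the staged value
+// against the stored one:
+//
+//	old, err := m.OldLanguageCode(ctx)
+//	if cur, ok := m.LanguageCode(); err == nil && ok && cur != old {
+//		// language_code is about to change
+//	}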
+func (m *DailyContentMutation) OldLanguageCode(ctx context.Context) (v dailycontent.LanguageCode, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldLanguageCode is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldLanguageCode requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldLanguageCode: %w", err)
+	}
+	return oldValue.LanguageCode, nil
+}
+
+// ResetLanguageCode resets all changes to the "language_code" field.
+func (m *DailyContentMutation) ResetLanguageCode() {
+	m.language_code = nil
+}
+
+// SetQuote sets the "quote" field.
+func (m *DailyContentMutation) SetQuote(s string) {
+	m.quote = &s
+}
+
+// Quote returns the value of the "quote" field in the mutation.
+func (m *DailyContentMutation) Quote() (r string, exists bool) {
+	v := m.quote
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldQuote returns the old "quote" field's value of the DailyContent entity.
+// If the DailyContent object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *DailyContentMutation) OldQuote(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldQuote is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldQuote requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldQuote: %w", err)
+	}
+	return oldValue.Quote, nil
+}
+
+// ResetQuote resets all changes to the "quote" field.
+func (m *DailyContentMutation) ResetQuote() {
+	m.quote = nil
+}
+
+// SetDailyID sets the "daily" edge to the Daily entity by id.
+func (m *DailyContentMutation) SetDailyID(id string) {
+	m.daily = &id
+}
+
+// ClearDaily clears the "daily" edge to the Daily entity.
+func (m *DailyContentMutation) ClearDaily() {
+	m.cleareddaily = true
+}
+
+// DailyCleared reports if the "daily" edge to the Daily entity was cleared.
+func (m *DailyContentMutation) DailyCleared() bool {
+	return m.cleareddaily
+}
+
+// DailyID returns the "daily" edge ID in the mutation.
+func (m *DailyContentMutation) DailyID() (id string, exists bool) {
+	if m.daily != nil {
+		return *m.daily, true
+	}
+	return
+}
+
+// DailyIDs returns the "daily" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// DailyID instead. It exists only for internal usage by the builders.
+func (m *DailyContentMutation) DailyIDs() (ids []string) {
+	if id := m.daily; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetDaily resets all changes to the "daily" edge.
+func (m *DailyContentMutation) ResetDaily() {
+	m.daily = nil
+	m.cleareddaily = false
+}
+
+// Where appends a list of predicates to the DailyContentMutation builder.
+func (m *DailyContentMutation) Where(ps ...predicate.DailyContent) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the DailyContentMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *DailyContentMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.DailyContent, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+} + +// Op returns the operation name. +func (m *DailyContentMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *DailyContentMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (DailyContent). +func (m *DailyContentMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *DailyContentMutation) Fields() []string { + fields := make([]string, 0, 2) + if m.language_code != nil { + fields = append(fields, dailycontent.FieldLanguageCode) + } + if m.quote != nil { + fields = append(fields, dailycontent.FieldQuote) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *DailyContentMutation) Field(name string) (ent.Value, bool) { + switch name { + case dailycontent.FieldLanguageCode: + return m.LanguageCode() + case dailycontent.FieldQuote: + return m.Quote() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *DailyContentMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case dailycontent.FieldLanguageCode: + return m.OldLanguageCode(ctx) + case dailycontent.FieldQuote: + return m.OldQuote(ctx) + } + return nil, fmt.Errorf("unknown DailyContent field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DailyContentMutation) SetField(name string, value ent.Value) error { + switch name { + case dailycontent.FieldLanguageCode: + v, ok := value.(dailycontent.LanguageCode) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLanguageCode(v) + return nil + case dailycontent.FieldQuote: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetQuote(v) + return nil + } + return fmt.Errorf("unknown DailyContent field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *DailyContentMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *DailyContentMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *DailyContentMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown DailyContent numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *DailyContentMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. 
+func (m *DailyContentMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *DailyContentMutation) ClearField(name string) error { + return fmt.Errorf("unknown DailyContent nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *DailyContentMutation) ResetField(name string) error { + switch name { + case dailycontent.FieldLanguageCode: + m.ResetLanguageCode() + return nil + case dailycontent.FieldQuote: + m.ResetQuote() + return nil + } + return fmt.Errorf("unknown DailyContent field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *DailyContentMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.daily != nil { + edges = append(edges, dailycontent.EdgeDaily) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *DailyContentMutation) AddedIDs(name string) []ent.Value { + switch name { + case dailycontent.EdgeDaily: + if id := m.daily; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *DailyContentMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *DailyContentMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *DailyContentMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.cleareddaily { + edges = append(edges, dailycontent.EdgeDaily) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *DailyContentMutation) EdgeCleared(name string) bool { + switch name { + case dailycontent.EdgeDaily: + return m.cleareddaily + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *DailyContentMutation) ClearEdge(name string) error { + switch name { + case dailycontent.EdgeDaily: + m.ClearDaily() + return nil + } + return fmt.Errorf("unknown DailyContent unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *DailyContentMutation) ResetEdge(name string) error { + switch name { + case dailycontent.EdgeDaily: + m.ResetDaily() + return nil + } + return fmt.Errorf("unknown DailyContent edge %s", name) +} + +// MediaMutation represents an operation that mutates the Media nodes in the graph. 
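+//
+// Hooks typically receive it through the generated hook.MediaFunc adapter
+// (a sketch; next is the wrapped mutator, see backend/ent/hook):
+//
+//	hook.MediaFunc(func(ctx context.Context, m *ent.MediaMutation) (ent.Value, error) {
+//		// inspect or modify m before it is applied
+//		return next.Mutate(ctx, m)
+//	})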
+type MediaMutation struct {
+	config
+	op            Op
+	typ           string
+	id            *int
+	storage_id    *string
+	original_name *string
+	mime_type     *string
+	size          *int64
+	addsize       *int64
+	url           *string
+	created_at    *time.Time
+	updated_at    *time.Time
+	created_by    *string
+	clearedFields map[string]struct{}
+	owner         *int
+	clearedowner  bool
+	done          bool
+	oldValue      func(context.Context) (*Media, error)
+	predicates    []predicate.Media
+}
+
+var _ ent.Mutation = (*MediaMutation)(nil)
+
+// mediaOption allows management of the mutation configuration using functional options.
+type mediaOption func(*MediaMutation)
+
+// newMediaMutation creates new mutation for the Media entity.
+func newMediaMutation(c config, op Op, opts ...mediaOption) *MediaMutation {
+	m := &MediaMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeMedia,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withMediaID sets the ID field of the mutation.
+func withMediaID(id int) mediaOption {
+	return func(m *MediaMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *Media
+		)
+		m.oldValue = func(ctx context.Context) (*Media, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().Media.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withMedia sets the old Media of the mutation.
+func withMedia(node *Media) mediaOption {
+	return func(m *MediaMutation) {
+		m.oldValue = func(context.Context) (*Media, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m MediaMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m MediaMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *MediaMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *MediaMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().Media.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetStorageID sets the "storage_id" field.
+func (m *MediaMutation) SetStorageID(s string) {
+	m.storage_id = &s
+}
+
+// StorageID returns the value of the "storage_id" field in the mutation.
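+//
+// The boolean reports whether the field was staged on this mutation, e.g.
+// (illustrative):
+//
+//	if sid, ok := m.StorageID(); ok {
+//		// storage_id is staged as sid
+//		_ = sid
+//	}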
+func (m *MediaMutation) StorageID() (r string, exists bool) { + v := m.storage_id + if v == nil { + return + } + return *v, true +} + +// OldStorageID returns the old "storage_id" field's value of the Media entity. +// If the Media object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MediaMutation) OldStorageID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStorageID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStorageID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStorageID: %w", err) + } + return oldValue.StorageID, nil +} + +// ResetStorageID resets all changes to the "storage_id" field. +func (m *MediaMutation) ResetStorageID() { + m.storage_id = nil +} + +// SetOriginalName sets the "original_name" field. +func (m *MediaMutation) SetOriginalName(s string) { + m.original_name = &s +} + +// OriginalName returns the value of the "original_name" field in the mutation. +func (m *MediaMutation) OriginalName() (r string, exists bool) { + v := m.original_name + if v == nil { + return + } + return *v, true +} + +// OldOriginalName returns the old "original_name" field's value of the Media entity. +// If the Media object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MediaMutation) OldOriginalName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOriginalName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOriginalName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOriginalName: %w", err) + } + return oldValue.OriginalName, nil +} + +// ResetOriginalName resets all changes to the "original_name" field. +func (m *MediaMutation) ResetOriginalName() { + m.original_name = nil +} + +// SetMimeType sets the "mime_type" field. +func (m *MediaMutation) SetMimeType(s string) { + m.mime_type = &s +} + +// MimeType returns the value of the "mime_type" field in the mutation. +func (m *MediaMutation) MimeType() (r string, exists bool) { + v := m.mime_type + if v == nil { + return + } + return *v, true +} + +// OldMimeType returns the old "mime_type" field's value of the Media entity. +// If the Media object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MediaMutation) OldMimeType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMimeType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMimeType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMimeType: %w", err) + } + return oldValue.MimeType, nil +} + +// ResetMimeType resets all changes to the "mime_type" field. +func (m *MediaMutation) ResetMimeType() { + m.mime_type = nil +} + +// SetSize sets the "size" field. 
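+//
+// Note that setting an absolute value discards any pending AddSize delta,
+// as the body below shows. Illustrative:
+//
+//	m.AddSize(10)
+//	m.SetSize(100) // addsize is reset; the staged value is exactly 100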
+func (m *MediaMutation) SetSize(i int64) { + m.size = &i + m.addsize = nil +} + +// Size returns the value of the "size" field in the mutation. +func (m *MediaMutation) Size() (r int64, exists bool) { + v := m.size + if v == nil { + return + } + return *v, true +} + +// OldSize returns the old "size" field's value of the Media entity. +// If the Media object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MediaMutation) OldSize(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSize is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSize requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSize: %w", err) + } + return oldValue.Size, nil +} + +// AddSize adds i to the "size" field. +func (m *MediaMutation) AddSize(i int64) { + if m.addsize != nil { + *m.addsize += i + } else { + m.addsize = &i + } +} + +// AddedSize returns the value that was added to the "size" field in this mutation. +func (m *MediaMutation) AddedSize() (r int64, exists bool) { + v := m.addsize + if v == nil { + return + } + return *v, true +} + +// ResetSize resets all changes to the "size" field. +func (m *MediaMutation) ResetSize() { + m.size = nil + m.addsize = nil +} + +// SetURL sets the "url" field. +func (m *MediaMutation) SetURL(s string) { + m.url = &s +} + +// URL returns the value of the "url" field in the mutation. +func (m *MediaMutation) URL() (r string, exists bool) { + v := m.url + if v == nil { + return + } + return *v, true +} + +// OldURL returns the old "url" field's value of the Media entity. +// If the Media object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MediaMutation) OldURL(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldURL is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldURL requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldURL: %w", err) + } + return oldValue.URL, nil +} + +// ResetURL resets all changes to the "url" field. +func (m *MediaMutation) ResetURL() { + m.url = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *MediaMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *MediaMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Media entity. +// If the Media object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MediaMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *MediaMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *MediaMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *MediaMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Media entity. +// If the Media object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MediaMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *MediaMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetCreatedBy sets the "created_by" field. +func (m *MediaMutation) SetCreatedBy(s string) { + m.created_by = &s +} + +// CreatedBy returns the value of the "created_by" field in the mutation. +func (m *MediaMutation) CreatedBy() (r string, exists bool) { + v := m.created_by + if v == nil { + return + } + return *v, true +} + +// OldCreatedBy returns the old "created_by" field's value of the Media entity. +// If the Media object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MediaMutation) OldCreatedBy(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedBy: %w", err) + } + return oldValue.CreatedBy, nil +} + +// ClearCreatedBy clears the value of the "created_by" field. +func (m *MediaMutation) ClearCreatedBy() { + m.created_by = nil + m.clearedFields[media.FieldCreatedBy] = struct{}{} +} + +// CreatedByCleared returns if the "created_by" field was cleared in this mutation. +func (m *MediaMutation) CreatedByCleared() bool { + _, ok := m.clearedFields[media.FieldCreatedBy] + return ok +} + +// ResetCreatedBy resets all changes to the "created_by" field. 
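+//
+// Unlike ClearCreatedBy, which stages a NULL write, Reset merely forgets any
+// staged change (a sketch):
+//
+//	m.ClearCreatedBy()
+//	m.ResetCreatedBy() // created_by is now neither set nor cleared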
+func (m *MediaMutation) ResetCreatedBy() {
+	m.created_by = nil
+	delete(m.clearedFields, media.FieldCreatedBy)
+}
+
+// SetOwnerID sets the "owner" edge to the User entity by id.
+func (m *MediaMutation) SetOwnerID(id int) {
+	m.owner = &id
+}
+
+// ClearOwner clears the "owner" edge to the User entity.
+func (m *MediaMutation) ClearOwner() {
+	m.clearedowner = true
+}
+
+// OwnerCleared reports if the "owner" edge to the User entity was cleared.
+func (m *MediaMutation) OwnerCleared() bool {
+	return m.clearedowner
+}
+
+// OwnerID returns the "owner" edge ID in the mutation.
+func (m *MediaMutation) OwnerID() (id int, exists bool) {
+	if m.owner != nil {
+		return *m.owner, true
+	}
+	return
+}
+
+// OwnerIDs returns the "owner" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// OwnerID instead. It exists only for internal usage by the builders.
+func (m *MediaMutation) OwnerIDs() (ids []int) {
+	if id := m.owner; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetOwner resets all changes to the "owner" edge.
+func (m *MediaMutation) ResetOwner() {
+	m.owner = nil
+	m.clearedowner = false
+}
+
+// Where appends a list of predicates to the MediaMutation builder.
+func (m *MediaMutation) Where(ps ...predicate.Media) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the MediaMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *MediaMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.Media, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *MediaMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *MediaMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (Media).
+func (m *MediaMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *MediaMutation) Fields() []string {
+	fields := make([]string, 0, 8)
+	if m.storage_id != nil {
+		fields = append(fields, media.FieldStorageID)
+	}
+	if m.original_name != nil {
+		fields = append(fields, media.FieldOriginalName)
+	}
+	if m.mime_type != nil {
+		fields = append(fields, media.FieldMimeType)
+	}
+	if m.size != nil {
+		fields = append(fields, media.FieldSize)
+	}
+	if m.url != nil {
+		fields = append(fields, media.FieldURL)
+	}
+	if m.created_at != nil {
+		fields = append(fields, media.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, media.FieldUpdatedAt)
+	}
+	if m.created_by != nil {
+		fields = append(fields, media.FieldCreatedBy)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
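+//
+// Combined with Fields, this enables schema-agnostic inspection, e.g. in an
+// audit hook (a sketch; the log call is illustrative):
+//
+//	for _, name := range m.Fields() {
+//		if v, ok := m.Field(name); ok {
+//			log.Printf("media.%s = %v", name, v)
+//		}
+//	}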
+func (m *MediaMutation) Field(name string) (ent.Value, bool) { + switch name { + case media.FieldStorageID: + return m.StorageID() + case media.FieldOriginalName: + return m.OriginalName() + case media.FieldMimeType: + return m.MimeType() + case media.FieldSize: + return m.Size() + case media.FieldURL: + return m.URL() + case media.FieldCreatedAt: + return m.CreatedAt() + case media.FieldUpdatedAt: + return m.UpdatedAt() + case media.FieldCreatedBy: + return m.CreatedBy() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *MediaMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case media.FieldStorageID: + return m.OldStorageID(ctx) + case media.FieldOriginalName: + return m.OldOriginalName(ctx) + case media.FieldMimeType: + return m.OldMimeType(ctx) + case media.FieldSize: + return m.OldSize(ctx) + case media.FieldURL: + return m.OldURL(ctx) + case media.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case media.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case media.FieldCreatedBy: + return m.OldCreatedBy(ctx) + } + return nil, fmt.Errorf("unknown Media field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *MediaMutation) SetField(name string, value ent.Value) error { + switch name { + case media.FieldStorageID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStorageID(v) + return nil + case media.FieldOriginalName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOriginalName(v) + return nil + case media.FieldMimeType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMimeType(v) + return nil + case media.FieldSize: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSize(v) + return nil + case media.FieldURL: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetURL(v) + return nil + case media.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case media.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case media.FieldCreatedBy: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedBy(v) + return nil + } + return fmt.Errorf("unknown Media field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *MediaMutation) AddedFields() []string { + var fields []string + if m.addsize != nil { + fields = append(fields, media.FieldSize) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. 
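+//
+// For Media the only numeric field is "size", e.g. (a sketch):
+//
+//	m.AddSize(512)
+//	delta, ok := m.AddedField(media.FieldSize) // int64(512), true
+//	_, _ = delta, ok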
+func (m *MediaMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case media.FieldSize: + return m.AddedSize() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *MediaMutation) AddField(name string, value ent.Value) error { + switch name { + case media.FieldSize: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddSize(v) + return nil + } + return fmt.Errorf("unknown Media numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *MediaMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(media.FieldCreatedBy) { + fields = append(fields, media.FieldCreatedBy) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *MediaMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *MediaMutation) ClearField(name string) error { + switch name { + case media.FieldCreatedBy: + m.ClearCreatedBy() + return nil + } + return fmt.Errorf("unknown Media nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *MediaMutation) ResetField(name string) error { + switch name { + case media.FieldStorageID: + m.ResetStorageID() + return nil + case media.FieldOriginalName: + m.ResetOriginalName() + return nil + case media.FieldMimeType: + m.ResetMimeType() + return nil + case media.FieldSize: + m.ResetSize() + return nil + case media.FieldURL: + m.ResetURL() + return nil + case media.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case media.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case media.FieldCreatedBy: + m.ResetCreatedBy() + return nil + } + return fmt.Errorf("unknown Media field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *MediaMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.owner != nil { + edges = append(edges, media.EdgeOwner) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *MediaMutation) AddedIDs(name string) []ent.Value { + switch name { + case media.EdgeOwner: + if id := m.owner; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *MediaMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *MediaMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *MediaMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedowner { + edges = append(edges, media.EdgeOwner) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. 
+func (m *MediaMutation) EdgeCleared(name string) bool { + switch name { + case media.EdgeOwner: + return m.clearedowner + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *MediaMutation) ClearEdge(name string) error { + switch name { + case media.EdgeOwner: + m.ClearOwner() + return nil + } + return fmt.Errorf("unknown Media unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *MediaMutation) ResetEdge(name string) error { + switch name { + case media.EdgeOwner: + m.ResetOwner() + return nil + } + return fmt.Errorf("unknown Media edge %s", name) +} + +// PermissionMutation represents an operation that mutates the Permission nodes in the graph. +type PermissionMutation struct { + config + op Op + typ string + id *int + resource *string + action *string + description *string + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + roles map[int]struct{} + removedroles map[int]struct{} + clearedroles bool + done bool + oldValue func(context.Context) (*Permission, error) + predicates []predicate.Permission +} + +var _ ent.Mutation = (*PermissionMutation)(nil) + +// permissionOption allows management of the mutation configuration using functional options. +type permissionOption func(*PermissionMutation) + +// newPermissionMutation creates new mutation for the Permission entity. +func newPermissionMutation(c config, op Op, opts ...permissionOption) *PermissionMutation { + m := &PermissionMutation{ + config: c, + op: op, + typ: TypePermission, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withPermissionID sets the ID field of the mutation. +func withPermissionID(id int) permissionOption { + return func(m *PermissionMutation) { + var ( + err error + once sync.Once + value *Permission + ) + m.oldValue = func(ctx context.Context) (*Permission, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Permission.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withPermission sets the old Permission of the mutation. +func withPermission(node *Permission) permissionOption { + return func(m *PermissionMutation) { + m.oldValue = func(context.Context) (*Permission, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m PermissionMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m PermissionMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. 
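+//
+// When no single ID is staged, IDs (below) resolves the matching rows from
+// the mutation's predicates instead, e.g. (illustrative):
+//
+//	ids, err := m.IDs(ctx) // runs a query for OpUpdate/OpDelete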
+func (m *PermissionMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *PermissionMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Permission.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetResource sets the "resource" field. +func (m *PermissionMutation) SetResource(s string) { + m.resource = &s +} + +// Resource returns the value of the "resource" field in the mutation. +func (m *PermissionMutation) Resource() (r string, exists bool) { + v := m.resource + if v == nil { + return + } + return *v, true +} + +// OldResource returns the old "resource" field's value of the Permission entity. +// If the Permission object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PermissionMutation) OldResource(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldResource is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldResource requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldResource: %w", err) + } + return oldValue.Resource, nil +} + +// ResetResource resets all changes to the "resource" field. +func (m *PermissionMutation) ResetResource() { + m.resource = nil +} + +// SetAction sets the "action" field. +func (m *PermissionMutation) SetAction(s string) { + m.action = &s +} + +// Action returns the value of the "action" field in the mutation. +func (m *PermissionMutation) Action() (r string, exists bool) { + v := m.action + if v == nil { + return + } + return *v, true +} + +// OldAction returns the old "action" field's value of the Permission entity. +// If the Permission object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PermissionMutation) OldAction(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAction is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAction requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAction: %w", err) + } + return oldValue.Action, nil +} + +// ResetAction resets all changes to the "action" field. +func (m *PermissionMutation) ResetAction() { + m.action = nil +} + +// SetDescription sets the "description" field. +func (m *PermissionMutation) SetDescription(s string) { + m.description = &s +} + +// Description returns the value of the "description" field in the mutation. 
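
The Old* accessors above are deliberately restricted to UpdateOne, the only operation where "the previous value of this row" is well defined; bulk operations fall back to the predicate query in IDs. That restriction is what makes a generic diff hook possible. A minimal sketch built only on ent.Mutation interface methods visible in this file (the logging itself is illustrative):

    import (
        "context"
        "fmt"

        "entgo.io/ent"
    )

    // DiffFields logs old -> new for every staged field on single-row updates.
    func DiffFields() ent.Hook {
        return func(next ent.Mutator) ent.Mutator {
            return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
                if m.Op().Is(ent.OpUpdateOne) {
                    for _, f := range m.Fields() {
                        oldv, err := m.OldField(ctx, f) // triggers the memoized row fetch
                        if err != nil {
                            return nil, err
                        }
                        newv, _ := m.Field(f)
                        fmt.Printf("%s.%s: %v -> %v\n", m.Type(), f, oldv, newv)
                    }
                }
                return next.Mutate(ctx, m)
            })
        }
    }
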
+func (m *PermissionMutation) Description() (r string, exists bool) { + v := m.description + if v == nil { + return + } + return *v, true +} + +// OldDescription returns the old "description" field's value of the Permission entity. +// If the Permission object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PermissionMutation) OldDescription(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDescription: %w", err) + } + return oldValue.Description, nil +} + +// ClearDescription clears the value of the "description" field. +func (m *PermissionMutation) ClearDescription() { + m.description = nil + m.clearedFields[permission.FieldDescription] = struct{}{} +} + +// DescriptionCleared returns if the "description" field was cleared in this mutation. +func (m *PermissionMutation) DescriptionCleared() bool { + _, ok := m.clearedFields[permission.FieldDescription] + return ok +} + +// ResetDescription resets all changes to the "description" field. +func (m *PermissionMutation) ResetDescription() { + m.description = nil + delete(m.clearedFields, permission.FieldDescription) +} + +// SetCreatedAt sets the "created_at" field. +func (m *PermissionMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *PermissionMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Permission entity. +// If the Permission object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PermissionMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *PermissionMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *PermissionMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *PermissionMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Permission entity. +// If the Permission object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *PermissionMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *PermissionMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// AddRoleIDs adds the "roles" edge to the Role entity by ids. +func (m *PermissionMutation) AddRoleIDs(ids ...int) { + if m.roles == nil { + m.roles = make(map[int]struct{}) + } + for i := range ids { + m.roles[ids[i]] = struct{}{} + } +} + +// ClearRoles clears the "roles" edge to the Role entity. +func (m *PermissionMutation) ClearRoles() { + m.clearedroles = true +} + +// RolesCleared reports if the "roles" edge to the Role entity was cleared. +func (m *PermissionMutation) RolesCleared() bool { + return m.clearedroles +} + +// RemoveRoleIDs removes the "roles" edge to the Role entity by IDs. +func (m *PermissionMutation) RemoveRoleIDs(ids ...int) { + if m.removedroles == nil { + m.removedroles = make(map[int]struct{}) + } + for i := range ids { + delete(m.roles, ids[i]) + m.removedroles[ids[i]] = struct{}{} + } +} + +// RemovedRoles returns the removed IDs of the "roles" edge to the Role entity. +func (m *PermissionMutation) RemovedRolesIDs() (ids []int) { + for id := range m.removedroles { + ids = append(ids, id) + } + return +} + +// RolesIDs returns the "roles" edge IDs in the mutation. +func (m *PermissionMutation) RolesIDs() (ids []int) { + for id := range m.roles { + ids = append(ids, id) + } + return +} + +// ResetRoles resets all changes to the "roles" edge. +func (m *PermissionMutation) ResetRoles() { + m.roles = nil + m.clearedroles = false + m.removedroles = nil +} + +// Where appends a list predicates to the PermissionMutation builder. +func (m *PermissionMutation) Where(ps ...predicate.Permission) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the PermissionMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *PermissionMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Permission, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *PermissionMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *PermissionMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Permission). +func (m *PermissionMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
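
AddRoleIDs and RemoveRoleIDs above use map[int]struct{} as a set, and removal does double duty: it drops the ID from the pending additions and records it as removed, so an ID added and then removed within one mutation ends up recorded only as removed. The same ledger in isolation (names are illustrative):

    // edgeLedger mirrors the added/removed bookkeeping of a generated mutation.
    type edgeLedger struct {
        added   map[int]struct{}
        removed map[int]struct{}
    }

    func (l *edgeLedger) add(ids ...int) {
        if l.added == nil {
            l.added = make(map[int]struct{})
        }
        for _, id := range ids {
            l.added[id] = struct{}{}
        }
    }

    func (l *edgeLedger) remove(ids ...int) {
        if l.removed == nil {
            l.removed = make(map[int]struct{})
        }
        for _, id := range ids {
            delete(l.added, id) // cancel a pending add for the same ID
            l.removed[id] = struct{}{}
        }
    }
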
+func (m *PermissionMutation) Fields() []string { + fields := make([]string, 0, 5) + if m.resource != nil { + fields = append(fields, permission.FieldResource) + } + if m.action != nil { + fields = append(fields, permission.FieldAction) + } + if m.description != nil { + fields = append(fields, permission.FieldDescription) + } + if m.created_at != nil { + fields = append(fields, permission.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, permission.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *PermissionMutation) Field(name string) (ent.Value, bool) { + switch name { + case permission.FieldResource: + return m.Resource() + case permission.FieldAction: + return m.Action() + case permission.FieldDescription: + return m.Description() + case permission.FieldCreatedAt: + return m.CreatedAt() + case permission.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *PermissionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case permission.FieldResource: + return m.OldResource(ctx) + case permission.FieldAction: + return m.OldAction(ctx) + case permission.FieldDescription: + return m.OldDescription(ctx) + case permission.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case permission.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown Permission field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PermissionMutation) SetField(name string, value ent.Value) error { + switch name { + case permission.FieldResource: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetResource(v) + return nil + case permission.FieldAction: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAction(v) + return nil + case permission.FieldDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDescription(v) + return nil + case permission.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case permission.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown Permission field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *PermissionMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *PermissionMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. 
+// It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PermissionMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Permission numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *PermissionMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(permission.FieldDescription) { + fields = append(fields, permission.FieldDescription) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *PermissionMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *PermissionMutation) ClearField(name string) error { + switch name { + case permission.FieldDescription: + m.ClearDescription() + return nil + } + return fmt.Errorf("unknown Permission nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *PermissionMutation) ResetField(name string) error { + switch name { + case permission.FieldResource: + m.ResetResource() + return nil + case permission.FieldAction: + m.ResetAction() + return nil + case permission.FieldDescription: + m.ResetDescription() + return nil + case permission.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case permission.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown Permission field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *PermissionMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.roles != nil { + edges = append(edges, permission.EdgeRoles) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *PermissionMutation) AddedIDs(name string) []ent.Value { + switch name { + case permission.EdgeRoles: + ids := make([]ent.Value, 0, len(m.roles)) + for id := range m.roles { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *PermissionMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedroles != nil { + edges = append(edges, permission.EdgeRoles) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *PermissionMutation) RemovedIDs(name string) []ent.Value { + switch name { + case permission.EdgeRoles: + ids := make([]ent.Value, 0, len(m.removedroles)) + for id := range m.removedroles { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *PermissionMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedroles { + edges = append(edges, permission.EdgeRoles) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation.
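
Because SetField is string-keyed and type-checked at runtime, cross-cutting behavior can be written once against ent.Mutation rather than per entity. A sketch of a hook that stamps updated_at on every update, assuming the column is named "updated_at" where it exists; schemas without it (such as PostContent in this patch) return the "unknown field" error, which the sketch treats as a no-op:

    import (
        "context"
        "time"

        "entgo.io/ent"
    )

    // TouchUpdatedAt stages updated_at = now on every update mutation.
    func TouchUpdatedAt() ent.Hook {
        return func(next ent.Mutator) ent.Mutator {
            return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
                if m.Op().Is(ent.OpUpdate | ent.OpUpdateOne) {
                    // Schemas without an updated_at column reject the name;
                    // skip them rather than failing the mutation.
                    _ = m.SetField("updated_at", time.Now())
                }
                return next.Mutate(ctx, m)
            })
        }
    }
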
+func (m *PermissionMutation) EdgeCleared(name string) bool { + switch name { + case permission.EdgeRoles: + return m.clearedroles + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *PermissionMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown Permission unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *PermissionMutation) ResetEdge(name string) error { + switch name { + case permission.EdgeRoles: + m.ResetRoles() + return nil + } + return fmt.Errorf("unknown Permission edge %s", name) +} + +// PostMutation represents an operation that mutates the Post nodes in the graph. +type PostMutation struct { + config + op Op + typ string + id *int + status *post.Status + slug *string + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + contents map[int]struct{} + removedcontents map[int]struct{} + clearedcontents bool + contributors map[int]struct{} + removedcontributors map[int]struct{} + clearedcontributors bool + category *int + clearedcategory bool + done bool + oldValue func(context.Context) (*Post, error) + predicates []predicate.Post +} + +var _ ent.Mutation = (*PostMutation)(nil) + +// postOption allows management of the mutation configuration using functional options. +type postOption func(*PostMutation) + +// newPostMutation creates new mutation for the Post entity. +func newPostMutation(c config, op Op, opts ...postOption) *PostMutation { + m := &PostMutation{ + config: c, + op: op, + typ: TypePost, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withPostID sets the ID field of the mutation. +func withPostID(id int) postOption { + return func(m *PostMutation) { + var ( + err error + once sync.Once + value *Post + ) + m.oldValue = func(ctx context.Context) (*Post, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Post.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withPost sets the old Post of the mutation. +func withPost(node *Post) postOption { + return func(m *PostMutation) { + m.oldValue = func(context.Context) (*Post, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m PostMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m PostMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *PostMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. 
+// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *PostMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Post.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetStatus sets the "status" field. +func (m *PostMutation) SetStatus(po post.Status) { + m.status = &po +} + +// Status returns the value of the "status" field in the mutation. +func (m *PostMutation) Status() (r post.Status, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the Post entity. +// If the Post object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PostMutation) OldStatus(ctx context.Context) (v post.Status, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *PostMutation) ResetStatus() { + m.status = nil +} + +// SetSlug sets the "slug" field. +func (m *PostMutation) SetSlug(s string) { + m.slug = &s +} + +// Slug returns the value of the "slug" field in the mutation. +func (m *PostMutation) Slug() (r string, exists bool) { + v := m.slug + if v == nil { + return + } + return *v, true +} + +// OldSlug returns the old "slug" field's value of the Post entity. +// If the Post object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PostMutation) OldSlug(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSlug is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSlug requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSlug: %w", err) + } + return oldValue.Slug, nil +} + +// ResetSlug resets all changes to the "slug" field. +func (m *PostMutation) ResetSlug() { + m.slug = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *PostMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *PostMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Post entity. +// If the Post object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
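
post.Status above is the string-backed enum type that ent generates for an enum field; its concrete values live in the generated post package and are not part of this hunk. The generated shape is roughly the following, with placeholder values standing in for whatever backend/ent/schema/post.go actually declares:

    import "fmt"

    // Status is the enum type generated for the "status" field.
    type Status string

    const (
        StatusDraft     Status = "draft"     // placeholder value
        StatusPublished Status = "published" // placeholder value
    )

    // StatusValidator mirrors the validator ent generates to guard setters.
    func StatusValidator(s Status) error {
        switch s {
        case StatusDraft, StatusPublished:
            return nil
        default:
            return fmt.Errorf("post: invalid enum value for status field: %q", s)
        }
    }
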
+func (m *PostMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *PostMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *PostMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *PostMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Post entity. +// If the Post object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PostMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *PostMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// AddContentIDs adds the "contents" edge to the PostContent entity by ids. +func (m *PostMutation) AddContentIDs(ids ...int) { + if m.contents == nil { + m.contents = make(map[int]struct{}) + } + for i := range ids { + m.contents[ids[i]] = struct{}{} + } +} + +// ClearContents clears the "contents" edge to the PostContent entity. +func (m *PostMutation) ClearContents() { + m.clearedcontents = true +} + +// ContentsCleared reports if the "contents" edge to the PostContent entity was cleared. +func (m *PostMutation) ContentsCleared() bool { + return m.clearedcontents +} + +// RemoveContentIDs removes the "contents" edge to the PostContent entity by IDs. +func (m *PostMutation) RemoveContentIDs(ids ...int) { + if m.removedcontents == nil { + m.removedcontents = make(map[int]struct{}) + } + for i := range ids { + delete(m.contents, ids[i]) + m.removedcontents[ids[i]] = struct{}{} + } +} + +// RemovedContents returns the removed IDs of the "contents" edge to the PostContent entity. +func (m *PostMutation) RemovedContentsIDs() (ids []int) { + for id := range m.removedcontents { + ids = append(ids, id) + } + return +} + +// ContentsIDs returns the "contents" edge IDs in the mutation. +func (m *PostMutation) ContentsIDs() (ids []int) { + for id := range m.contents { + ids = append(ids, id) + } + return +} + +// ResetContents resets all changes to the "contents" edge. +func (m *PostMutation) ResetContents() { + m.contents = nil + m.clearedcontents = false + m.removedcontents = nil +} + +// AddContributorIDs adds the "contributors" edge to the PostContributor entity by ids. 
+func (m *PostMutation) AddContributorIDs(ids ...int) { + if m.contributors == nil { + m.contributors = make(map[int]struct{}) + } + for i := range ids { + m.contributors[ids[i]] = struct{}{} + } +} + +// ClearContributors clears the "contributors" edge to the PostContributor entity. +func (m *PostMutation) ClearContributors() { + m.clearedcontributors = true +} + +// ContributorsCleared reports if the "contributors" edge to the PostContributor entity was cleared. +func (m *PostMutation) ContributorsCleared() bool { + return m.clearedcontributors +} + +// RemoveContributorIDs removes the "contributors" edge to the PostContributor entity by IDs. +func (m *PostMutation) RemoveContributorIDs(ids ...int) { + if m.removedcontributors == nil { + m.removedcontributors = make(map[int]struct{}) + } + for i := range ids { + delete(m.contributors, ids[i]) + m.removedcontributors[ids[i]] = struct{}{} + } +} + +// RemovedContributors returns the removed IDs of the "contributors" edge to the PostContributor entity. +func (m *PostMutation) RemovedContributorsIDs() (ids []int) { + for id := range m.removedcontributors { + ids = append(ids, id) + } + return +} + +// ContributorsIDs returns the "contributors" edge IDs in the mutation. +func (m *PostMutation) ContributorsIDs() (ids []int) { + for id := range m.contributors { + ids = append(ids, id) + } + return +} + +// ResetContributors resets all changes to the "contributors" edge. +func (m *PostMutation) ResetContributors() { + m.contributors = nil + m.clearedcontributors = false + m.removedcontributors = nil +} + +// SetCategoryID sets the "category" edge to the Category entity by id. +func (m *PostMutation) SetCategoryID(id int) { + m.category = &id +} + +// ClearCategory clears the "category" edge to the Category entity. +func (m *PostMutation) ClearCategory() { + m.clearedcategory = true +} + +// CategoryCleared reports if the "category" edge to the Category entity was cleared. +func (m *PostMutation) CategoryCleared() bool { + return m.clearedcategory +} + +// CategoryID returns the "category" edge ID in the mutation. +func (m *PostMutation) CategoryID() (id int, exists bool) { + if m.category != nil { + return *m.category, true + } + return +} + +// CategoryIDs returns the "category" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// CategoryID instead. It exists only for internal usage by the builders. +func (m *PostMutation) CategoryIDs() (ids []int) { + if id := m.category; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetCategory resets all changes to the "category" edge. +func (m *PostMutation) ResetCategory() { + m.category = nil + m.clearedcategory = false +} + +// Where appends a list predicates to the PostMutation builder. +func (m *PostMutation) Where(ps ...predicate.Post) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the PostMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *PostMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Post, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *PostMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *PostMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Post). 
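
Unlike the map-backed contents and contributors edges, category above is a unique edge held as a single *int; the slice-returning CategoryIDs exists only so the generic builders can treat every edge uniformly. Application code goes through the generated builder instead; a sketch, assuming the generated Post client plus surrounding client, ctx, postID, and catID variables:

    p, err := client.Post.
        UpdateOneID(postID).
        SetCategoryID(catID). // stages the unique edge; ClearCategory() would detach it
        Save(ctx)
    if err != nil {
        return err
    }
    _ = p
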
+func (m *PostMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *PostMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.status != nil { + fields = append(fields, post.FieldStatus) + } + if m.slug != nil { + fields = append(fields, post.FieldSlug) + } + if m.created_at != nil { + fields = append(fields, post.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, post.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *PostMutation) Field(name string) (ent.Value, bool) { + switch name { + case post.FieldStatus: + return m.Status() + case post.FieldSlug: + return m.Slug() + case post.FieldCreatedAt: + return m.CreatedAt() + case post.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *PostMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case post.FieldStatus: + return m.OldStatus(ctx) + case post.FieldSlug: + return m.OldSlug(ctx) + case post.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case post.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown Post field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PostMutation) SetField(name string, value ent.Value) error { + switch name { + case post.FieldStatus: + v, ok := value.(post.Status) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case post.FieldSlug: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSlug(v) + return nil + case post.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case post.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown Post field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *PostMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *PostMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PostMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Post numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *PostMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *PostMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *PostMutation) ClearField(name string) error { + return fmt.Errorf("unknown Post nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *PostMutation) ResetField(name string) error { + switch name { + case post.FieldStatus: + m.ResetStatus() + return nil + case post.FieldSlug: + m.ResetSlug() + return nil + case post.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case post.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown Post field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *PostMutation) AddedEdges() []string { + edges := make([]string, 0, 3) + if m.contents != nil { + edges = append(edges, post.EdgeContents) + } + if m.contributors != nil { + edges = append(edges, post.EdgeContributors) + } + if m.category != nil { + edges = append(edges, post.EdgeCategory) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *PostMutation) AddedIDs(name string) []ent.Value { + switch name { + case post.EdgeContents: + ids := make([]ent.Value, 0, len(m.contents)) + for id := range m.contents { + ids = append(ids, id) + } + return ids + case post.EdgeContributors: + ids := make([]ent.Value, 0, len(m.contributors)) + for id := range m.contributors { + ids = append(ids, id) + } + return ids + case post.EdgeCategory: + if id := m.category; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *PostMutation) RemovedEdges() []string { + edges := make([]string, 0, 3) + if m.removedcontents != nil { + edges = append(edges, post.EdgeContents) + } + if m.removedcontributors != nil { + edges = append(edges, post.EdgeContributors) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *PostMutation) RemovedIDs(name string) []ent.Value { + switch name { + case post.EdgeContents: + ids := make([]ent.Value, 0, len(m.removedcontents)) + for id := range m.removedcontents { + ids = append(ids, id) + } + return ids + case post.EdgeContributors: + ids := make([]ent.Value, 0, len(m.removedcontributors)) + for id := range m.removedcontributors { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *PostMutation) ClearedEdges() []string { + edges := make([]string, 0, 3) + if m.clearedcontents { + edges = append(edges, post.EdgeContents) + } + if m.clearedcontributors { + edges = append(edges, post.EdgeContributors) + } + if m.clearedcategory { + edges = append(edges, post.EdgeCategory) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. 
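
The Added/Removed/ClearedEdges accessors above give generic code a complete view of staged graph changes. A sketch of a guard hook that refuses to clear any edge wholesale (the policy itself is purely illustrative):

    import (
        "context"
        "fmt"

        "entgo.io/ent"
    )

    // DenyEdgeClearing aborts any mutation that clears an edge outright.
    func DenyEdgeClearing() ent.Hook {
        return func(next ent.Mutator) ent.Mutator {
            return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
                if edges := m.ClearedEdges(); len(edges) > 0 {
                    return nil, fmt.Errorf("%s: clearing edges %v is not allowed", m.Type(), edges)
                }
                return next.Mutate(ctx, m)
            })
        }
    }
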
+func (m *PostMutation) EdgeCleared(name string) bool { + switch name { + case post.EdgeContents: + return m.clearedcontents + case post.EdgeContributors: + return m.clearedcontributors + case post.EdgeCategory: + return m.clearedcategory + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *PostMutation) ClearEdge(name string) error { + switch name { + case post.EdgeCategory: + m.ClearCategory() + return nil + } + return fmt.Errorf("unknown Post unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *PostMutation) ResetEdge(name string) error { + switch name { + case post.EdgeContents: + m.ResetContents() + return nil + case post.EdgeContributors: + m.ResetContributors() + return nil + case post.EdgeCategory: + m.ResetCategory() + return nil + } + return fmt.Errorf("unknown Post edge %s", name) +} + +// PostContentMutation represents an operation that mutates the PostContent nodes in the graph. +type PostContentMutation struct { + config + op Op + typ string + id *int + language_code *postcontent.LanguageCode + title *string + content_markdown *string + summary *string + meta_keywords *string + meta_description *string + slug *string + clearedFields map[string]struct{} + post *int + clearedpost bool + done bool + oldValue func(context.Context) (*PostContent, error) + predicates []predicate.PostContent +} + +var _ ent.Mutation = (*PostContentMutation)(nil) + +// postcontentOption allows management of the mutation configuration using functional options. +type postcontentOption func(*PostContentMutation) + +// newPostContentMutation creates new mutation for the PostContent entity. +func newPostContentMutation(c config, op Op, opts ...postcontentOption) *PostContentMutation { + m := &PostContentMutation{ + config: c, + op: op, + typ: TypePostContent, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withPostContentID sets the ID field of the mutation. +func withPostContentID(id int) postcontentOption { + return func(m *PostContentMutation) { + var ( + err error + once sync.Once + value *PostContent + ) + m.oldValue = func(ctx context.Context) (*PostContent, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().PostContent.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withPostContent sets the old PostContent of the mutation. +func withPostContent(node *PostContent) postcontentOption { + return func(m *PostContentMutation) { + m.oldValue = func(context.Context) (*PostContent, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m PostContentMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. 
+func (m PostContentMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *PostContentMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *PostContentMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().PostContent.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetLanguageCode sets the "language_code" field. +func (m *PostContentMutation) SetLanguageCode(pc postcontent.LanguageCode) { + m.language_code = &pc +} + +// LanguageCode returns the value of the "language_code" field in the mutation. +func (m *PostContentMutation) LanguageCode() (r postcontent.LanguageCode, exists bool) { + v := m.language_code + if v == nil { + return + } + return *v, true +} + +// OldLanguageCode returns the old "language_code" field's value of the PostContent entity. +// If the PostContent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PostContentMutation) OldLanguageCode(ctx context.Context) (v postcontent.LanguageCode, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLanguageCode is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLanguageCode requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLanguageCode: %w", err) + } + return oldValue.LanguageCode, nil +} + +// ResetLanguageCode resets all changes to the "language_code" field. +func (m *PostContentMutation) ResetLanguageCode() { + m.language_code = nil +} + +// SetTitle sets the "title" field. +func (m *PostContentMutation) SetTitle(s string) { + m.title = &s +} + +// Title returns the value of the "title" field in the mutation. +func (m *PostContentMutation) Title() (r string, exists bool) { + v := m.title + if v == nil { + return + } + return *v, true +} + +// OldTitle returns the old "title" field's value of the PostContent entity. +// If the PostContent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *PostContentMutation) OldTitle(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTitle is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTitle requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTitle: %w", err) + } + return oldValue.Title, nil +} + +// ResetTitle resets all changes to the "title" field. +func (m *PostContentMutation) ResetTitle() { + m.title = nil +} + +// SetContentMarkdown sets the "content_markdown" field. +func (m *PostContentMutation) SetContentMarkdown(s string) { + m.content_markdown = &s +} + +// ContentMarkdown returns the value of the "content_markdown" field in the mutation. +func (m *PostContentMutation) ContentMarkdown() (r string, exists bool) { + v := m.content_markdown + if v == nil { + return + } + return *v, true +} + +// OldContentMarkdown returns the old "content_markdown" field's value of the PostContent entity. +// If the PostContent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PostContentMutation) OldContentMarkdown(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldContentMarkdown is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldContentMarkdown requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldContentMarkdown: %w", err) + } + return oldValue.ContentMarkdown, nil +} + +// ResetContentMarkdown resets all changes to the "content_markdown" field. +func (m *PostContentMutation) ResetContentMarkdown() { + m.content_markdown = nil +} + +// SetSummary sets the "summary" field. +func (m *PostContentMutation) SetSummary(s string) { + m.summary = &s +} + +// Summary returns the value of the "summary" field in the mutation. +func (m *PostContentMutation) Summary() (r string, exists bool) { + v := m.summary + if v == nil { + return + } + return *v, true +} + +// OldSummary returns the old "summary" field's value of the PostContent entity. +// If the PostContent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PostContentMutation) OldSummary(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSummary is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSummary requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSummary: %w", err) + } + return oldValue.Summary, nil +} + +// ResetSummary resets all changes to the "summary" field. +func (m *PostContentMutation) ResetSummary() { + m.summary = nil +} + +// SetMetaKeywords sets the "meta_keywords" field. +func (m *PostContentMutation) SetMetaKeywords(s string) { + m.meta_keywords = &s +} + +// MetaKeywords returns the value of the "meta_keywords" field in the mutation. 
+func (m *PostContentMutation) MetaKeywords() (r string, exists bool) { + v := m.meta_keywords + if v == nil { + return + } + return *v, true +} + +// OldMetaKeywords returns the old "meta_keywords" field's value of the PostContent entity. +// If the PostContent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PostContentMutation) OldMetaKeywords(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMetaKeywords is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMetaKeywords requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMetaKeywords: %w", err) + } + return oldValue.MetaKeywords, nil +} + +// ClearMetaKeywords clears the value of the "meta_keywords" field. +func (m *PostContentMutation) ClearMetaKeywords() { + m.meta_keywords = nil + m.clearedFields[postcontent.FieldMetaKeywords] = struct{}{} +} + +// MetaKeywordsCleared returns if the "meta_keywords" field was cleared in this mutation. +func (m *PostContentMutation) MetaKeywordsCleared() bool { + _, ok := m.clearedFields[postcontent.FieldMetaKeywords] + return ok +} + +// ResetMetaKeywords resets all changes to the "meta_keywords" field. +func (m *PostContentMutation) ResetMetaKeywords() { + m.meta_keywords = nil + delete(m.clearedFields, postcontent.FieldMetaKeywords) +} + +// SetMetaDescription sets the "meta_description" field. +func (m *PostContentMutation) SetMetaDescription(s string) { + m.meta_description = &s +} + +// MetaDescription returns the value of the "meta_description" field in the mutation. +func (m *PostContentMutation) MetaDescription() (r string, exists bool) { + v := m.meta_description + if v == nil { + return + } + return *v, true +} + +// OldMetaDescription returns the old "meta_description" field's value of the PostContent entity. +// If the PostContent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PostContentMutation) OldMetaDescription(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMetaDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMetaDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMetaDescription: %w", err) + } + return oldValue.MetaDescription, nil +} + +// ClearMetaDescription clears the value of the "meta_description" field. +func (m *PostContentMutation) ClearMetaDescription() { + m.meta_description = nil + m.clearedFields[postcontent.FieldMetaDescription] = struct{}{} +} + +// MetaDescriptionCleared returns if the "meta_description" field was cleared in this mutation. +func (m *PostContentMutation) MetaDescriptionCleared() bool { + _, ok := m.clearedFields[postcontent.FieldMetaDescription] + return ok +} + +// ResetMetaDescription resets all changes to the "meta_description" field. +func (m *PostContentMutation) ResetMetaDescription() { + m.meta_description = nil + delete(m.clearedFields, postcontent.FieldMetaDescription) +} + +// SetSlug sets the "slug" field. 
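
ClearMetaKeywords and ResetMetaKeywords above do different jobs: Clear stages an explicit NULL that reaches the database on save, while Reset merely discards whatever was staged and leaves the row untouched. Through the generated builder the distinction looks like this, as a sketch assuming the update builder exposes ClearMetaKeywords (which ent emits for optional fields) and that client, ctx, and id exist:

    // Stages meta_keywords = NULL for this row.
    pc, err := client.PostContent.
        UpdateOneID(id).
        ClearMetaKeywords().
        Save(ctx)
    if err != nil {
        return err
    }
    _ = pc
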
+func (m *PostContentMutation) SetSlug(s string) { + m.slug = &s +} + +// Slug returns the value of the "slug" field in the mutation. +func (m *PostContentMutation) Slug() (r string, exists bool) { + v := m.slug + if v == nil { + return + } + return *v, true +} + +// OldSlug returns the old "slug" field's value of the PostContent entity. +// If the PostContent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PostContentMutation) OldSlug(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSlug is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSlug requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSlug: %w", err) + } + return oldValue.Slug, nil +} + +// ResetSlug resets all changes to the "slug" field. +func (m *PostContentMutation) ResetSlug() { + m.slug = nil +} + +// SetPostID sets the "post" edge to the Post entity by id. +func (m *PostContentMutation) SetPostID(id int) { + m.post = &id +} + +// ClearPost clears the "post" edge to the Post entity. +func (m *PostContentMutation) ClearPost() { + m.clearedpost = true +} + +// PostCleared reports if the "post" edge to the Post entity was cleared. +func (m *PostContentMutation) PostCleared() bool { + return m.clearedpost +} + +// PostID returns the "post" edge ID in the mutation. +func (m *PostContentMutation) PostID() (id int, exists bool) { + if m.post != nil { + return *m.post, true + } + return +} + +// PostIDs returns the "post" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// PostID instead. It exists only for internal usage by the builders. +func (m *PostContentMutation) PostIDs() (ids []int) { + if id := m.post; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetPost resets all changes to the "post" edge. +func (m *PostContentMutation) ResetPost() { + m.post = nil + m.clearedpost = false +} + +// Where appends a list predicates to the PostContentMutation builder. +func (m *PostContentMutation) Where(ps ...predicate.PostContent) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the PostContentMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *PostContentMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.PostContent, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *PostContentMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *PostContentMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (PostContent). +func (m *PostContentMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *PostContentMutation) Fields() []string { + fields := make([]string, 0, 7) + if m.language_code != nil { + fields = append(fields, postcontent.FieldLanguageCode) + } + if m.title != nil { + fields = append(fields, postcontent.FieldTitle) + } + if m.content_markdown != nil { + fields = append(fields, postcontent.FieldContentMarkdown) + } + if m.summary != nil { + fields = append(fields, postcontent.FieldSummary) + } + if m.meta_keywords != nil { + fields = append(fields, postcontent.FieldMetaKeywords) + } + if m.meta_description != nil { + fields = append(fields, postcontent.FieldMetaDescription) + } + if m.slug != nil { + fields = append(fields, postcontent.FieldSlug) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *PostContentMutation) Field(name string) (ent.Value, bool) { + switch name { + case postcontent.FieldLanguageCode: + return m.LanguageCode() + case postcontent.FieldTitle: + return m.Title() + case postcontent.FieldContentMarkdown: + return m.ContentMarkdown() + case postcontent.FieldSummary: + return m.Summary() + case postcontent.FieldMetaKeywords: + return m.MetaKeywords() + case postcontent.FieldMetaDescription: + return m.MetaDescription() + case postcontent.FieldSlug: + return m.Slug() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *PostContentMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case postcontent.FieldLanguageCode: + return m.OldLanguageCode(ctx) + case postcontent.FieldTitle: + return m.OldTitle(ctx) + case postcontent.FieldContentMarkdown: + return m.OldContentMarkdown(ctx) + case postcontent.FieldSummary: + return m.OldSummary(ctx) + case postcontent.FieldMetaKeywords: + return m.OldMetaKeywords(ctx) + case postcontent.FieldMetaDescription: + return m.OldMetaDescription(ctx) + case postcontent.FieldSlug: + return m.OldSlug(ctx) + } + return nil, fmt.Errorf("unknown PostContent field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
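
Field-by-name access makes schema-agnostic validation possible: a hook can inspect any staged value without importing the concrete mutation type. A sketch that rejects an empty title wherever a field with that name is staged; the name "title" matches postcontent.FieldTitle in this patch, but the hook itself is an illustrative assumption:

    import (
        "context"
        "fmt"

        "entgo.io/ent"
    )

    // RequireNonEmptyTitle aborts mutations that stage an empty title.
    func RequireNonEmptyTitle() ent.Hook {
        return func(next ent.Mutator) ent.Mutator {
            return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
                if v, ok := m.Field("title"); ok && v == "" {
                    return nil, fmt.Errorf("%s: title must not be empty", m.Type())
                }
                return next.Mutate(ctx, m)
            })
        }
    }
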
+func (m *PostContentMutation) SetField(name string, value ent.Value) error { + switch name { + case postcontent.FieldLanguageCode: + v, ok := value.(postcontent.LanguageCode) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLanguageCode(v) + return nil + case postcontent.FieldTitle: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTitle(v) + return nil + case postcontent.FieldContentMarkdown: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetContentMarkdown(v) + return nil + case postcontent.FieldSummary: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSummary(v) + return nil + case postcontent.FieldMetaKeywords: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMetaKeywords(v) + return nil + case postcontent.FieldMetaDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMetaDescription(v) + return nil + case postcontent.FieldSlug: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSlug(v) + return nil + } + return fmt.Errorf("unknown PostContent field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *PostContentMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *PostContentMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PostContentMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown PostContent numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *PostContentMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(postcontent.FieldMetaKeywords) { + fields = append(fields, postcontent.FieldMetaKeywords) + } + if m.FieldCleared(postcontent.FieldMetaDescription) { + fields = append(fields, postcontent.FieldMetaDescription) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *PostContentMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *PostContentMutation) ClearField(name string) error { + switch name { + case postcontent.FieldMetaKeywords: + m.ClearMetaKeywords() + return nil + case postcontent.FieldMetaDescription: + m.ClearMetaDescription() + return nil + } + return fmt.Errorf("unknown PostContent nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *PostContentMutation) ResetField(name string) error { + switch name { + case postcontent.FieldLanguageCode: + m.ResetLanguageCode() + return nil + case postcontent.FieldTitle: + m.ResetTitle() + return nil + case postcontent.FieldContentMarkdown: + m.ResetContentMarkdown() + return nil + case postcontent.FieldSummary: + m.ResetSummary() + return nil + case postcontent.FieldMetaKeywords: + m.ResetMetaKeywords() + return nil + case postcontent.FieldMetaDescription: + m.ResetMetaDescription() + return nil + case postcontent.FieldSlug: + m.ResetSlug() + return nil + } + return fmt.Errorf("unknown PostContent field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *PostContentMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.post != nil { + edges = append(edges, postcontent.EdgePost) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *PostContentMutation) AddedIDs(name string) []ent.Value { + switch name { + case postcontent.EdgePost: + if id := m.post; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *PostContentMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *PostContentMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *PostContentMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedpost { + edges = append(edges, postcontent.EdgePost) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *PostContentMutation) EdgeCleared(name string) bool { + switch name { + case postcontent.EdgePost: + return m.clearedpost + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *PostContentMutation) ClearEdge(name string) error { + switch name { + case postcontent.EdgePost: + m.ClearPost() + return nil + } + return fmt.Errorf("unknown PostContent unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *PostContentMutation) ResetEdge(name string) error { + switch name { + case postcontent.EdgePost: + m.ResetPost() + return nil + } + return fmt.Errorf("unknown PostContent edge %s", name) +} + +// PostContributorMutation represents an operation that mutates the PostContributor nodes in the graph. +type PostContributorMutation struct { + config + op Op + typ string + id *int + language_code *postcontributor.LanguageCode + created_at *time.Time + clearedFields map[string]struct{} + post *int + clearedpost bool + contributor *int + clearedcontributor bool + role *int + clearedrole bool + done bool + oldValue func(context.Context) (*PostContributor, error) + predicates []predicate.PostContributor +} + +var _ ent.Mutation = (*PostContributorMutation)(nil) + +// postcontributorOption allows management of the mutation configuration using functional options. 
+type postcontributorOption func(*PostContributorMutation)
+
+// newPostContributorMutation creates new mutation for the PostContributor entity.
+func newPostContributorMutation(c config, op Op, opts ...postcontributorOption) *PostContributorMutation {
+	m := &PostContributorMutation{
+		config:        c,
+		op:            op,
+		typ:           TypePostContributor,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withPostContributorID sets the ID field of the mutation.
+func withPostContributorID(id int) postcontributorOption {
+	return func(m *PostContributorMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *PostContributor
+		)
+		m.oldValue = func(ctx context.Context) (*PostContributor, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().PostContributor.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withPostContributor sets the old PostContributor of the mutation.
+func withPostContributor(node *PostContributor) postcontributorOption {
+	return func(m *PostContributorMutation) {
+		m.oldValue = func(context.Context) (*PostContributor, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m PostContributorMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m PostContributorMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *PostContributorMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *PostContributorMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().PostContributor.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetLanguageCode sets the "language_code" field.
+func (m *PostContributorMutation) SetLanguageCode(pc postcontributor.LanguageCode) {
+	m.language_code = &pc
+}
+
+// LanguageCode returns the value of the "language_code" field in the mutation.
+func (m *PostContributorMutation) LanguageCode() (r postcontributor.LanguageCode, exists bool) {
+	v := m.language_code
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
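+// Example (illustrative sketch, not generated code): Client, Tx, and IDs expose
+// the scope of a mutation to hooks. A hook body for auditing a bulk update
+// might read as below; the log call and its import are illustrative
+// assumptions, not part of this package:
+//
+//	ids, err := m.IDs(ctx) // rows the mutation's predicate will touch
+//	if err != nil {
+//		return nil, err
+//	}
+//	log.Printf("post_contributor %s touches ids %v", m.Op(), ids)
+
+// OldLanguageCode returns the old "language_code" field's value of the PostContributor entity.
+// If the PostContributor object wasn't provided to the builder, the object is fetched from the database.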
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PostContributorMutation) OldLanguageCode(ctx context.Context) (v *postcontributor.LanguageCode, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLanguageCode is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLanguageCode requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLanguageCode: %w", err) + } + return oldValue.LanguageCode, nil +} + +// ClearLanguageCode clears the value of the "language_code" field. +func (m *PostContributorMutation) ClearLanguageCode() { + m.language_code = nil + m.clearedFields[postcontributor.FieldLanguageCode] = struct{}{} +} + +// LanguageCodeCleared returns if the "language_code" field was cleared in this mutation. +func (m *PostContributorMutation) LanguageCodeCleared() bool { + _, ok := m.clearedFields[postcontributor.FieldLanguageCode] + return ok +} + +// ResetLanguageCode resets all changes to the "language_code" field. +func (m *PostContributorMutation) ResetLanguageCode() { + m.language_code = nil + delete(m.clearedFields, postcontributor.FieldLanguageCode) +} + +// SetCreatedAt sets the "created_at" field. +func (m *PostContributorMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *PostContributorMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the PostContributor entity. +// If the PostContributor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PostContributorMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *PostContributorMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetPostID sets the "post" edge to the Post entity by id. +func (m *PostContributorMutation) SetPostID(id int) { + m.post = &id +} + +// ClearPost clears the "post" edge to the Post entity. +func (m *PostContributorMutation) ClearPost() { + m.clearedpost = true +} + +// PostCleared reports if the "post" edge to the Post entity was cleared. +func (m *PostContributorMutation) PostCleared() bool { + return m.clearedpost +} + +// PostID returns the "post" edge ID in the mutation. +func (m *PostContributorMutation) PostID() (id int, exists bool) { + if m.post != nil { + return *m.post, true + } + return +} + +// PostIDs returns the "post" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// PostID instead. It exists only for internal usage by the builders. 
+func (m *PostContributorMutation) PostIDs() (ids []int) {
+	if id := m.post; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetPost resets all changes to the "post" edge.
+func (m *PostContributorMutation) ResetPost() {
+	m.post = nil
+	m.clearedpost = false
+}
+
+// SetContributorID sets the "contributor" edge to the Contributor entity by id.
+func (m *PostContributorMutation) SetContributorID(id int) {
+	m.contributor = &id
+}
+
+// ClearContributor clears the "contributor" edge to the Contributor entity.
+func (m *PostContributorMutation) ClearContributor() {
+	m.clearedcontributor = true
+}
+
+// ContributorCleared reports if the "contributor" edge to the Contributor entity was cleared.
+func (m *PostContributorMutation) ContributorCleared() bool {
+	return m.clearedcontributor
+}
+
+// ContributorID returns the "contributor" edge ID in the mutation.
+func (m *PostContributorMutation) ContributorID() (id int, exists bool) {
+	if m.contributor != nil {
+		return *m.contributor, true
+	}
+	return
+}
+
+// ContributorIDs returns the "contributor" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// ContributorID instead. It exists only for internal usage by the builders.
+func (m *PostContributorMutation) ContributorIDs() (ids []int) {
+	if id := m.contributor; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetContributor resets all changes to the "contributor" edge.
+func (m *PostContributorMutation) ResetContributor() {
+	m.contributor = nil
+	m.clearedcontributor = false
+}
+
+// SetRoleID sets the "role" edge to the ContributorRole entity by id.
+func (m *PostContributorMutation) SetRoleID(id int) {
+	m.role = &id
+}
+
+// ClearRole clears the "role" edge to the ContributorRole entity.
+func (m *PostContributorMutation) ClearRole() {
+	m.clearedrole = true
+}
+
+// RoleCleared reports if the "role" edge to the ContributorRole entity was cleared.
+func (m *PostContributorMutation) RoleCleared() bool {
+	return m.clearedrole
+}
+
+// RoleID returns the "role" edge ID in the mutation.
+func (m *PostContributorMutation) RoleID() (id int, exists bool) {
+	if m.role != nil {
+		return *m.role, true
+	}
+	return
+}
+
+// RoleIDs returns the "role" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// RoleID instead. It exists only for internal usage by the builders.
+func (m *PostContributorMutation) RoleIDs() (ids []int) {
+	if id := m.role; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetRole resets all changes to the "role" edge.
+func (m *PostContributorMutation) ResetRole() {
+	m.role = nil
+	m.clearedrole = false
+}
+
+// Where appends a list of predicates to the PostContributorMutation builder.
+func (m *PostContributorMutation) Where(ps ...predicate.PostContributor) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the PostContributorMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *PostContributorMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.PostContributor, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *PostContributorMutation) Op() Op {
+	return m.op
+}
+
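+// Example (illustrative sketch, not generated code): WhereP lets callers narrow
+// a bulk mutation with raw selector predicates, without importing the generated
+// predicate package. The column value here is a hypothetical filter:
+//
+//	m.WhereP(func(s *sql.Selector) {
+//		s.Where(sql.EQ(s.C("language_code"), "en"))
+//	})
+
+// SetOp allows setting the mutation operation.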
+func (m *PostContributorMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (PostContributor). +func (m *PostContributorMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *PostContributorMutation) Fields() []string { + fields := make([]string, 0, 2) + if m.language_code != nil { + fields = append(fields, postcontributor.FieldLanguageCode) + } + if m.created_at != nil { + fields = append(fields, postcontributor.FieldCreatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *PostContributorMutation) Field(name string) (ent.Value, bool) { + switch name { + case postcontributor.FieldLanguageCode: + return m.LanguageCode() + case postcontributor.FieldCreatedAt: + return m.CreatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *PostContributorMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case postcontributor.FieldLanguageCode: + return m.OldLanguageCode(ctx) + case postcontributor.FieldCreatedAt: + return m.OldCreatedAt(ctx) + } + return nil, fmt.Errorf("unknown PostContributor field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PostContributorMutation) SetField(name string, value ent.Value) error { + switch name { + case postcontributor.FieldLanguageCode: + v, ok := value.(postcontributor.LanguageCode) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLanguageCode(v) + return nil + case postcontributor.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + } + return fmt.Errorf("unknown PostContributor field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *PostContributorMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *PostContributorMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PostContributorMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown PostContributor numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *PostContributorMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(postcontributor.FieldLanguageCode) { + fields = append(fields, postcontributor.FieldLanguageCode) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *PostContributorMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *PostContributorMutation) ClearField(name string) error { + switch name { + case postcontributor.FieldLanguageCode: + m.ClearLanguageCode() + return nil + } + return fmt.Errorf("unknown PostContributor nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *PostContributorMutation) ResetField(name string) error { + switch name { + case postcontributor.FieldLanguageCode: + m.ResetLanguageCode() + return nil + case postcontributor.FieldCreatedAt: + m.ResetCreatedAt() + return nil + } + return fmt.Errorf("unknown PostContributor field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *PostContributorMutation) AddedEdges() []string { + edges := make([]string, 0, 3) + if m.post != nil { + edges = append(edges, postcontributor.EdgePost) + } + if m.contributor != nil { + edges = append(edges, postcontributor.EdgeContributor) + } + if m.role != nil { + edges = append(edges, postcontributor.EdgeRole) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *PostContributorMutation) AddedIDs(name string) []ent.Value { + switch name { + case postcontributor.EdgePost: + if id := m.post; id != nil { + return []ent.Value{*id} + } + case postcontributor.EdgeContributor: + if id := m.contributor; id != nil { + return []ent.Value{*id} + } + case postcontributor.EdgeRole: + if id := m.role; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *PostContributorMutation) RemovedEdges() []string { + edges := make([]string, 0, 3) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *PostContributorMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *PostContributorMutation) ClearedEdges() []string { + edges := make([]string, 0, 3) + if m.clearedpost { + edges = append(edges, postcontributor.EdgePost) + } + if m.clearedcontributor { + edges = append(edges, postcontributor.EdgeContributor) + } + if m.clearedrole { + edges = append(edges, postcontributor.EdgeRole) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *PostContributorMutation) EdgeCleared(name string) bool { + switch name { + case postcontributor.EdgePost: + return m.clearedpost + case postcontributor.EdgeContributor: + return m.clearedcontributor + case postcontributor.EdgeRole: + return m.clearedrole + } + return false +} + +// ClearEdge clears the value of the edge with the given name. 
It returns an error +// if that edge is not defined in the schema. +func (m *PostContributorMutation) ClearEdge(name string) error { + switch name { + case postcontributor.EdgePost: + m.ClearPost() + return nil + case postcontributor.EdgeContributor: + m.ClearContributor() + return nil + case postcontributor.EdgeRole: + m.ClearRole() + return nil + } + return fmt.Errorf("unknown PostContributor unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *PostContributorMutation) ResetEdge(name string) error { + switch name { + case postcontributor.EdgePost: + m.ResetPost() + return nil + case postcontributor.EdgeContributor: + m.ResetContributor() + return nil + case postcontributor.EdgeRole: + m.ResetRole() + return nil + } + return fmt.Errorf("unknown PostContributor edge %s", name) +} + +// RoleMutation represents an operation that mutates the Role nodes in the graph. +type RoleMutation struct { + config + op Op + typ string + id *int + name *string + description *string + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + permissions map[int]struct{} + removedpermissions map[int]struct{} + clearedpermissions bool + users map[int]struct{} + removedusers map[int]struct{} + clearedusers bool + done bool + oldValue func(context.Context) (*Role, error) + predicates []predicate.Role +} + +var _ ent.Mutation = (*RoleMutation)(nil) + +// roleOption allows management of the mutation configuration using functional options. +type roleOption func(*RoleMutation) + +// newRoleMutation creates new mutation for the Role entity. +func newRoleMutation(c config, op Op, opts ...roleOption) *RoleMutation { + m := &RoleMutation{ + config: c, + op: op, + typ: TypeRole, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withRoleID sets the ID field of the mutation. +func withRoleID(id int) roleOption { + return func(m *RoleMutation) { + var ( + err error + once sync.Once + value *Role + ) + m.oldValue = func(ctx context.Context) (*Role, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Role.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withRole sets the old Role of the mutation. +func withRole(node *Role) roleOption { + return func(m *RoleMutation) { + m.oldValue = func(context.Context) (*Role, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m RoleMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m RoleMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. 
+func (m *RoleMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *RoleMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().Role.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetName sets the "name" field.
+func (m *RoleMutation) SetName(s string) {
+	m.name = &s
+}
+
+// Name returns the value of the "name" field in the mutation.
+func (m *RoleMutation) Name() (r string, exists bool) {
+	v := m.name
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldName returns the old "name" field's value of the Role entity.
+// If the Role object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *RoleMutation) OldName(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldName is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldName requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldName: %w", err)
+	}
+	return oldValue.Name, nil
+}
+
+// ResetName resets all changes to the "name" field.
+func (m *RoleMutation) ResetName() {
+	m.name = nil
+}
+
+// SetDescription sets the "description" field.
+func (m *RoleMutation) SetDescription(s string) {
+	m.description = &s
+}
+
+// Description returns the value of the "description" field in the mutation.
+func (m *RoleMutation) Description() (r string, exists bool) {
+	v := m.description
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldDescription returns the old "description" field's value of the Role entity.
+// If the Role object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *RoleMutation) OldDescription(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldDescription is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldDescription requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldDescription: %w", err)
+	}
+	return oldValue.Description, nil
+}
+
+// ClearDescription clears the value of the "description" field.
+func (m *RoleMutation) ClearDescription() {
+	m.description = nil
+	m.clearedFields[role.FieldDescription] = struct{}{}
+}
+
+// DescriptionCleared returns if the "description" field was cleared in this mutation.
+func (m *RoleMutation) DescriptionCleared() bool {
+	_, ok := m.clearedFields[role.FieldDescription]
+	return ok
+}
+
+// ResetDescription resets all changes to the "description" field.
+func (m *RoleMutation) ResetDescription() { + m.description = nil + delete(m.clearedFields, role.FieldDescription) +} + +// SetCreatedAt sets the "created_at" field. +func (m *RoleMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *RoleMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Role entity. +// If the Role object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RoleMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *RoleMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *RoleMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *RoleMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Role entity. +// If the Role object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RoleMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *RoleMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// AddPermissionIDs adds the "permissions" edge to the Permission entity by ids. +func (m *RoleMutation) AddPermissionIDs(ids ...int) { + if m.permissions == nil { + m.permissions = make(map[int]struct{}) + } + for i := range ids { + m.permissions[ids[i]] = struct{}{} + } +} + +// ClearPermissions clears the "permissions" edge to the Permission entity. +func (m *RoleMutation) ClearPermissions() { + m.clearedpermissions = true +} + +// PermissionsCleared reports if the "permissions" edge to the Permission entity was cleared. +func (m *RoleMutation) PermissionsCleared() bool { + return m.clearedpermissions +} + +// RemovePermissionIDs removes the "permissions" edge to the Permission entity by IDs. 
+func (m *RoleMutation) RemovePermissionIDs(ids ...int) {
+	if m.removedpermissions == nil {
+		m.removedpermissions = make(map[int]struct{})
+	}
+	for i := range ids {
+		delete(m.permissions, ids[i])
+		m.removedpermissions[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedPermissionsIDs returns the removed IDs of the "permissions" edge to the Permission entity.
+func (m *RoleMutation) RemovedPermissionsIDs() (ids []int) {
+	for id := range m.removedpermissions {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// PermissionsIDs returns the "permissions" edge IDs in the mutation.
+func (m *RoleMutation) PermissionsIDs() (ids []int) {
+	for id := range m.permissions {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetPermissions resets all changes to the "permissions" edge.
+func (m *RoleMutation) ResetPermissions() {
+	m.permissions = nil
+	m.clearedpermissions = false
+	m.removedpermissions = nil
+}
+
+// AddUserIDs adds the "users" edge to the User entity by ids.
+func (m *RoleMutation) AddUserIDs(ids ...int) {
+	if m.users == nil {
+		m.users = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.users[ids[i]] = struct{}{}
+	}
+}
+
+// ClearUsers clears the "users" edge to the User entity.
+func (m *RoleMutation) ClearUsers() {
+	m.clearedusers = true
+}
+
+// UsersCleared reports if the "users" edge to the User entity was cleared.
+func (m *RoleMutation) UsersCleared() bool {
+	return m.clearedusers
+}
+
+// RemoveUserIDs removes the "users" edge to the User entity by IDs.
+func (m *RoleMutation) RemoveUserIDs(ids ...int) {
+	if m.removedusers == nil {
+		m.removedusers = make(map[int]struct{})
+	}
+	for i := range ids {
+		delete(m.users, ids[i])
+		m.removedusers[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedUsersIDs returns the removed IDs of the "users" edge to the User entity.
+func (m *RoleMutation) RemovedUsersIDs() (ids []int) {
+	for id := range m.removedusers {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// UsersIDs returns the "users" edge IDs in the mutation.
+func (m *RoleMutation) UsersIDs() (ids []int) {
+	for id := range m.users {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetUsers resets all changes to the "users" edge.
+func (m *RoleMutation) ResetUsers() {
+	m.users = nil
+	m.clearedusers = false
+	m.removedusers = nil
+}
+
+// Where appends a list of predicates to the RoleMutation builder.
+func (m *RoleMutation) Where(ps ...predicate.Role) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the RoleMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *RoleMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.Role, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *RoleMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *RoleMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (Role).
+func (m *RoleMutation) Type() string {
+	return m.typ
+}
+
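+// Example (illustrative sketch, not generated code): the add/remove bookkeeping
+// above is what the generic edge API (AddedIDs/RemovedIDs, defined below)
+// reports. A hook could inspect pending permission changes like this; the
+// cache-invalidation step is a hypothetical side effect:
+//
+//	added := m.AddedIDs(role.EdgePermissions)     // staged by AddPermissionIDs
+//	removed := m.RemovedIDs(role.EdgePermissions) // staged by RemovePermissionIDs
+//	if len(added) > 0 || len(removed) > 0 {
+//		// e.g. invalidate a cached permission set for this role
+//	}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().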
+func (m *RoleMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.name != nil { + fields = append(fields, role.FieldName) + } + if m.description != nil { + fields = append(fields, role.FieldDescription) + } + if m.created_at != nil { + fields = append(fields, role.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, role.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *RoleMutation) Field(name string) (ent.Value, bool) { + switch name { + case role.FieldName: + return m.Name() + case role.FieldDescription: + return m.Description() + case role.FieldCreatedAt: + return m.CreatedAt() + case role.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *RoleMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case role.FieldName: + return m.OldName(ctx) + case role.FieldDescription: + return m.OldDescription(ctx) + case role.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case role.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown Role field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *RoleMutation) SetField(name string, value ent.Value) error { + switch name { + case role.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case role.FieldDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDescription(v) + return nil + case role.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case role.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown Role field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *RoleMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *RoleMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *RoleMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Role numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *RoleMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(role.FieldDescription) { + fields = append(fields, role.FieldDescription) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *RoleMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *RoleMutation) ClearField(name string) error { + switch name { + case role.FieldDescription: + m.ClearDescription() + return nil + } + return fmt.Errorf("unknown Role nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *RoleMutation) ResetField(name string) error { + switch name { + case role.FieldName: + m.ResetName() + return nil + case role.FieldDescription: + m.ResetDescription() + return nil + case role.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case role.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown Role field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *RoleMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.permissions != nil { + edges = append(edges, role.EdgePermissions) + } + if m.users != nil { + edges = append(edges, role.EdgeUsers) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *RoleMutation) AddedIDs(name string) []ent.Value { + switch name { + case role.EdgePermissions: + ids := make([]ent.Value, 0, len(m.permissions)) + for id := range m.permissions { + ids = append(ids, id) + } + return ids + case role.EdgeUsers: + ids := make([]ent.Value, 0, len(m.users)) + for id := range m.users { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *RoleMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + if m.removedpermissions != nil { + edges = append(edges, role.EdgePermissions) + } + if m.removedusers != nil { + edges = append(edges, role.EdgeUsers) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *RoleMutation) RemovedIDs(name string) []ent.Value { + switch name { + case role.EdgePermissions: + ids := make([]ent.Value, 0, len(m.removedpermissions)) + for id := range m.removedpermissions { + ids = append(ids, id) + } + return ids + case role.EdgeUsers: + ids := make([]ent.Value, 0, len(m.removedusers)) + for id := range m.removedusers { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *RoleMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.clearedpermissions { + edges = append(edges, role.EdgePermissions) + } + if m.clearedusers { + edges = append(edges, role.EdgeUsers) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. 
+func (m *RoleMutation) EdgeCleared(name string) bool { + switch name { + case role.EdgePermissions: + return m.clearedpermissions + case role.EdgeUsers: + return m.clearedusers + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *RoleMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown Role unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *RoleMutation) ResetEdge(name string) error { + switch name { + case role.EdgePermissions: + m.ResetPermissions() + return nil + case role.EdgeUsers: + m.ResetUsers() + return nil + } + return fmt.Errorf("unknown Role edge %s", name) +} + +// UserMutation represents an operation that mutates the User nodes in the graph. +type UserMutation struct { + config + op Op + typ string + id *int + email *string + password_hash *string + status *user.Status + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + roles map[int]struct{} + removedroles map[int]struct{} + clearedroles bool + contributors map[int]struct{} + removedcontributors map[int]struct{} + clearedcontributors bool + media map[int]struct{} + removedmedia map[int]struct{} + clearedmedia bool + done bool + oldValue func(context.Context) (*User, error) + predicates []predicate.User +} + +var _ ent.Mutation = (*UserMutation)(nil) + +// userOption allows management of the mutation configuration using functional options. +type userOption func(*UserMutation) + +// newUserMutation creates new mutation for the User entity. +func newUserMutation(c config, op Op, opts ...userOption) *UserMutation { + m := &UserMutation{ + config: c, + op: op, + typ: TypeUser, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUserID sets the ID field of the mutation. +func withUserID(id int) userOption { + return func(m *UserMutation) { + var ( + err error + once sync.Once + value *User + ) + m.oldValue = func(ctx context.Context) (*User, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().User.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withUser sets the old User of the mutation. +func withUser(node *User) userOption { + return func(m *UserMutation) { + m.oldValue = func(context.Context) (*User, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m UserMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m UserMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. 
+func (m *UserMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *UserMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().User.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetEmail sets the "email" field.
+func (m *UserMutation) SetEmail(s string) {
+	m.email = &s
+}
+
+// Email returns the value of the "email" field in the mutation.
+func (m *UserMutation) Email() (r string, exists bool) {
+	v := m.email
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldEmail returns the old "email" field's value of the User entity.
+// If the User object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UserMutation) OldEmail(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldEmail is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldEmail requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldEmail: %w", err)
+	}
+	return oldValue.Email, nil
+}
+
+// ResetEmail resets all changes to the "email" field.
+func (m *UserMutation) ResetEmail() {
+	m.email = nil
+}
+
+// SetPasswordHash sets the "password_hash" field.
+func (m *UserMutation) SetPasswordHash(s string) {
+	m.password_hash = &s
+}
+
+// PasswordHash returns the value of the "password_hash" field in the mutation.
+func (m *UserMutation) PasswordHash() (r string, exists bool) {
+	v := m.password_hash
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldPasswordHash returns the old "password_hash" field's value of the User entity.
+// If the User object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UserMutation) OldPasswordHash(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldPasswordHash is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldPasswordHash requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldPasswordHash: %w", err)
+	}
+	return oldValue.PasswordHash, nil
+}
+
+// ResetPasswordHash resets all changes to the "password_hash" field.
+func (m *UserMutation) ResetPasswordHash() {
+	m.password_hash = nil
+}
+
+// SetStatus sets the "status" field.
+func (m *UserMutation) SetStatus(u user.Status) {
+	m.status = &u
+}
+
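+// Example (illustrative sketch, not generated code): the Old* getters load the
+// pre-mutation row lazily through the oldValue closure and are valid only on
+// UpdateOne. A hook body that detects an email change might read:
+//
+//	if newEmail, ok := m.Email(); ok {
+//		oldEmail, err := m.OldEmail(ctx)
+//		if err == nil && oldEmail != newEmail {
+//			// e.g. mark the address as unverified again (hypothetical step)
+//		}
+//	}
+
+// Status returns the value of the "status" field in the mutation.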
+func (m *UserMutation) Status() (r user.Status, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldStatus(ctx context.Context) (v user.Status, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *UserMutation) ResetStatus() { + m.status = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *UserMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *UserMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *UserMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *UserMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *UserMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *UserMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// AddRoleIDs adds the "roles" edge to the Role entity by ids. 
+func (m *UserMutation) AddRoleIDs(ids ...int) {
+	if m.roles == nil {
+		m.roles = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.roles[ids[i]] = struct{}{}
+	}
+}
+
+// ClearRoles clears the "roles" edge to the Role entity.
+func (m *UserMutation) ClearRoles() {
+	m.clearedroles = true
+}
+
+// RolesCleared reports if the "roles" edge to the Role entity was cleared.
+func (m *UserMutation) RolesCleared() bool {
+	return m.clearedroles
+}
+
+// RemoveRoleIDs removes the "roles" edge to the Role entity by IDs.
+func (m *UserMutation) RemoveRoleIDs(ids ...int) {
+	if m.removedroles == nil {
+		m.removedroles = make(map[int]struct{})
+	}
+	for i := range ids {
+		delete(m.roles, ids[i])
+		m.removedroles[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedRolesIDs returns the removed IDs of the "roles" edge to the Role entity.
+func (m *UserMutation) RemovedRolesIDs() (ids []int) {
+	for id := range m.removedroles {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// RolesIDs returns the "roles" edge IDs in the mutation.
+func (m *UserMutation) RolesIDs() (ids []int) {
+	for id := range m.roles {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetRoles resets all changes to the "roles" edge.
+func (m *UserMutation) ResetRoles() {
+	m.roles = nil
+	m.clearedroles = false
+	m.removedroles = nil
+}
+
+// AddContributorIDs adds the "contributors" edge to the Contributor entity by ids.
+func (m *UserMutation) AddContributorIDs(ids ...int) {
+	if m.contributors == nil {
+		m.contributors = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.contributors[ids[i]] = struct{}{}
+	}
+}
+
+// ClearContributors clears the "contributors" edge to the Contributor entity.
+func (m *UserMutation) ClearContributors() {
+	m.clearedcontributors = true
+}
+
+// ContributorsCleared reports if the "contributors" edge to the Contributor entity was cleared.
+func (m *UserMutation) ContributorsCleared() bool {
+	return m.clearedcontributors
+}
+
+// RemoveContributorIDs removes the "contributors" edge to the Contributor entity by IDs.
+func (m *UserMutation) RemoveContributorIDs(ids ...int) {
+	if m.removedcontributors == nil {
+		m.removedcontributors = make(map[int]struct{})
+	}
+	for i := range ids {
+		delete(m.contributors, ids[i])
+		m.removedcontributors[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedContributorsIDs returns the removed IDs of the "contributors" edge to the Contributor entity.
+func (m *UserMutation) RemovedContributorsIDs() (ids []int) {
+	for id := range m.removedcontributors {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ContributorsIDs returns the "contributors" edge IDs in the mutation.
+func (m *UserMutation) ContributorsIDs() (ids []int) {
+	for id := range m.contributors {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetContributors resets all changes to the "contributors" edge.
+func (m *UserMutation) ResetContributors() {
+	m.contributors = nil
+	m.clearedcontributors = false
+	m.removedcontributors = nil
+}
+
+// AddMediumIDs adds the "media" edge to the Media entity by ids.
+func (m *UserMutation) AddMediumIDs(ids ...int) {
+	if m.media == nil {
+		m.media = make(map[int]struct{})
+	}
+	for i := range ids {
+		m.media[ids[i]] = struct{}{}
+	}
+}
+
+// ClearMedia clears the "media" edge to the Media entity.
+func (m *UserMutation) ClearMedia() {
+	m.clearedmedia = true
+}
+
+// MediaCleared reports if the "media" edge to the Media entity was cleared.
+func (m *UserMutation) MediaCleared() bool {
+	return m.clearedmedia
+}
+
+// RemoveMediumIDs removes the "media" edge to the Media entity by IDs.
+func (m *UserMutation) RemoveMediumIDs(ids ...int) {
+	if m.removedmedia == nil {
+		m.removedmedia = make(map[int]struct{})
+	}
+	for i := range ids {
+		delete(m.media, ids[i])
+		m.removedmedia[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedMedia returns the removed IDs of the "media" edge to the Media entity.
+func (m *UserMutation) RemovedMediaIDs() (ids []int) {
+	for id := range m.removedmedia {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// MediaIDs returns the "media" edge IDs in the mutation.
+func (m *UserMutation) MediaIDs() (ids []int) {
+	for id := range m.media {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetMedia resets all changes to the "media" edge.
+func (m *UserMutation) ResetMedia() {
+	m.media = nil
+	m.clearedmedia = false
+	m.removedmedia = nil
+}
+
+// Where appends a list of predicates to the UserMutation builder.
+func (m *UserMutation) Where(ps ...predicate.User) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the UserMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *UserMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.User, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *UserMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *UserMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (User).
+func (m *UserMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *UserMutation) Fields() []string {
+	fields := make([]string, 0, 5)
+	if m.email != nil {
+		fields = append(fields, user.FieldEmail)
+	}
+	if m.password_hash != nil {
+		fields = append(fields, user.FieldPasswordHash)
+	}
+	if m.status != nil {
+		fields = append(fields, user.FieldStatus)
+	}
+	if m.created_at != nil {
+		fields = append(fields, user.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, user.FieldUpdatedAt)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *UserMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case user.FieldEmail:
+		return m.Email()
+	case user.FieldPasswordHash:
+		return m.PasswordHash()
+	case user.FieldStatus:
+		return m.Status()
+	case user.FieldCreatedAt:
+		return m.CreatedAt()
+	case user.FieldUpdatedAt:
+		return m.UpdatedAt()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *UserMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+	switch name {
+	case user.FieldEmail:
+		return m.OldEmail(ctx)
+	case user.FieldPasswordHash:
+		return m.OldPasswordHash(ctx)
+	case user.FieldStatus:
+		return m.OldStatus(ctx)
+	case user.FieldCreatedAt:
+		return m.OldCreatedAt(ctx)
+	case user.FieldUpdatedAt:
+		return m.OldUpdatedAt(ctx)
+	}
+	return nil, fmt.Errorf("unknown User field %s", name)
+}
+
+// SetField sets the value of a field with the given name.
It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserMutation) SetField(name string, value ent.Value) error { + switch name { + case user.FieldEmail: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEmail(v) + return nil + case user.FieldPasswordHash: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPasswordHash(v) + return nil + case user.FieldStatus: + v, ok := value.(user.Status) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case user.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case user.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown User field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UserMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UserMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown User numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *UserMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UserMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *UserMutation) ClearField(name string) error { + return fmt.Errorf("unknown User nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *UserMutation) ResetField(name string) error { + switch name { + case user.FieldEmail: + m.ResetEmail() + return nil + case user.FieldPasswordHash: + m.ResetPasswordHash() + return nil + case user.FieldStatus: + m.ResetStatus() + return nil + case user.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case user.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown User field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. 
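
Fields, Field, OldField, and SetField form the reflective side of the mutation API: generic code can enumerate and modify changed fields by column name without touching the typed setters. An illustrative sketch, not generated code:

func dumpChangedFields(m *ent.UserMutation) {
	// Fields lists only the columns actually set on this mutation.
	for _, name := range m.Fields() {
		if v, ok := m.Field(name); ok {
			fmt.Printf("%s = %v\n", name, v)
		}
	}
}
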
+func (m *UserMutation) AddedEdges() []string { + edges := make([]string, 0, 3) + if m.roles != nil { + edges = append(edges, user.EdgeRoles) + } + if m.contributors != nil { + edges = append(edges, user.EdgeContributors) + } + if m.media != nil { + edges = append(edges, user.EdgeMedia) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *UserMutation) AddedIDs(name string) []ent.Value { + switch name { + case user.EdgeRoles: + ids := make([]ent.Value, 0, len(m.roles)) + for id := range m.roles { + ids = append(ids, id) + } + return ids + case user.EdgeContributors: + ids := make([]ent.Value, 0, len(m.contributors)) + for id := range m.contributors { + ids = append(ids, id) + } + return ids + case user.EdgeMedia: + ids := make([]ent.Value, 0, len(m.media)) + for id := range m.media { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *UserMutation) RemovedEdges() []string { + edges := make([]string, 0, 3) + if m.removedroles != nil { + edges = append(edges, user.EdgeRoles) + } + if m.removedcontributors != nil { + edges = append(edges, user.EdgeContributors) + } + if m.removedmedia != nil { + edges = append(edges, user.EdgeMedia) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UserMutation) RemovedIDs(name string) []ent.Value { + switch name { + case user.EdgeRoles: + ids := make([]ent.Value, 0, len(m.removedroles)) + for id := range m.removedroles { + ids = append(ids, id) + } + return ids + case user.EdgeContributors: + ids := make([]ent.Value, 0, len(m.removedcontributors)) + for id := range m.removedcontributors { + ids = append(ids, id) + } + return ids + case user.EdgeMedia: + ids := make([]ent.Value, 0, len(m.removedmedia)) + for id := range m.removedmedia { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *UserMutation) ClearedEdges() []string { + edges := make([]string, 0, 3) + if m.clearedroles { + edges = append(edges, user.EdgeRoles) + } + if m.clearedcontributors { + edges = append(edges, user.EdgeContributors) + } + if m.clearedmedia { + edges = append(edges, user.EdgeMedia) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UserMutation) EdgeCleared(name string) bool { + switch name { + case user.EdgeRoles: + return m.clearedroles + case user.EdgeContributors: + return m.clearedcontributors + case user.EdgeMedia: + return m.clearedmedia + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UserMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown User unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. 
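
The edge methods mirror the reflective field API, again split into added, removed, and cleared views. A sketch, not generated code, of a generic audit over a user mutation's edge changes:

func dumpEdgeChanges(m *ent.UserMutation) {
	for _, e := range m.AddedEdges() {
		fmt.Printf("added %s: %v\n", e, m.AddedIDs(e))
	}
	for _, e := range m.RemovedEdges() {
		fmt.Printf("removed %s: %v\n", e, m.RemovedIDs(e))
	}
	for _, e := range m.ClearedEdges() {
		fmt.Printf("cleared %s\n", e)
	}
}
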
+func (m *UserMutation) ResetEdge(name string) error { + switch name { + case user.EdgeRoles: + m.ResetRoles() + return nil + case user.EdgeContributors: + m.ResetContributors() + return nil + case user.EdgeMedia: + m.ResetMedia() + return nil + } + return fmt.Errorf("unknown User edge %s", name) +} diff --git a/backend/ent/permission.go b/backend/ent/permission.go new file mode 100644 index 0000000..a8d191d --- /dev/null +++ b/backend/ent/permission.go @@ -0,0 +1,176 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + "tss-rocks-be/ent/permission" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// Permission is the model entity for the Permission schema. +type Permission struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Resource name, e.g., 'media', 'post' + Resource string `json:"resource,omitempty"` + // Action name, e.g., 'create', 'read', 'update', 'delete' + Action string `json:"action,omitempty"` + // Human readable description of the permission + Description string `json:"description,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the PermissionQuery when eager-loading is set. + Edges PermissionEdges `json:"edges"` + selectValues sql.SelectValues +} + +// PermissionEdges holds the relations/edges for other nodes in the graph. +type PermissionEdges struct { + // Roles holds the value of the roles edge. + Roles []*Role `json:"roles,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// RolesOrErr returns the Roles value or an error if the edge +// was not loaded in eager-loading. +func (e PermissionEdges) RolesOrErr() ([]*Role, error) { + if e.loadedTypes[0] { + return e.Roles, nil + } + return nil, &NotLoadedError{edge: "roles"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Permission) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case permission.FieldID: + values[i] = new(sql.NullInt64) + case permission.FieldResource, permission.FieldAction, permission.FieldDescription: + values[i] = new(sql.NullString) + case permission.FieldCreatedAt, permission.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Permission fields. 
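
RolesOrErr exists so callers can tell "no roles" apart from "the edge was never loaded". A usage sketch, illustrative only, assuming a configured *ent.Client and a context; WithRoles is the eager-loading option defined on PermissionQuery later in this patch:

perms, err := client.Permission.Query().WithRoles().All(ctx)
if err != nil {
	return err
}
for _, p := range perms {
	// A non-nil error here means WithRoles was omitted, not an empty edge.
	roles, err := p.Edges.RolesOrErr()
	if err != nil {
		return err
	}
	fmt.Printf("%s:%s has %d role(s)\n", p.Resource, p.Action, len(roles))
}
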
+func (pe *Permission) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case permission.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + pe.ID = int(value.Int64) + case permission.FieldResource: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field resource", values[i]) + } else if value.Valid { + pe.Resource = value.String + } + case permission.FieldAction: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field action", values[i]) + } else if value.Valid { + pe.Action = value.String + } + case permission.FieldDescription: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field description", values[i]) + } else if value.Valid { + pe.Description = value.String + } + case permission.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + pe.CreatedAt = value.Time + } + case permission.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + pe.UpdatedAt = value.Time + } + default: + pe.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Permission. +// This includes values selected through modifiers, order, etc. +func (pe *Permission) Value(name string) (ent.Value, error) { + return pe.selectValues.Get(name) +} + +// QueryRoles queries the "roles" edge of the Permission entity. +func (pe *Permission) QueryRoles() *RoleQuery { + return NewPermissionClient(pe.config).QueryRoles(pe) +} + +// Update returns a builder for updating this Permission. +// Note that you need to call Permission.Unwrap() before calling this method if this Permission +// was returned from a transaction, and the transaction was committed or rolled back. +func (pe *Permission) Update() *PermissionUpdateOne { + return NewPermissionClient(pe.config).UpdateOne(pe) +} + +// Unwrap unwraps the Permission entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (pe *Permission) Unwrap() *Permission { + _tx, ok := pe.config.driver.(*txDriver) + if !ok { + panic("ent: Permission is not a transactional entity") + } + pe.config.driver = _tx.drv + return pe +} + +// String implements the fmt.Stringer. 
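
The Unwrap note above matters in practice: an entity returned from a committed transaction still points at the transaction's driver. A sketch of the rebinding dance, illustrative only, with placeholder resource and action values:

tx, err := client.Tx(ctx)
if err != nil {
	return err
}
p, err := tx.Permission.Create().
	SetResource("media").
	SetAction("read").
	Save(ctx)
if err != nil {
	_ = tx.Rollback()
	return err
}
if err := tx.Commit(); err != nil {
	return err
}
// Rebind to the root driver; without this, the update below would run
// against the closed transaction. Unwrap panics on non-transactional entities.
p = p.Unwrap()
_, err = p.Update().SetDescription("read access to media objects").Save(ctx)
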
+func (pe *Permission) String() string {
+	var builder strings.Builder
+	builder.WriteString("Permission(")
+	builder.WriteString(fmt.Sprintf("id=%v, ", pe.ID))
+	builder.WriteString("resource=")
+	builder.WriteString(pe.Resource)
+	builder.WriteString(", ")
+	builder.WriteString("action=")
+	builder.WriteString(pe.Action)
+	builder.WriteString(", ")
+	builder.WriteString("description=")
+	builder.WriteString(pe.Description)
+	builder.WriteString(", ")
+	builder.WriteString("created_at=")
+	builder.WriteString(pe.CreatedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("updated_at=")
+	builder.WriteString(pe.UpdatedAt.Format(time.ANSIC))
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// Permissions is a parsable slice of Permission.
+type Permissions []*Permission
diff --git a/backend/ent/permission/permission.go b/backend/ent/permission/permission.go
new file mode 100644
index 0000000..2d57603
--- /dev/null
+++ b/backend/ent/permission/permission.go
@@ -0,0 +1,129 @@
+// Code generated by ent, DO NOT EDIT.
+
+package permission
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+)
+
+const (
+	// Label holds the string label denoting the permission type in the database.
+	Label = "permission"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldResource holds the string denoting the resource field in the database.
+	FieldResource = "resource"
+	// FieldAction holds the string denoting the action field in the database.
+	FieldAction = "action"
+	// FieldDescription holds the string denoting the description field in the database.
+	FieldDescription = "description"
+	// FieldCreatedAt holds the string denoting the created_at field in the database.
+	FieldCreatedAt = "created_at"
+	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
+	FieldUpdatedAt = "updated_at"
+	// EdgeRoles holds the string denoting the roles edge name in mutations.
+	EdgeRoles = "roles"
+	// Table holds the table name of the permission in the database.
+	Table = "permissions"
+	// RolesTable is the table that holds the roles relation/edge. The primary key is declared below.
+	RolesTable = "role_permissions"
+	// RolesInverseTable is the table name for the Role entity.
+	// It exists in this package in order to avoid circular dependency with the "role" package.
+	RolesInverseTable = "roles"
+)
+
+// Columns holds all SQL columns for permission fields.
+var Columns = []string{
+	FieldID,
+	FieldResource,
+	FieldAction,
+	FieldDescription,
+	FieldCreatedAt,
+	FieldUpdatedAt,
+}
+
+var (
+	// RolesPrimaryKey holds the table columns denoting the
+	// primary key for the roles relation (M2M).
+	RolesPrimaryKey = []string{"role_id", "permission_id"}
+)
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+	for i := range Columns {
+		if column == Columns[i] {
+			return true
+		}
+	}
+	return false
+}
+
+var (
+	// ResourceValidator is a validator for the "resource" field. It is called by the builders before save.
+	ResourceValidator func(string) error
+	// ActionValidator is a validator for the "action" field. It is called by the builders before save.
+	ActionValidator func(string) error
+	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
+	DefaultCreatedAt func() time.Time
+	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
+ DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// OrderOption defines the ordering options for the Permission queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByResource orders the results by the resource field. +func ByResource(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldResource, opts...).ToFunc() +} + +// ByAction orders the results by the action field. +func ByAction(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAction, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByRolesCount orders the results by roles count. +func ByRolesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newRolesStep(), opts...) + } +} + +// ByRoles orders the results by roles terms. +func ByRoles(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newRolesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newRolesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(RolesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, RolesTable, RolesPrimaryKey...), + ) +} diff --git a/backend/ent/permission/where.go b/backend/ent/permission/where.go new file mode 100644 index 0000000..075afa0 --- /dev/null +++ b/backend/ent/permission/where.go @@ -0,0 +1,404 @@ +// Code generated by ent, DO NOT EDIT. + +package permission + +import ( + "time" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Permission { + return predicate.Permission(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Permission { + return predicate.Permission(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Permission { + return predicate.Permission(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Permission { + return predicate.Permission(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Permission { + return predicate.Permission(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Permission { + return predicate.Permission(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. 
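
The OrderOption helpers compose directly with the query builder. A usage sketch, illustrative only; sql.OrderDesc comes from entgo.io/ent/dialect/sql and flips the direction of a term, and ByRolesCount orders by the size of the M2M edge:

perms, err := client.Permission.Query().
	Order(
		permission.ByResource(),                  // resource ascending
		permission.ByRolesCount(sql.OrderDesc()), // most-attached first
	).
	All(ctx)
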
+func IDGTE(id int) predicate.Permission { + return predicate.Permission(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Permission { + return predicate.Permission(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Permission { + return predicate.Permission(sql.FieldLTE(FieldID, id)) +} + +// Resource applies equality check predicate on the "resource" field. It's identical to ResourceEQ. +func Resource(v string) predicate.Permission { + return predicate.Permission(sql.FieldEQ(FieldResource, v)) +} + +// Action applies equality check predicate on the "action" field. It's identical to ActionEQ. +func Action(v string) predicate.Permission { + return predicate.Permission(sql.FieldEQ(FieldAction, v)) +} + +// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. +func Description(v string) predicate.Permission { + return predicate.Permission(sql.FieldEQ(FieldDescription, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// ResourceEQ applies the EQ predicate on the "resource" field. +func ResourceEQ(v string) predicate.Permission { + return predicate.Permission(sql.FieldEQ(FieldResource, v)) +} + +// ResourceNEQ applies the NEQ predicate on the "resource" field. +func ResourceNEQ(v string) predicate.Permission { + return predicate.Permission(sql.FieldNEQ(FieldResource, v)) +} + +// ResourceIn applies the In predicate on the "resource" field. +func ResourceIn(vs ...string) predicate.Permission { + return predicate.Permission(sql.FieldIn(FieldResource, vs...)) +} + +// ResourceNotIn applies the NotIn predicate on the "resource" field. +func ResourceNotIn(vs ...string) predicate.Permission { + return predicate.Permission(sql.FieldNotIn(FieldResource, vs...)) +} + +// ResourceGT applies the GT predicate on the "resource" field. +func ResourceGT(v string) predicate.Permission { + return predicate.Permission(sql.FieldGT(FieldResource, v)) +} + +// ResourceGTE applies the GTE predicate on the "resource" field. +func ResourceGTE(v string) predicate.Permission { + return predicate.Permission(sql.FieldGTE(FieldResource, v)) +} + +// ResourceLT applies the LT predicate on the "resource" field. +func ResourceLT(v string) predicate.Permission { + return predicate.Permission(sql.FieldLT(FieldResource, v)) +} + +// ResourceLTE applies the LTE predicate on the "resource" field. +func ResourceLTE(v string) predicate.Permission { + return predicate.Permission(sql.FieldLTE(FieldResource, v)) +} + +// ResourceContains applies the Contains predicate on the "resource" field. +func ResourceContains(v string) predicate.Permission { + return predicate.Permission(sql.FieldContains(FieldResource, v)) +} + +// ResourceHasPrefix applies the HasPrefix predicate on the "resource" field. +func ResourceHasPrefix(v string) predicate.Permission { + return predicate.Permission(sql.FieldHasPrefix(FieldResource, v)) +} + +// ResourceHasSuffix applies the HasSuffix predicate on the "resource" field. 
+func ResourceHasSuffix(v string) predicate.Permission { + return predicate.Permission(sql.FieldHasSuffix(FieldResource, v)) +} + +// ResourceEqualFold applies the EqualFold predicate on the "resource" field. +func ResourceEqualFold(v string) predicate.Permission { + return predicate.Permission(sql.FieldEqualFold(FieldResource, v)) +} + +// ResourceContainsFold applies the ContainsFold predicate on the "resource" field. +func ResourceContainsFold(v string) predicate.Permission { + return predicate.Permission(sql.FieldContainsFold(FieldResource, v)) +} + +// ActionEQ applies the EQ predicate on the "action" field. +func ActionEQ(v string) predicate.Permission { + return predicate.Permission(sql.FieldEQ(FieldAction, v)) +} + +// ActionNEQ applies the NEQ predicate on the "action" field. +func ActionNEQ(v string) predicate.Permission { + return predicate.Permission(sql.FieldNEQ(FieldAction, v)) +} + +// ActionIn applies the In predicate on the "action" field. +func ActionIn(vs ...string) predicate.Permission { + return predicate.Permission(sql.FieldIn(FieldAction, vs...)) +} + +// ActionNotIn applies the NotIn predicate on the "action" field. +func ActionNotIn(vs ...string) predicate.Permission { + return predicate.Permission(sql.FieldNotIn(FieldAction, vs...)) +} + +// ActionGT applies the GT predicate on the "action" field. +func ActionGT(v string) predicate.Permission { + return predicate.Permission(sql.FieldGT(FieldAction, v)) +} + +// ActionGTE applies the GTE predicate on the "action" field. +func ActionGTE(v string) predicate.Permission { + return predicate.Permission(sql.FieldGTE(FieldAction, v)) +} + +// ActionLT applies the LT predicate on the "action" field. +func ActionLT(v string) predicate.Permission { + return predicate.Permission(sql.FieldLT(FieldAction, v)) +} + +// ActionLTE applies the LTE predicate on the "action" field. +func ActionLTE(v string) predicate.Permission { + return predicate.Permission(sql.FieldLTE(FieldAction, v)) +} + +// ActionContains applies the Contains predicate on the "action" field. +func ActionContains(v string) predicate.Permission { + return predicate.Permission(sql.FieldContains(FieldAction, v)) +} + +// ActionHasPrefix applies the HasPrefix predicate on the "action" field. +func ActionHasPrefix(v string) predicate.Permission { + return predicate.Permission(sql.FieldHasPrefix(FieldAction, v)) +} + +// ActionHasSuffix applies the HasSuffix predicate on the "action" field. +func ActionHasSuffix(v string) predicate.Permission { + return predicate.Permission(sql.FieldHasSuffix(FieldAction, v)) +} + +// ActionEqualFold applies the EqualFold predicate on the "action" field. +func ActionEqualFold(v string) predicate.Permission { + return predicate.Permission(sql.FieldEqualFold(FieldAction, v)) +} + +// ActionContainsFold applies the ContainsFold predicate on the "action" field. +func ActionContainsFold(v string) predicate.Permission { + return predicate.Permission(sql.FieldContainsFold(FieldAction, v)) +} + +// DescriptionEQ applies the EQ predicate on the "description" field. +func DescriptionEQ(v string) predicate.Permission { + return predicate.Permission(sql.FieldEQ(FieldDescription, v)) +} + +// DescriptionNEQ applies the NEQ predicate on the "description" field. +func DescriptionNEQ(v string) predicate.Permission { + return predicate.Permission(sql.FieldNEQ(FieldDescription, v)) +} + +// DescriptionIn applies the In predicate on the "description" field. 
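
These generated field predicates compose under Query().Where; the Fold variants are case-insensitive. A sketch, with placeholder literal values:

perms, err := client.Permission.Query().
	Where(
		permission.ResourceEqualFold("Media"),   // matches "media", "MEDIA", ...
		permission.ActionIn("create", "update"), // any of the listed actions
	).
	All(ctx)
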
+func DescriptionIn(vs ...string) predicate.Permission { + return predicate.Permission(sql.FieldIn(FieldDescription, vs...)) +} + +// DescriptionNotIn applies the NotIn predicate on the "description" field. +func DescriptionNotIn(vs ...string) predicate.Permission { + return predicate.Permission(sql.FieldNotIn(FieldDescription, vs...)) +} + +// DescriptionGT applies the GT predicate on the "description" field. +func DescriptionGT(v string) predicate.Permission { + return predicate.Permission(sql.FieldGT(FieldDescription, v)) +} + +// DescriptionGTE applies the GTE predicate on the "description" field. +func DescriptionGTE(v string) predicate.Permission { + return predicate.Permission(sql.FieldGTE(FieldDescription, v)) +} + +// DescriptionLT applies the LT predicate on the "description" field. +func DescriptionLT(v string) predicate.Permission { + return predicate.Permission(sql.FieldLT(FieldDescription, v)) +} + +// DescriptionLTE applies the LTE predicate on the "description" field. +func DescriptionLTE(v string) predicate.Permission { + return predicate.Permission(sql.FieldLTE(FieldDescription, v)) +} + +// DescriptionContains applies the Contains predicate on the "description" field. +func DescriptionContains(v string) predicate.Permission { + return predicate.Permission(sql.FieldContains(FieldDescription, v)) +} + +// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. +func DescriptionHasPrefix(v string) predicate.Permission { + return predicate.Permission(sql.FieldHasPrefix(FieldDescription, v)) +} + +// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. +func DescriptionHasSuffix(v string) predicate.Permission { + return predicate.Permission(sql.FieldHasSuffix(FieldDescription, v)) +} + +// DescriptionIsNil applies the IsNil predicate on the "description" field. +func DescriptionIsNil() predicate.Permission { + return predicate.Permission(sql.FieldIsNull(FieldDescription)) +} + +// DescriptionNotNil applies the NotNil predicate on the "description" field. +func DescriptionNotNil() predicate.Permission { + return predicate.Permission(sql.FieldNotNull(FieldDescription)) +} + +// DescriptionEqualFold applies the EqualFold predicate on the "description" field. +func DescriptionEqualFold(v string) predicate.Permission { + return predicate.Permission(sql.FieldEqualFold(FieldDescription, v)) +} + +// DescriptionContainsFold applies the ContainsFold predicate on the "description" field. +func DescriptionContainsFold(v string) predicate.Permission { + return predicate.Permission(sql.FieldContainsFold(FieldDescription, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Permission { + return predicate.Permission(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Permission { + return predicate.Permission(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. 
+func CreatedAtGT(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Permission { + return predicate.Permission(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Permission { + return predicate.Permission(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Permission { + return predicate.Permission(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// HasRoles applies the HasEdge predicate on the "roles" edge. +func HasRoles() predicate.Permission { + return predicate.Permission(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, RolesTable, RolesPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasRolesWith applies the HasEdge predicate on the "roles" edge with a given conditions (other predicates). +func HasRolesWith(preds ...predicate.Role) predicate.Permission { + return predicate.Permission(func(s *sql.Selector) { + step := newRolesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Permission) predicate.Permission { + return predicate.Permission(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Permission) predicate.Permission { + return predicate.Permission(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
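
HasRoles and HasRolesWith traverse the role_permissions join table, and And/Or/Not group conditions. Two illustrative queries; role.NameEQ is an assumed predicate on the generated role package and should be adjusted to the actual Role schema:

// Permissions not attached to any role.
orphans, err := client.Permission.Query().
	Where(permission.Not(permission.HasRoles())).
	All(ctx)

// Permissions granted to the "admin" role (role.NameEQ is assumed).
adminPerms, err := client.Permission.Query().
	Where(permission.HasRolesWith(role.NameEQ("admin"))).
	All(ctx)
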
+func Not(p predicate.Permission) predicate.Permission { + return predicate.Permission(sql.NotPredicates(p)) +} diff --git a/backend/ent/permission_create.go b/backend/ent/permission_create.go new file mode 100644 index 0000000..c346d6d --- /dev/null +++ b/backend/ent/permission_create.go @@ -0,0 +1,313 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/permission" + "tss-rocks-be/ent/role" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PermissionCreate is the builder for creating a Permission entity. +type PermissionCreate struct { + config + mutation *PermissionMutation + hooks []Hook +} + +// SetResource sets the "resource" field. +func (pc *PermissionCreate) SetResource(s string) *PermissionCreate { + pc.mutation.SetResource(s) + return pc +} + +// SetAction sets the "action" field. +func (pc *PermissionCreate) SetAction(s string) *PermissionCreate { + pc.mutation.SetAction(s) + return pc +} + +// SetDescription sets the "description" field. +func (pc *PermissionCreate) SetDescription(s string) *PermissionCreate { + pc.mutation.SetDescription(s) + return pc +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (pc *PermissionCreate) SetNillableDescription(s *string) *PermissionCreate { + if s != nil { + pc.SetDescription(*s) + } + return pc +} + +// SetCreatedAt sets the "created_at" field. +func (pc *PermissionCreate) SetCreatedAt(t time.Time) *PermissionCreate { + pc.mutation.SetCreatedAt(t) + return pc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (pc *PermissionCreate) SetNillableCreatedAt(t *time.Time) *PermissionCreate { + if t != nil { + pc.SetCreatedAt(*t) + } + return pc +} + +// SetUpdatedAt sets the "updated_at" field. +func (pc *PermissionCreate) SetUpdatedAt(t time.Time) *PermissionCreate { + pc.mutation.SetUpdatedAt(t) + return pc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (pc *PermissionCreate) SetNillableUpdatedAt(t *time.Time) *PermissionCreate { + if t != nil { + pc.SetUpdatedAt(*t) + } + return pc +} + +// AddRoleIDs adds the "roles" edge to the Role entity by IDs. +func (pc *PermissionCreate) AddRoleIDs(ids ...int) *PermissionCreate { + pc.mutation.AddRoleIDs(ids...) + return pc +} + +// AddRoles adds the "roles" edges to the Role entity. +func (pc *PermissionCreate) AddRoles(r ...*Role) *PermissionCreate { + ids := make([]int, len(r)) + for i := range r { + ids[i] = r[i].ID + } + return pc.AddRoleIDs(ids...) +} + +// Mutation returns the PermissionMutation object of the builder. +func (pc *PermissionCreate) Mutation() *PermissionMutation { + return pc.mutation +} + +// Save creates the Permission in the database. +func (pc *PermissionCreate) Save(ctx context.Context) (*Permission, error) { + pc.defaults() + return withHooks(ctx, pc.sqlSave, pc.mutation, pc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (pc *PermissionCreate) SaveX(ctx context.Context) *Permission { + v, err := pc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (pc *PermissionCreate) Exec(ctx context.Context) error { + _, err := pc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
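
Typical use of the create builder follows; defaults() (below) fills created_at/updated_at, and check() runs the resource/action validators before anything reaches the database. A sketch with placeholder values; adminRole stands in for a previously loaded *ent.Role:

p, err := client.Permission.Create().
	SetResource("post").
	SetAction("delete").
	SetNillableDescription(nil). // optional field stays unset when nil
	AddRoles(adminRole).         // adminRole is an assumed, pre-fetched *ent.Role
	Save(ctx)
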
+func (pc *PermissionCreate) ExecX(ctx context.Context) { + if err := pc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (pc *PermissionCreate) defaults() { + if _, ok := pc.mutation.CreatedAt(); !ok { + v := permission.DefaultCreatedAt() + pc.mutation.SetCreatedAt(v) + } + if _, ok := pc.mutation.UpdatedAt(); !ok { + v := permission.DefaultUpdatedAt() + pc.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (pc *PermissionCreate) check() error { + if _, ok := pc.mutation.Resource(); !ok { + return &ValidationError{Name: "resource", err: errors.New(`ent: missing required field "Permission.resource"`)} + } + if v, ok := pc.mutation.Resource(); ok { + if err := permission.ResourceValidator(v); err != nil { + return &ValidationError{Name: "resource", err: fmt.Errorf(`ent: validator failed for field "Permission.resource": %w`, err)} + } + } + if _, ok := pc.mutation.Action(); !ok { + return &ValidationError{Name: "action", err: errors.New(`ent: missing required field "Permission.action"`)} + } + if v, ok := pc.mutation.Action(); ok { + if err := permission.ActionValidator(v); err != nil { + return &ValidationError{Name: "action", err: fmt.Errorf(`ent: validator failed for field "Permission.action": %w`, err)} + } + } + if _, ok := pc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Permission.created_at"`)} + } + if _, ok := pc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Permission.updated_at"`)} + } + return nil +} + +func (pc *PermissionCreate) sqlSave(ctx context.Context) (*Permission, error) { + if err := pc.check(); err != nil { + return nil, err + } + _node, _spec := pc.createSpec() + if err := sqlgraph.CreateNode(ctx, pc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + pc.mutation.id = &_node.ID + pc.mutation.done = true + return _node, nil +} + +func (pc *PermissionCreate) createSpec() (*Permission, *sqlgraph.CreateSpec) { + var ( + _node = &Permission{config: pc.config} + _spec = sqlgraph.NewCreateSpec(permission.Table, sqlgraph.NewFieldSpec(permission.FieldID, field.TypeInt)) + ) + if value, ok := pc.mutation.Resource(); ok { + _spec.SetField(permission.FieldResource, field.TypeString, value) + _node.Resource = value + } + if value, ok := pc.mutation.Action(); ok { + _spec.SetField(permission.FieldAction, field.TypeString, value) + _node.Action = value + } + if value, ok := pc.mutation.Description(); ok { + _spec.SetField(permission.FieldDescription, field.TypeString, value) + _node.Description = value + } + if value, ok := pc.mutation.CreatedAt(); ok { + _spec.SetField(permission.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := pc.mutation.UpdatedAt(); ok { + _spec.SetField(permission.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if nodes := pc.mutation.RolesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: permission.RolesTable, + Columns: permission.RolesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = 
append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// PermissionCreateBulk is the builder for creating many Permission entities in bulk.
+type PermissionCreateBulk struct {
+	config
+	err      error
+	builders []*PermissionCreate
+}
+
+// Save creates the Permission entities in the database.
+func (pcb *PermissionCreateBulk) Save(ctx context.Context) ([]*Permission, error) {
+	if pcb.err != nil {
+		return nil, pcb.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(pcb.builders))
+	nodes := make([]*Permission, len(pcb.builders))
+	mutators := make([]Mutator, len(pcb.builders))
+	for i := range pcb.builders {
+		func(i int, root context.Context) {
+			builder := pcb.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*PermissionMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, pcb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, pcb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, pcb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (pcb *PermissionCreateBulk) SaveX(ctx context.Context) []*Permission {
+	v, err := pcb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (pcb *PermissionCreateBulk) Exec(ctx context.Context) error {
+	_, err := pcb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (pcb *PermissionCreateBulk) ExecX(ctx context.Context) {
+	if err := pcb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/permission_delete.go b/backend/ent/permission_delete.go
new file mode 100644
index 0000000..d85f5c0
--- /dev/null
+++ b/backend/ent/permission_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"tss-rocks-be/ent/permission"
+	"tss-rocks-be/ent/predicate"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+)
+
+// PermissionDelete is the builder for deleting a Permission entity.
+type PermissionDelete struct {
+	config
+	hooks    []Hook
+	mutation *PermissionMutation
+}
+
+// Where appends a list of predicates to the PermissionDelete builder.
+func (pd *PermissionDelete) Where(ps ...predicate.Permission) *PermissionDelete {
+	pd.mutation.Where(ps...)
+	return pd
+}
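
The bulk builder above batches all create specs into one BatchCreate call, while Delete().Where(...) removes rows by predicate and reports the affected count. Both sketches are illustrative, with placeholder values; CreateBulk is the standard generated client method:

builders := make([]*ent.PermissionCreate, 0, 4)
for _, action := range []string{"create", "read", "update", "delete"} {
	builders = append(builders, client.Permission.Create().
		SetResource("post").
		SetAction(action))
}
perms, err := client.Permission.CreateBulk(builders...).Save(ctx)

n, err := client.Permission.Delete().
	Where(permission.ResourceEQ("legacy")).
	Exec(ctx) // n is the number of rows deleted
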
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (pd *PermissionDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, pd.sqlExec, pd.mutation, pd.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (pd *PermissionDelete) ExecX(ctx context.Context) int {
+	n, err := pd.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (pd *PermissionDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(permission.Table, sqlgraph.NewFieldSpec(permission.FieldID, field.TypeInt))
+	if ps := pd.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, pd.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	pd.mutation.done = true
+	return affected, err
+}
+
+// PermissionDeleteOne is the builder for deleting a single Permission entity.
+type PermissionDeleteOne struct {
+	pd *PermissionDelete
+}
+
+// Where appends a list of predicates to the PermissionDelete builder.
+func (pdo *PermissionDeleteOne) Where(ps ...predicate.Permission) *PermissionDeleteOne {
+	pdo.pd.mutation.Where(ps...)
+	return pdo
+}
+
+// Exec executes the deletion query.
+func (pdo *PermissionDeleteOne) Exec(ctx context.Context) error {
+	n, err := pdo.pd.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{permission.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (pdo *PermissionDeleteOne) ExecX(ctx context.Context) {
+	if err := pdo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/permission_query.go b/backend/ent/permission_query.go
new file mode 100644
index 0000000..1af6ed8
--- /dev/null
+++ b/backend/ent/permission_query.go
@@ -0,0 +1,637 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"database/sql/driver"
+	"fmt"
+	"math"
+	"tss-rocks-be/ent/permission"
+	"tss-rocks-be/ent/predicate"
+	"tss-rocks-be/ent/role"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+)
+
+// PermissionQuery is the builder for querying Permission entities.
+type PermissionQuery struct {
+	config
+	ctx        *QueryContext
+	order      []permission.OrderOption
+	inters     []Interceptor
+	predicates []predicate.Permission
+	withRoles  *RoleQuery
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the PermissionQuery builder.
+func (pq *PermissionQuery) Where(ps ...predicate.Permission) *PermissionQuery {
+	pq.predicates = append(pq.predicates, ps...)
+	return pq
+}
+
+// Limit the number of records to be returned by this query.
+func (pq *PermissionQuery) Limit(limit int) *PermissionQuery {
+	pq.ctx.Limit = &limit
+	return pq
+}
+
+// Offset to start from.
+func (pq *PermissionQuery) Offset(offset int) *PermissionQuery {
+	pq.ctx.Offset = &offset
+	return pq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (pq *PermissionQuery) Unique(unique bool) *PermissionQuery {
+	pq.ctx.Unique = &unique
+	return pq
+}
+
+// Order specifies how the records should be ordered.
+func (pq *PermissionQuery) Order(o ...permission.OrderOption) *PermissionQuery {
+	pq.order = append(pq.order, o...)
+ return pq +} + +// QueryRoles chains the current query on the "roles" edge. +func (pq *PermissionQuery) QueryRoles() *RoleQuery { + query := (&RoleClient{config: pq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := pq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := pq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(permission.Table, permission.FieldID, selector), + sqlgraph.To(role.Table, role.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, permission.RolesTable, permission.RolesPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(pq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Permission entity from the query. +// Returns a *NotFoundError when no Permission was found. +func (pq *PermissionQuery) First(ctx context.Context) (*Permission, error) { + nodes, err := pq.Limit(1).All(setContextOp(ctx, pq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{permission.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (pq *PermissionQuery) FirstX(ctx context.Context) *Permission { + node, err := pq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Permission ID from the query. +// Returns a *NotFoundError when no Permission ID was found. +func (pq *PermissionQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = pq.Limit(1).IDs(setContextOp(ctx, pq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{permission.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (pq *PermissionQuery) FirstIDX(ctx context.Context) int { + id, err := pq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Permission entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Permission entity is found. +// Returns a *NotFoundError when no Permission entities are found. +func (pq *PermissionQuery) Only(ctx context.Context) (*Permission, error) { + nodes, err := pq.Limit(2).All(setContextOp(ctx, pq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{permission.Label} + default: + return nil, &NotSingularError{permission.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (pq *PermissionQuery) OnlyX(ctx context.Context) *Permission { + node, err := pq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Permission ID in the query. +// Returns a *NotSingularError when more than one Permission ID is found. +// Returns a *NotFoundError when no entities are found. +func (pq *PermissionQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = pq.Limit(2).IDs(setContextOp(ctx, pq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{permission.Label} + default: + err = &NotSingularError{permission.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. 
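
First and Only differ in how they treat multiple matches: First takes the first row, while Only insists on exactly one. An illustrative sketch of the error handling this enables; the resource and action values are placeholders:

p, err := client.Permission.Query().
	Where(permission.ResourceEQ("media"), permission.ActionEQ("read")).
	Only(ctx)
switch {
case ent.IsNotFound(err):
	// no such permission
case ent.IsNotSingular(err):
	// more than one row matched; (resource, action) is not unique here
case err != nil:
	// other database error
default:
	fmt.Println(p.ID)
}
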
+func (pq *PermissionQuery) OnlyIDX(ctx context.Context) int { + id, err := pq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Permissions. +func (pq *PermissionQuery) All(ctx context.Context) ([]*Permission, error) { + ctx = setContextOp(ctx, pq.ctx, ent.OpQueryAll) + if err := pq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Permission, *PermissionQuery]() + return withInterceptors[[]*Permission](ctx, pq, qr, pq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (pq *PermissionQuery) AllX(ctx context.Context) []*Permission { + nodes, err := pq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Permission IDs. +func (pq *PermissionQuery) IDs(ctx context.Context) (ids []int, err error) { + if pq.ctx.Unique == nil && pq.path != nil { + pq.Unique(true) + } + ctx = setContextOp(ctx, pq.ctx, ent.OpQueryIDs) + if err = pq.Select(permission.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (pq *PermissionQuery) IDsX(ctx context.Context) []int { + ids, err := pq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (pq *PermissionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, pq.ctx, ent.OpQueryCount) + if err := pq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, pq, querierCount[*PermissionQuery](), pq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (pq *PermissionQuery) CountX(ctx context.Context) int { + count, err := pq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (pq *PermissionQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, pq.ctx, ent.OpQueryExist) + switch _, err := pq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (pq *PermissionQuery) ExistX(ctx context.Context) bool { + exist, err := pq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the PermissionQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (pq *PermissionQuery) Clone() *PermissionQuery { + if pq == nil { + return nil + } + return &PermissionQuery{ + config: pq.config, + ctx: pq.ctx.Clone(), + order: append([]permission.OrderOption{}, pq.order...), + inters: append([]Interceptor{}, pq.inters...), + predicates: append([]predicate.Permission{}, pq.predicates...), + withRoles: pq.withRoles.Clone(), + // clone intermediate query. + sql: pq.sql.Clone(), + path: pq.path, + } +} + +// WithRoles tells the query-builder to eager-load the nodes that are connected to +// the "roles" edge. The optional arguments are used to configure the query builder of the edge. +func (pq *PermissionQuery) WithRoles(opts ...func(*RoleQuery)) *PermissionQuery { + query := (&RoleClient{config: pq.config}).Query() + for _, opt := range opts { + opt(query) + } + pq.withRoles = query + return pq +} + +// GroupBy is used to group vertices by one or more fields/columns. 
+// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Resource string `json:"resource,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Permission.Query(). +// GroupBy(permission.FieldResource). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (pq *PermissionQuery) GroupBy(field string, fields ...string) *PermissionGroupBy { + pq.ctx.Fields = append([]string{field}, fields...) + grbuild := &PermissionGroupBy{build: pq} + grbuild.flds = &pq.ctx.Fields + grbuild.label = permission.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Resource string `json:"resource,omitempty"` +// } +// +// client.Permission.Query(). +// Select(permission.FieldResource). +// Scan(ctx, &v) +func (pq *PermissionQuery) Select(fields ...string) *PermissionSelect { + pq.ctx.Fields = append(pq.ctx.Fields, fields...) + sbuild := &PermissionSelect{PermissionQuery: pq} + sbuild.label = permission.Label + sbuild.flds, sbuild.scan = &pq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a PermissionSelect configured with the given aggregations. +func (pq *PermissionQuery) Aggregate(fns ...AggregateFunc) *PermissionSelect { + return pq.Select().Aggregate(fns...) +} + +func (pq *PermissionQuery) prepareQuery(ctx context.Context) error { + for _, inter := range pq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, pq); err != nil { + return err + } + } + } + for _, f := range pq.ctx.Fields { + if !permission.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if pq.path != nil { + prev, err := pq.path(ctx) + if err != nil { + return err + } + pq.sql = prev + } + return nil +} + +func (pq *PermissionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Permission, error) { + var ( + nodes = []*Permission{} + _spec = pq.querySpec() + loadedTypes = [1]bool{ + pq.withRoles != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Permission).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Permission{config: pq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, pq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := pq.withRoles; query != nil { + if err := pq.loadRoles(ctx, query, nodes, + func(n *Permission) { n.Edges.Roles = []*Role{} }, + func(n *Permission, e *Role) { n.Edges.Roles = append(n.Edges.Roles, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (pq *PermissionQuery) loadRoles(ctx context.Context, query *RoleQuery, nodes []*Permission, init func(*Permission), assign func(*Permission, *Role)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int]*Permission) + nids := make(map[int]map[*Permission]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := 
sql.Table(permission.RolesTable) + s.Join(joinT).On(s.C(role.FieldID), joinT.C(permission.RolesPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(permission.RolesPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(permission.RolesPrimaryKey[1])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := int(values[0].(*sql.NullInt64).Int64) + inValue := int(values[1].(*sql.NullInt64).Int64) + if nids[inValue] == nil { + nids[inValue] = map[*Permission]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*Role](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "roles" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} + +func (pq *PermissionQuery) sqlCount(ctx context.Context) (int, error) { + _spec := pq.querySpec() + _spec.Node.Columns = pq.ctx.Fields + if len(pq.ctx.Fields) > 0 { + _spec.Unique = pq.ctx.Unique != nil && *pq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, pq.driver, _spec) +} + +func (pq *PermissionQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(permission.Table, permission.Columns, sqlgraph.NewFieldSpec(permission.FieldID, field.TypeInt)) + _spec.From = pq.sql + if unique := pq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if pq.path != nil { + _spec.Unique = true + } + if fields := pq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, permission.FieldID) + for i := range fields { + if fields[i] != permission.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := pq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := pq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := pq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := pq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (pq *PermissionQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(pq.driver.Dialect()) + t1 := builder.Table(permission.Table) + columns := pq.ctx.Fields + if len(columns) == 0 { + columns = permission.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if pq.sql != nil { + selector = pq.sql + selector.Select(selector.Columns(columns...)...) + } + if pq.ctx.Unique != nil && *pq.ctx.Unique { + selector.Distinct() + } + for _, p := range pq.predicates { + p(selector) + } + for _, p := range pq.order { + p(selector) + } + if offset := pq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. 
We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := pq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// PermissionGroupBy is the group-by builder for Permission entities. +type PermissionGroupBy struct { + selector + build *PermissionQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (pgb *PermissionGroupBy) Aggregate(fns ...AggregateFunc) *PermissionGroupBy { + pgb.fns = append(pgb.fns, fns...) + return pgb +} + +// Scan applies the selector query and scans the result into the given value. +func (pgb *PermissionGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pgb.build.ctx, ent.OpQueryGroupBy) + if err := pgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PermissionQuery, *PermissionGroupBy](ctx, pgb.build, pgb, pgb.build.inters, v) +} + +func (pgb *PermissionGroupBy) sqlScan(ctx context.Context, root *PermissionQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(pgb.fns)) + for _, fn := range pgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*pgb.flds)+len(pgb.fns)) + for _, f := range *pgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*pgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := pgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// PermissionSelect is the builder for selecting fields of Permission entities. +type PermissionSelect struct { + *PermissionQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ps *PermissionSelect) Aggregate(fns ...AggregateFunc) *PermissionSelect { + ps.fns = append(ps.fns, fns...) + return ps +} + +// Scan applies the selector query and scans the result into the given value. +func (ps *PermissionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ps.ctx, ent.OpQuerySelect) + if err := ps.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PermissionQuery, *PermissionSelect](ctx, ps.PermissionQuery, ps, ps.inters, v) +} + +func (ps *PermissionSelect) sqlScan(ctx context.Context, root *PermissionQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ps.fns)) + for _, fn := range ps.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ps.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ps.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/permission_update.go b/backend/ent/permission_update.go new file mode 100644 index 0000000..69aa17c --- /dev/null +++ b/backend/ent/permission_update.go @@ -0,0 +1,531 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/permission" + "tss-rocks-be/ent/predicate" + "tss-rocks-be/ent/role" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PermissionUpdate is the builder for updating Permission entities. +type PermissionUpdate struct { + config + hooks []Hook + mutation *PermissionMutation +} + +// Where appends a list predicates to the PermissionUpdate builder. +func (pu *PermissionUpdate) Where(ps ...predicate.Permission) *PermissionUpdate { + pu.mutation.Where(ps...) + return pu +} + +// SetResource sets the "resource" field. +func (pu *PermissionUpdate) SetResource(s string) *PermissionUpdate { + pu.mutation.SetResource(s) + return pu +} + +// SetNillableResource sets the "resource" field if the given value is not nil. +func (pu *PermissionUpdate) SetNillableResource(s *string) *PermissionUpdate { + if s != nil { + pu.SetResource(*s) + } + return pu +} + +// SetAction sets the "action" field. +func (pu *PermissionUpdate) SetAction(s string) *PermissionUpdate { + pu.mutation.SetAction(s) + return pu +} + +// SetNillableAction sets the "action" field if the given value is not nil. +func (pu *PermissionUpdate) SetNillableAction(s *string) *PermissionUpdate { + if s != nil { + pu.SetAction(*s) + } + return pu +} + +// SetDescription sets the "description" field. +func (pu *PermissionUpdate) SetDescription(s string) *PermissionUpdate { + pu.mutation.SetDescription(s) + return pu +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (pu *PermissionUpdate) SetNillableDescription(s *string) *PermissionUpdate { + if s != nil { + pu.SetDescription(*s) + } + return pu +} + +// ClearDescription clears the value of the "description" field. +func (pu *PermissionUpdate) ClearDescription() *PermissionUpdate { + pu.mutation.ClearDescription() + return pu +} + +// SetUpdatedAt sets the "updated_at" field. +func (pu *PermissionUpdate) SetUpdatedAt(t time.Time) *PermissionUpdate { + pu.mutation.SetUpdatedAt(t) + return pu +} + +// AddRoleIDs adds the "roles" edge to the Role entity by IDs. +func (pu *PermissionUpdate) AddRoleIDs(ids ...int) *PermissionUpdate { + pu.mutation.AddRoleIDs(ids...) + return pu +} + +// AddRoles adds the "roles" edges to the Role entity. +func (pu *PermissionUpdate) AddRoles(r ...*Role) *PermissionUpdate { + ids := make([]int, len(r)) + for i := range r { + ids[i] = r[i].ID + } + return pu.AddRoleIDs(ids...) +} + +// Mutation returns the PermissionMutation object of the builder. +func (pu *PermissionUpdate) Mutation() *PermissionMutation { + return pu.mutation +} + +// ClearRoles clears all "roles" edges to the Role entity. +func (pu *PermissionUpdate) ClearRoles() *PermissionUpdate { + pu.mutation.ClearRoles() + return pu +} + +// RemoveRoleIDs removes the "roles" edge to Role entities by IDs. +func (pu *PermissionUpdate) RemoveRoleIDs(ids ...int) *PermissionUpdate { + pu.mutation.RemoveRoleIDs(ids...) + return pu +} + +// RemoveRoles removes "roles" edges to Role entities. +func (pu *PermissionUpdate) RemoveRoles(r ...*Role) *PermissionUpdate { + ids := make([]int, len(r)) + for i := range r { + ids[i] = r[i].ID + } + return pu.RemoveRoleIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
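+//
+// Illustrative sketch, assuming client, ctx and an existing *Role named
+// editor (all hypothetical):
+//
+//	n, err := client.Permission.Update().
+//		Where(permission.Resource("posts")).
+//		SetDescription("write access to posts").
+//		AddRoles(editor).
+//		Save(ctx)
+//
+// n reports how many Permission rows were updated.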
+func (pu *PermissionUpdate) Save(ctx context.Context) (int, error) { + pu.defaults() + return withHooks(ctx, pu.sqlSave, pu.mutation, pu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (pu *PermissionUpdate) SaveX(ctx context.Context) int { + affected, err := pu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (pu *PermissionUpdate) Exec(ctx context.Context) error { + _, err := pu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (pu *PermissionUpdate) ExecX(ctx context.Context) { + if err := pu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (pu *PermissionUpdate) defaults() { + if _, ok := pu.mutation.UpdatedAt(); !ok { + v := permission.UpdateDefaultUpdatedAt() + pu.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (pu *PermissionUpdate) check() error { + if v, ok := pu.mutation.Resource(); ok { + if err := permission.ResourceValidator(v); err != nil { + return &ValidationError{Name: "resource", err: fmt.Errorf(`ent: validator failed for field "Permission.resource": %w`, err)} + } + } + if v, ok := pu.mutation.Action(); ok { + if err := permission.ActionValidator(v); err != nil { + return &ValidationError{Name: "action", err: fmt.Errorf(`ent: validator failed for field "Permission.action": %w`, err)} + } + } + return nil +} + +func (pu *PermissionUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := pu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(permission.Table, permission.Columns, sqlgraph.NewFieldSpec(permission.FieldID, field.TypeInt)) + if ps := pu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := pu.mutation.Resource(); ok { + _spec.SetField(permission.FieldResource, field.TypeString, value) + } + if value, ok := pu.mutation.Action(); ok { + _spec.SetField(permission.FieldAction, field.TypeString, value) + } + if value, ok := pu.mutation.Description(); ok { + _spec.SetField(permission.FieldDescription, field.TypeString, value) + } + if pu.mutation.DescriptionCleared() { + _spec.ClearField(permission.FieldDescription, field.TypeString) + } + if value, ok := pu.mutation.UpdatedAt(); ok { + _spec.SetField(permission.FieldUpdatedAt, field.TypeTime, value) + } + if pu.mutation.RolesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: permission.RolesTable, + Columns: permission.RolesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pu.mutation.RemovedRolesIDs(); len(nodes) > 0 && !pu.mutation.RolesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: permission.RolesTable, + Columns: permission.RolesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pu.mutation.RolesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: permission.RolesTable, + Columns: permission.RolesPrimaryKey, + Bidi: false, + 
Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, pu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{permission.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + pu.mutation.done = true + return n, nil +} + +// PermissionUpdateOne is the builder for updating a single Permission entity. +type PermissionUpdateOne struct { + config + fields []string + hooks []Hook + mutation *PermissionMutation +} + +// SetResource sets the "resource" field. +func (puo *PermissionUpdateOne) SetResource(s string) *PermissionUpdateOne { + puo.mutation.SetResource(s) + return puo +} + +// SetNillableResource sets the "resource" field if the given value is not nil. +func (puo *PermissionUpdateOne) SetNillableResource(s *string) *PermissionUpdateOne { + if s != nil { + puo.SetResource(*s) + } + return puo +} + +// SetAction sets the "action" field. +func (puo *PermissionUpdateOne) SetAction(s string) *PermissionUpdateOne { + puo.mutation.SetAction(s) + return puo +} + +// SetNillableAction sets the "action" field if the given value is not nil. +func (puo *PermissionUpdateOne) SetNillableAction(s *string) *PermissionUpdateOne { + if s != nil { + puo.SetAction(*s) + } + return puo +} + +// SetDescription sets the "description" field. +func (puo *PermissionUpdateOne) SetDescription(s string) *PermissionUpdateOne { + puo.mutation.SetDescription(s) + return puo +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (puo *PermissionUpdateOne) SetNillableDescription(s *string) *PermissionUpdateOne { + if s != nil { + puo.SetDescription(*s) + } + return puo +} + +// ClearDescription clears the value of the "description" field. +func (puo *PermissionUpdateOne) ClearDescription() *PermissionUpdateOne { + puo.mutation.ClearDescription() + return puo +} + +// SetUpdatedAt sets the "updated_at" field. +func (puo *PermissionUpdateOne) SetUpdatedAt(t time.Time) *PermissionUpdateOne { + puo.mutation.SetUpdatedAt(t) + return puo +} + +// AddRoleIDs adds the "roles" edge to the Role entity by IDs. +func (puo *PermissionUpdateOne) AddRoleIDs(ids ...int) *PermissionUpdateOne { + puo.mutation.AddRoleIDs(ids...) + return puo +} + +// AddRoles adds the "roles" edges to the Role entity. +func (puo *PermissionUpdateOne) AddRoles(r ...*Role) *PermissionUpdateOne { + ids := make([]int, len(r)) + for i := range r { + ids[i] = r[i].ID + } + return puo.AddRoleIDs(ids...) +} + +// Mutation returns the PermissionMutation object of the builder. +func (puo *PermissionUpdateOne) Mutation() *PermissionMutation { + return puo.mutation +} + +// ClearRoles clears all "roles" edges to the Role entity. +func (puo *PermissionUpdateOne) ClearRoles() *PermissionUpdateOne { + puo.mutation.ClearRoles() + return puo +} + +// RemoveRoleIDs removes the "roles" edge to Role entities by IDs. +func (puo *PermissionUpdateOne) RemoveRoleIDs(ids ...int) *PermissionUpdateOne { + puo.mutation.RemoveRoleIDs(ids...) + return puo +} + +// RemoveRoles removes "roles" edges to Role entities. 
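+//
+// Illustrative sketch, assuming client, ctx, a fetched *Permission p and a
+// *Role viewer (all hypothetical):
+//
+//	p, err := client.Permission.UpdateOne(p).
+//		RemoveRoles(viewer).
+//		Save(ctx)
+//
+// Unlike the plural PermissionUpdate builder, Save here returns the updated
+// entity rather than an affected-rows count.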
+func (puo *PermissionUpdateOne) RemoveRoles(r ...*Role) *PermissionUpdateOne { + ids := make([]int, len(r)) + for i := range r { + ids[i] = r[i].ID + } + return puo.RemoveRoleIDs(ids...) +} + +// Where appends a list predicates to the PermissionUpdate builder. +func (puo *PermissionUpdateOne) Where(ps ...predicate.Permission) *PermissionUpdateOne { + puo.mutation.Where(ps...) + return puo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (puo *PermissionUpdateOne) Select(field string, fields ...string) *PermissionUpdateOne { + puo.fields = append([]string{field}, fields...) + return puo +} + +// Save executes the query and returns the updated Permission entity. +func (puo *PermissionUpdateOne) Save(ctx context.Context) (*Permission, error) { + puo.defaults() + return withHooks(ctx, puo.sqlSave, puo.mutation, puo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (puo *PermissionUpdateOne) SaveX(ctx context.Context) *Permission { + node, err := puo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (puo *PermissionUpdateOne) Exec(ctx context.Context) error { + _, err := puo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (puo *PermissionUpdateOne) ExecX(ctx context.Context) { + if err := puo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (puo *PermissionUpdateOne) defaults() { + if _, ok := puo.mutation.UpdatedAt(); !ok { + v := permission.UpdateDefaultUpdatedAt() + puo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
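+//
+// Validator failures surface as *ValidationError. Sketch, assuming client and
+// ctx, and assuming the schema's ResourceValidator rejects empty strings
+// (hypothetical):
+//
+//	_, err := client.Permission.UpdateOneID(1).
+//		SetResource("").
+//		Save(ctx)
+//	// err wraps: validator failed for field "Permission.resource"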
+func (puo *PermissionUpdateOne) check() error { + if v, ok := puo.mutation.Resource(); ok { + if err := permission.ResourceValidator(v); err != nil { + return &ValidationError{Name: "resource", err: fmt.Errorf(`ent: validator failed for field "Permission.resource": %w`, err)} + } + } + if v, ok := puo.mutation.Action(); ok { + if err := permission.ActionValidator(v); err != nil { + return &ValidationError{Name: "action", err: fmt.Errorf(`ent: validator failed for field "Permission.action": %w`, err)} + } + } + return nil +} + +func (puo *PermissionUpdateOne) sqlSave(ctx context.Context) (_node *Permission, err error) { + if err := puo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(permission.Table, permission.Columns, sqlgraph.NewFieldSpec(permission.FieldID, field.TypeInt)) + id, ok := puo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Permission.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := puo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, permission.FieldID) + for _, f := range fields { + if !permission.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != permission.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := puo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := puo.mutation.Resource(); ok { + _spec.SetField(permission.FieldResource, field.TypeString, value) + } + if value, ok := puo.mutation.Action(); ok { + _spec.SetField(permission.FieldAction, field.TypeString, value) + } + if value, ok := puo.mutation.Description(); ok { + _spec.SetField(permission.FieldDescription, field.TypeString, value) + } + if puo.mutation.DescriptionCleared() { + _spec.ClearField(permission.FieldDescription, field.TypeString) + } + if value, ok := puo.mutation.UpdatedAt(); ok { + _spec.SetField(permission.FieldUpdatedAt, field.TypeTime, value) + } + if puo.mutation.RolesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: permission.RolesTable, + Columns: permission.RolesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := puo.mutation.RemovedRolesIDs(); len(nodes) > 0 && !puo.mutation.RolesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: permission.RolesTable, + Columns: permission.RolesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := puo.mutation.RolesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: permission.RolesTable, + Columns: permission.RolesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Permission{config: puo.config} + _spec.Assign = _node.assignValues + 
_spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, puo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{permission.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + puo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/post.go b/backend/ent/post.go new file mode 100644 index 0000000..0be8691 --- /dev/null +++ b/backend/ent/post.go @@ -0,0 +1,210 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/post" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// Post is the model entity for the Post schema. +type Post struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Status holds the value of the "status" field. + Status post.Status `json:"status,omitempty"` + // Slug holds the value of the "slug" field. + Slug string `json:"slug,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the PostQuery when eager-loading is set. + Edges PostEdges `json:"edges"` + category_posts *int + selectValues sql.SelectValues +} + +// PostEdges holds the relations/edges for other nodes in the graph. +type PostEdges struct { + // Contents holds the value of the contents edge. + Contents []*PostContent `json:"contents,omitempty"` + // Contributors holds the value of the contributors edge. + Contributors []*PostContributor `json:"contributors,omitempty"` + // Category holds the value of the category edge. + Category *Category `json:"category,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [3]bool +} + +// ContentsOrErr returns the Contents value or an error if the edge +// was not loaded in eager-loading. +func (e PostEdges) ContentsOrErr() ([]*PostContent, error) { + if e.loadedTypes[0] { + return e.Contents, nil + } + return nil, &NotLoadedError{edge: "contents"} +} + +// ContributorsOrErr returns the Contributors value or an error if the edge +// was not loaded in eager-loading. +func (e PostEdges) ContributorsOrErr() ([]*PostContributor, error) { + if e.loadedTypes[1] { + return e.Contributors, nil + } + return nil, &NotLoadedError{edge: "contributors"} +} + +// CategoryOrErr returns the Category value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e PostEdges) CategoryOrErr() (*Category, error) { + if e.Category != nil { + return e.Category, nil + } else if e.loadedTypes[2] { + return nil, &NotFoundError{label: category.Label} + } + return nil, &NotLoadedError{edge: "category"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Post) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case post.FieldID: + values[i] = new(sql.NullInt64) + case post.FieldStatus, post.FieldSlug: + values[i] = new(sql.NullString) + case post.FieldCreatedAt, post.FieldUpdatedAt: + values[i] = new(sql.NullTime) + case post.ForeignKeys[0]: // category_posts + values[i] = new(sql.NullInt64) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Post fields. +func (po *Post) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case post.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + po.ID = int(value.Int64) + case post.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + po.Status = post.Status(value.String) + } + case post.FieldSlug: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field slug", values[i]) + } else if value.Valid { + po.Slug = value.String + } + case post.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + po.CreatedAt = value.Time + } + case post.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + po.UpdatedAt = value.Time + } + case post.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field category_posts", value) + } else if value.Valid { + po.category_posts = new(int) + *po.category_posts = int(value.Int64) + } + default: + po.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Post. +// This includes values selected through modifiers, order, etc. +func (po *Post) Value(name string) (ent.Value, error) { + return po.selectValues.Get(name) +} + +// QueryContents queries the "contents" edge of the Post entity. +func (po *Post) QueryContents() *PostContentQuery { + return NewPostClient(po.config).QueryContents(po) +} + +// QueryContributors queries the "contributors" edge of the Post entity. +func (po *Post) QueryContributors() *PostContributorQuery { + return NewPostClient(po.config).QueryContributors(po) +} + +// QueryCategory queries the "category" edge of the Post entity. +func (po *Post) QueryCategory() *CategoryQuery { + return NewPostClient(po.config).QueryCategory(po) +} + +// Update returns a builder for updating this Post. +// Note that you need to call Post.Unwrap() before calling this method if this Post +// was returned from a transaction, and the transaction was committed or rolled back. +func (po *Post) Update() *PostUpdateOne { + return NewPostClient(po.config).UpdateOne(po) +} + +// Unwrap unwraps the Post entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. 
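+//
+// Illustrative sketch, assuming client and ctx (error handling elided):
+//
+//	tx, _ := client.Tx(ctx)
+//	p := tx.Post.Query().FirstX(ctx)
+//	_ = tx.Commit()
+//	p = p.Unwrap() // detach from the committed transaction
+//	contents := p.QueryContents().AllX(ctx)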
+func (po *Post) Unwrap() *Post { + _tx, ok := po.config.driver.(*txDriver) + if !ok { + panic("ent: Post is not a transactional entity") + } + po.config.driver = _tx.drv + return po +} + +// String implements the fmt.Stringer. +func (po *Post) String() string { + var builder strings.Builder + builder.WriteString("Post(") + builder.WriteString(fmt.Sprintf("id=%v, ", po.ID)) + builder.WriteString("status=") + builder.WriteString(fmt.Sprintf("%v", po.Status)) + builder.WriteString(", ") + builder.WriteString("slug=") + builder.WriteString(po.Slug) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(po.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(po.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// Posts is a parsable slice of Post. +type Posts []*Post diff --git a/backend/ent/post/post.go b/backend/ent/post/post.go new file mode 100644 index 0000000..6dc1e83 --- /dev/null +++ b/backend/ent/post/post.go @@ -0,0 +1,207 @@ +// Code generated by ent, DO NOT EDIT. + +package post + +import ( + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the post type in the database. + Label = "post" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldSlug holds the string denoting the slug field in the database. + FieldSlug = "slug" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // EdgeContents holds the string denoting the contents edge name in mutations. + EdgeContents = "contents" + // EdgeContributors holds the string denoting the contributors edge name in mutations. + EdgeContributors = "contributors" + // EdgeCategory holds the string denoting the category edge name in mutations. + EdgeCategory = "category" + // Table holds the table name of the post in the database. + Table = "posts" + // ContentsTable is the table that holds the contents relation/edge. + ContentsTable = "post_contents" + // ContentsInverseTable is the table name for the PostContent entity. + // It exists in this package in order to avoid circular dependency with the "postcontent" package. + ContentsInverseTable = "post_contents" + // ContentsColumn is the table column denoting the contents relation/edge. + ContentsColumn = "post_contents" + // ContributorsTable is the table that holds the contributors relation/edge. + ContributorsTable = "post_contributors" + // ContributorsInverseTable is the table name for the PostContributor entity. + // It exists in this package in order to avoid circular dependency with the "postcontributor" package. + ContributorsInverseTable = "post_contributors" + // ContributorsColumn is the table column denoting the contributors relation/edge. + ContributorsColumn = "post_contributors" + // CategoryTable is the table that holds the category relation/edge. + CategoryTable = "posts" + // CategoryInverseTable is the table name for the Category entity. + // It exists in this package in order to avoid circular dependency with the "category" package. 
+ CategoryInverseTable = "categories" + // CategoryColumn is the table column denoting the category relation/edge. + CategoryColumn = "category_posts" +) + +// Columns holds all SQL columns for post fields. +var Columns = []string{ + FieldID, + FieldStatus, + FieldSlug, + FieldCreatedAt, + FieldUpdatedAt, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "posts" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "category_posts", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // SlugValidator is a validator for the "slug" field. It is called by the builders before save. + SlugValidator func(string) error + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// Status defines the type for the "status" enum field. +type Status string + +// StatusDraft is the default value of the Status enum. +const DefaultStatus = StatusDraft + +// Status values. +const ( + StatusDraft Status = "draft" + StatusPublished Status = "published" + StatusArchived Status = "archived" +) + +func (s Status) String() string { + return string(s) +} + +// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save. +func StatusValidator(s Status) error { + switch s { + case StatusDraft, StatusPublished, StatusArchived: + return nil + default: + return fmt.Errorf("post: invalid enum value for status field: %q", s) + } +} + +// OrderOption defines the ordering options for the Post queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// BySlug orders the results by the slug field. +func BySlug(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSlug, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByContentsCount orders the results by contents count. +func ByContentsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newContentsStep(), opts...) + } +} + +// ByContents orders the results by contents terms. +func ByContents(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newContentsStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} + +// ByContributorsCount orders the results by contributors count. +func ByContributorsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newContributorsStep(), opts...) + } +} + +// ByContributors orders the results by contributors terms. +func ByContributors(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newContributorsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByCategoryField orders the results by category field. +func ByCategoryField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newCategoryStep(), sql.OrderByField(field, opts...)) + } +} +func newContentsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ContentsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ContentsTable, ContentsColumn), + ) +} +func newContributorsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ContributorsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ContributorsTable, ContributorsColumn), + ) +} +func newCategoryStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(CategoryInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, CategoryTable, CategoryColumn), + ) +} diff --git a/backend/ent/post/where.go b/backend/ent/post/where.go new file mode 100644 index 0000000..7b09ff9 --- /dev/null +++ b/backend/ent/post/where.go @@ -0,0 +1,320 @@ +// Code generated by ent, DO NOT EDIT. + +package post + +import ( + "time" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Post { + return predicate.Post(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Post { + return predicate.Post(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Post { + return predicate.Post(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Post { + return predicate.Post(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Post { + return predicate.Post(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Post { + return predicate.Post(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Post { + return predicate.Post(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Post { + return predicate.Post(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Post { + return predicate.Post(sql.FieldLTE(FieldID, id)) +} + +// Slug applies equality check predicate on the "slug" field. It's identical to SlugEQ. +func Slug(v string) predicate.Post { + return predicate.Post(sql.FieldEQ(FieldSlug, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. 
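+//
+// Illustrative sketch of a time-range filter, assuming client and ctx:
+//
+//	lastWeek := time.Now().AddDate(0, 0, -7)
+//	recent := client.Post.Query().
+//		Where(post.CreatedAtGTE(lastWeek)).
+//		AllX(ctx)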
+func CreatedAt(v time.Time) predicate.Post { + return predicate.Post(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Post { + return predicate.Post(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v Status) predicate.Post { + return predicate.Post(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v Status) predicate.Post { + return predicate.Post(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...Status) predicate.Post { + return predicate.Post(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...Status) predicate.Post { + return predicate.Post(sql.FieldNotIn(FieldStatus, vs...)) +} + +// SlugEQ applies the EQ predicate on the "slug" field. +func SlugEQ(v string) predicate.Post { + return predicate.Post(sql.FieldEQ(FieldSlug, v)) +} + +// SlugNEQ applies the NEQ predicate on the "slug" field. +func SlugNEQ(v string) predicate.Post { + return predicate.Post(sql.FieldNEQ(FieldSlug, v)) +} + +// SlugIn applies the In predicate on the "slug" field. +func SlugIn(vs ...string) predicate.Post { + return predicate.Post(sql.FieldIn(FieldSlug, vs...)) +} + +// SlugNotIn applies the NotIn predicate on the "slug" field. +func SlugNotIn(vs ...string) predicate.Post { + return predicate.Post(sql.FieldNotIn(FieldSlug, vs...)) +} + +// SlugGT applies the GT predicate on the "slug" field. +func SlugGT(v string) predicate.Post { + return predicate.Post(sql.FieldGT(FieldSlug, v)) +} + +// SlugGTE applies the GTE predicate on the "slug" field. +func SlugGTE(v string) predicate.Post { + return predicate.Post(sql.FieldGTE(FieldSlug, v)) +} + +// SlugLT applies the LT predicate on the "slug" field. +func SlugLT(v string) predicate.Post { + return predicate.Post(sql.FieldLT(FieldSlug, v)) +} + +// SlugLTE applies the LTE predicate on the "slug" field. +func SlugLTE(v string) predicate.Post { + return predicate.Post(sql.FieldLTE(FieldSlug, v)) +} + +// SlugContains applies the Contains predicate on the "slug" field. +func SlugContains(v string) predicate.Post { + return predicate.Post(sql.FieldContains(FieldSlug, v)) +} + +// SlugHasPrefix applies the HasPrefix predicate on the "slug" field. +func SlugHasPrefix(v string) predicate.Post { + return predicate.Post(sql.FieldHasPrefix(FieldSlug, v)) +} + +// SlugHasSuffix applies the HasSuffix predicate on the "slug" field. +func SlugHasSuffix(v string) predicate.Post { + return predicate.Post(sql.FieldHasSuffix(FieldSlug, v)) +} + +// SlugEqualFold applies the EqualFold predicate on the "slug" field. +func SlugEqualFold(v string) predicate.Post { + return predicate.Post(sql.FieldEqualFold(FieldSlug, v)) +} + +// SlugContainsFold applies the ContainsFold predicate on the "slug" field. +func SlugContainsFold(v string) predicate.Post { + return predicate.Post(sql.FieldContainsFold(FieldSlug, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Post { + return predicate.Post(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. 
+func CreatedAtNEQ(v time.Time) predicate.Post { + return predicate.Post(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Post { + return predicate.Post(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Post { + return predicate.Post(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Post { + return predicate.Post(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Post { + return predicate.Post(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Post { + return predicate.Post(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Post { + return predicate.Post(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Post { + return predicate.Post(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Post { + return predicate.Post(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Post { + return predicate.Post(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Post { + return predicate.Post(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Post { + return predicate.Post(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Post { + return predicate.Post(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Post { + return predicate.Post(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Post { + return predicate.Post(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// HasContents applies the HasEdge predicate on the "contents" edge. +func HasContents() predicate.Post { + return predicate.Post(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ContentsTable, ContentsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasContentsWith applies the HasEdge predicate on the "contents" edge with a given conditions (other predicates). +func HasContentsWith(preds ...predicate.PostContent) predicate.Post { + return predicate.Post(func(s *sql.Selector) { + step := newContentsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasContributors applies the HasEdge predicate on the "contributors" edge. 
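+//
+// Illustrative sketch combining edge and field predicates, assuming client
+// and ctx:
+//
+//	published := client.Post.Query().
+//		Where(
+//			post.StatusEQ(post.StatusPublished),
+//			post.HasContributors(),
+//		).
+//		AllX(ctx)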
+func HasContributors() predicate.Post { + return predicate.Post(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ContributorsTable, ContributorsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasContributorsWith applies the HasEdge predicate on the "contributors" edge with a given conditions (other predicates). +func HasContributorsWith(preds ...predicate.PostContributor) predicate.Post { + return predicate.Post(func(s *sql.Selector) { + step := newContributorsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasCategory applies the HasEdge predicate on the "category" edge. +func HasCategory() predicate.Post { + return predicate.Post(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, CategoryTable, CategoryColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasCategoryWith applies the HasEdge predicate on the "category" edge with a given conditions (other predicates). +func HasCategoryWith(preds ...predicate.Category) predicate.Post { + return predicate.Post(func(s *sql.Selector) { + step := newCategoryStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Post) predicate.Post { + return predicate.Post(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Post) predicate.Post { + return predicate.Post(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Post) predicate.Post { + return predicate.Post(sql.NotPredicates(p)) +} diff --git a/backend/ent/post_create.go b/backend/ent/post_create.go new file mode 100644 index 0000000..c001fe2 --- /dev/null +++ b/backend/ent/post_create.go @@ -0,0 +1,376 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/postcontent" + "tss-rocks-be/ent/postcontributor" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PostCreate is the builder for creating a Post entity. +type PostCreate struct { + config + mutation *PostMutation + hooks []Hook +} + +// SetStatus sets the "status" field. +func (pc *PostCreate) SetStatus(po post.Status) *PostCreate { + pc.mutation.SetStatus(po) + return pc +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (pc *PostCreate) SetNillableStatus(po *post.Status) *PostCreate { + if po != nil { + pc.SetStatus(*po) + } + return pc +} + +// SetSlug sets the "slug" field. +func (pc *PostCreate) SetSlug(s string) *PostCreate { + pc.mutation.SetSlug(s) + return pc +} + +// SetCreatedAt sets the "created_at" field. +func (pc *PostCreate) SetCreatedAt(t time.Time) *PostCreate { + pc.mutation.SetCreatedAt(t) + return pc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (pc *PostCreate) SetNillableCreatedAt(t *time.Time) *PostCreate { + if t != nil { + pc.SetCreatedAt(*t) + } + return pc +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (pc *PostCreate) SetUpdatedAt(t time.Time) *PostCreate { + pc.mutation.SetUpdatedAt(t) + return pc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (pc *PostCreate) SetNillableUpdatedAt(t *time.Time) *PostCreate { + if t != nil { + pc.SetUpdatedAt(*t) + } + return pc +} + +// AddContentIDs adds the "contents" edge to the PostContent entity by IDs. +func (pc *PostCreate) AddContentIDs(ids ...int) *PostCreate { + pc.mutation.AddContentIDs(ids...) + return pc +} + +// AddContents adds the "contents" edges to the PostContent entity. +func (pc *PostCreate) AddContents(p ...*PostContent) *PostCreate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return pc.AddContentIDs(ids...) +} + +// AddContributorIDs adds the "contributors" edge to the PostContributor entity by IDs. +func (pc *PostCreate) AddContributorIDs(ids ...int) *PostCreate { + pc.mutation.AddContributorIDs(ids...) + return pc +} + +// AddContributors adds the "contributors" edges to the PostContributor entity. +func (pc *PostCreate) AddContributors(p ...*PostContributor) *PostCreate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return pc.AddContributorIDs(ids...) +} + +// SetCategoryID sets the "category" edge to the Category entity by ID. +func (pc *PostCreate) SetCategoryID(id int) *PostCreate { + pc.mutation.SetCategoryID(id) + return pc +} + +// SetNillableCategoryID sets the "category" edge to the Category entity by ID if the given value is not nil. +func (pc *PostCreate) SetNillableCategoryID(id *int) *PostCreate { + if id != nil { + pc = pc.SetCategoryID(*id) + } + return pc +} + +// SetCategory sets the "category" edge to the Category entity. +func (pc *PostCreate) SetCategory(c *Category) *PostCreate { + return pc.SetCategoryID(c.ID) +} + +// Mutation returns the PostMutation object of the builder. +func (pc *PostCreate) Mutation() *PostMutation { + return pc.mutation +} + +// Save creates the Post in the database. +func (pc *PostCreate) Save(ctx context.Context) (*Post, error) { + pc.defaults() + return withHooks(ctx, pc.sqlSave, pc.mutation, pc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (pc *PostCreate) SaveX(ctx context.Context) *Post { + v, err := pc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (pc *PostCreate) Exec(ctx context.Context) error { + _, err := pc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (pc *PostCreate) ExecX(ctx context.Context) { + if err := pc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (pc *PostCreate) defaults() { + if _, ok := pc.mutation.Status(); !ok { + v := post.DefaultStatus + pc.mutation.SetStatus(v) + } + if _, ok := pc.mutation.CreatedAt(); !ok { + v := post.DefaultCreatedAt() + pc.mutation.SetCreatedAt(v) + } + if _, ok := pc.mutation.UpdatedAt(); !ok { + v := post.DefaultUpdatedAt() + pc.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
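+//
+// The following sketch passes check: slug is set explicitly, while status,
+// created_at and updated_at fall back to their defaults (assumes client, ctx
+// and an existing *Category cat; all hypothetical):
+//
+//	p, err := client.Post.Create().
+//		SetSlug("hello-world").
+//		SetCategory(cat).
+//		Save(ctx)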
+func (pc *PostCreate) check() error { + if _, ok := pc.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "Post.status"`)} + } + if v, ok := pc.mutation.Status(); ok { + if err := post.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Post.status": %w`, err)} + } + } + if _, ok := pc.mutation.Slug(); !ok { + return &ValidationError{Name: "slug", err: errors.New(`ent: missing required field "Post.slug"`)} + } + if v, ok := pc.mutation.Slug(); ok { + if err := post.SlugValidator(v); err != nil { + return &ValidationError{Name: "slug", err: fmt.Errorf(`ent: validator failed for field "Post.slug": %w`, err)} + } + } + if _, ok := pc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Post.created_at"`)} + } + if _, ok := pc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Post.updated_at"`)} + } + return nil +} + +func (pc *PostCreate) sqlSave(ctx context.Context) (*Post, error) { + if err := pc.check(); err != nil { + return nil, err + } + _node, _spec := pc.createSpec() + if err := sqlgraph.CreateNode(ctx, pc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + pc.mutation.id = &_node.ID + pc.mutation.done = true + return _node, nil +} + +func (pc *PostCreate) createSpec() (*Post, *sqlgraph.CreateSpec) { + var ( + _node = &Post{config: pc.config} + _spec = sqlgraph.NewCreateSpec(post.Table, sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt)) + ) + if value, ok := pc.mutation.Status(); ok { + _spec.SetField(post.FieldStatus, field.TypeEnum, value) + _node.Status = value + } + if value, ok := pc.mutation.Slug(); ok { + _spec.SetField(post.FieldSlug, field.TypeString, value) + _node.Slug = value + } + if value, ok := pc.mutation.CreatedAt(); ok { + _spec.SetField(post.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := pc.mutation.UpdatedAt(); ok { + _spec.SetField(post.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if nodes := pc.mutation.ContentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContentsTable, + Columns: []string{post.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := pc.mutation.ContributorsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContributorsTable, + Columns: []string{post.ContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := pc.mutation.CategoryIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: post.CategoryTable, + Columns: []string{post.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt), 
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _node.category_posts = &nodes[0]
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ return _node, _spec
+}
+
+// PostCreateBulk is the builder for creating many Post entities in bulk.
+type PostCreateBulk struct {
+ config
+ err error
+ builders []*PostCreate
+}
+
+// Save creates the Post entities in the database.
+func (pcb *PostCreateBulk) Save(ctx context.Context) ([]*Post, error) {
+ if pcb.err != nil {
+ return nil, pcb.err
+ }
+ specs := make([]*sqlgraph.CreateSpec, len(pcb.builders))
+ nodes := make([]*Post, len(pcb.builders))
+ mutators := make([]Mutator, len(pcb.builders))
+ for i := range pcb.builders {
+ func(i int, root context.Context) {
+ builder := pcb.builders[i]
+ builder.defaults()
+ var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+ mutation, ok := m.(*PostMutation)
+ if !ok {
+ return nil, fmt.Errorf("unexpected mutation type %T", m)
+ }
+ if err := builder.check(); err != nil {
+ return nil, err
+ }
+ builder.mutation = mutation
+ var err error
+ nodes[i], specs[i] = builder.createSpec()
+ if i < len(mutators)-1 {
+ _, err = mutators[i+1].Mutate(root, pcb.builders[i+1].mutation)
+ } else {
+ spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+ // Invoke the actual operation on the latest mutation in the chain.
+ if err = sqlgraph.BatchCreate(ctx, pcb.driver, spec); err != nil {
+ if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ mutation.id = &nodes[i].ID
+ if specs[i].ID.Value != nil {
+ id := specs[i].ID.Value.(int64)
+ nodes[i].ID = int(id)
+ }
+ mutation.done = true
+ return nodes[i], nil
+ })
+ for i := len(builder.hooks) - 1; i >= 0; i-- {
+ mut = builder.hooks[i](mut)
+ }
+ mutators[i] = mut
+ }(i, ctx)
+ }
+ if len(mutators) > 0 {
+ if _, err := mutators[0].Mutate(ctx, pcb.builders[0].mutation); err != nil {
+ return nil, err
+ }
+ }
+ return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (pcb *PostCreateBulk) SaveX(ctx context.Context) []*Post {
+ v, err := pcb.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Exec executes the query.
+func (pcb *PostCreateBulk) Exec(ctx context.Context) error {
+ _, err := pcb.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (pcb *PostCreateBulk) ExecX(ctx context.Context) {
+ if err := pcb.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
diff --git a/backend/ent/post_delete.go b/backend/ent/post_delete.go
new file mode 100644
index 0000000..3f0abd7
--- /dev/null
+++ b/backend/ent/post_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "tss-rocks-be/ent/post"
+ "tss-rocks-be/ent/predicate"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+)
+
+// PostDelete is the builder for deleting a Post entity.
+type PostDelete struct {
+ config
+ hooks []Hook
+ mutation *PostMutation
+}
+
+// Where appends a list of predicates to the PostDelete builder.
+func (pd *PostDelete) Where(ps ...predicate.Post) *PostDelete {
+ pd.mutation.Where(ps...)
+ return pd
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (pd *PostDelete) Exec(ctx context.Context) (int, error) {
+ return withHooks(ctx, pd.sqlExec, pd.mutation, pd.hooks)
+}
+
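+// Example (illustrative sketch, assuming the generated Client from client.go
+// and the usual post.SlugEQ predicate from post/where.go):
+//
+// n, err := client.Post.Delete().
+// Where(post.SlugEQ("obsolete-post")).
+// Exec(ctx) // n reports how many rows were deleted
+//
+// DeleteOne (below) instead returns a *NotFoundError when nothing matched.
+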
+// ExecX is like Exec, but panics if an error occurs.
+func (pd *PostDelete) ExecX(ctx context.Context) int {
+ n, err := pd.Exec(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return n
+}
+
+func (pd *PostDelete) sqlExec(ctx context.Context) (int, error) {
+ _spec := sqlgraph.NewDeleteSpec(post.Table, sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt))
+ if ps := pd.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ affected, err := sqlgraph.DeleteNodes(ctx, pd.driver, _spec)
+ if err != nil && sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ pd.mutation.done = true
+ return affected, err
+}
+
+// PostDeleteOne is the builder for deleting a single Post entity.
+type PostDeleteOne struct {
+ pd *PostDelete
+}
+
+// Where appends a list of predicates to the PostDelete builder.
+func (pdo *PostDeleteOne) Where(ps ...predicate.Post) *PostDeleteOne {
+ pdo.pd.mutation.Where(ps...)
+ return pdo
+}
+
+// Exec executes the deletion query.
+func (pdo *PostDeleteOne) Exec(ctx context.Context) error {
+ n, err := pdo.pd.Exec(ctx)
+ switch {
+ case err != nil:
+ return err
+ case n == 0:
+ return &NotFoundError{post.Label}
+ default:
+ return nil
+ }
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (pdo *PostDeleteOne) ExecX(ctx context.Context) {
+ if err := pdo.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
diff --git a/backend/ent/post_query.go b/backend/ent/post_query.go
new file mode 100644
index 0000000..f90b5ff
--- /dev/null
+++ b/backend/ent/post_query.go
@@ -0,0 +1,765 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "database/sql/driver"
+ "fmt"
+ "math"
+ "tss-rocks-be/ent/category"
+ "tss-rocks-be/ent/post"
+ "tss-rocks-be/ent/postcontent"
+ "tss-rocks-be/ent/postcontributor"
+ "tss-rocks-be/ent/predicate"
+
+ "entgo.io/ent"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+)
+
+// PostQuery is the builder for querying Post entities.
+type PostQuery struct {
+ config
+ ctx *QueryContext
+ order []post.OrderOption
+ inters []Interceptor
+ predicates []predicate.Post
+ withContents *PostContentQuery
+ withContributors *PostContributorQuery
+ withCategory *CategoryQuery
+ withFKs bool
+ // intermediate query (i.e. traversal path).
+ sql *sql.Selector
+ path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the PostQuery builder.
+func (pq *PostQuery) Where(ps ...predicate.Post) *PostQuery {
+ pq.predicates = append(pq.predicates, ps...)
+ return pq
+}
+
+// Limit the number of records to be returned by this query.
+func (pq *PostQuery) Limit(limit int) *PostQuery {
+ pq.ctx.Limit = &limit
+ return pq
+}
+
+// Offset to start from.
+func (pq *PostQuery) Offset(offset int) *PostQuery {
+ pq.ctx.Offset = &offset
+ return pq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (pq *PostQuery) Unique(unique bool) *PostQuery {
+ pq.ctx.Unique = &unique
+ return pq
+}
+
+// Order specifies how the records should be ordered.
+func (pq *PostQuery) Order(o ...post.OrderOption) *PostQuery {
+ pq.order = append(pq.order, o...)
+ return pq
+}
+
+// QueryContents chains the current query on the "contents" edge.
+func (pq *PostQuery) QueryContents() *PostContentQuery { + query := (&PostContentClient{config: pq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := pq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := pq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(post.Table, post.FieldID, selector), + sqlgraph.To(postcontent.Table, postcontent.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, post.ContentsTable, post.ContentsColumn), + ) + fromU = sqlgraph.SetNeighbors(pq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryContributors chains the current query on the "contributors" edge. +func (pq *PostQuery) QueryContributors() *PostContributorQuery { + query := (&PostContributorClient{config: pq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := pq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := pq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(post.Table, post.FieldID, selector), + sqlgraph.To(postcontributor.Table, postcontributor.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, post.ContributorsTable, post.ContributorsColumn), + ) + fromU = sqlgraph.SetNeighbors(pq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryCategory chains the current query on the "category" edge. +func (pq *PostQuery) QueryCategory() *CategoryQuery { + query := (&CategoryClient{config: pq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := pq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := pq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(post.Table, post.FieldID, selector), + sqlgraph.To(category.Table, category.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, post.CategoryTable, post.CategoryColumn), + ) + fromU = sqlgraph.SetNeighbors(pq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Post entity from the query. +// Returns a *NotFoundError when no Post was found. +func (pq *PostQuery) First(ctx context.Context) (*Post, error) { + nodes, err := pq.Limit(1).All(setContextOp(ctx, pq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{post.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (pq *PostQuery) FirstX(ctx context.Context) *Post { + node, err := pq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Post ID from the query. +// Returns a *NotFoundError when no Post ID was found. +func (pq *PostQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = pq.Limit(1).IDs(setContextOp(ctx, pq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{post.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (pq *PostQuery) FirstIDX(ctx context.Context) int { + id, err := pq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Post entity found by the query, ensuring it only returns one. 
+// Returns a *NotSingularError when more than one Post entity is found. +// Returns a *NotFoundError when no Post entities are found. +func (pq *PostQuery) Only(ctx context.Context) (*Post, error) { + nodes, err := pq.Limit(2).All(setContextOp(ctx, pq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{post.Label} + default: + return nil, &NotSingularError{post.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (pq *PostQuery) OnlyX(ctx context.Context) *Post { + node, err := pq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Post ID in the query. +// Returns a *NotSingularError when more than one Post ID is found. +// Returns a *NotFoundError when no entities are found. +func (pq *PostQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = pq.Limit(2).IDs(setContextOp(ctx, pq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{post.Label} + default: + err = &NotSingularError{post.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (pq *PostQuery) OnlyIDX(ctx context.Context) int { + id, err := pq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Posts. +func (pq *PostQuery) All(ctx context.Context) ([]*Post, error) { + ctx = setContextOp(ctx, pq.ctx, ent.OpQueryAll) + if err := pq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Post, *PostQuery]() + return withInterceptors[[]*Post](ctx, pq, qr, pq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (pq *PostQuery) AllX(ctx context.Context) []*Post { + nodes, err := pq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Post IDs. +func (pq *PostQuery) IDs(ctx context.Context) (ids []int, err error) { + if pq.ctx.Unique == nil && pq.path != nil { + pq.Unique(true) + } + ctx = setContextOp(ctx, pq.ctx, ent.OpQueryIDs) + if err = pq.Select(post.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (pq *PostQuery) IDsX(ctx context.Context) []int { + ids, err := pq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (pq *PostQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, pq.ctx, ent.OpQueryCount) + if err := pq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, pq, querierCount[*PostQuery](), pq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (pq *PostQuery) CountX(ctx context.Context) int { + count, err := pq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (pq *PostQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, pq.ctx, ent.OpQueryExist) + switch _, err := pq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. 
+func (pq *PostQuery) ExistX(ctx context.Context) bool {
+ exist, err := pq.Exist(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return exist
+}
+
+// Clone returns a duplicate of the PostQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (pq *PostQuery) Clone() *PostQuery {
+ if pq == nil {
+ return nil
+ }
+ return &PostQuery{
+ config: pq.config,
+ ctx: pq.ctx.Clone(),
+ order: append([]post.OrderOption{}, pq.order...),
+ inters: append([]Interceptor{}, pq.inters...),
+ predicates: append([]predicate.Post{}, pq.predicates...),
+ withContents: pq.withContents.Clone(),
+ withContributors: pq.withContributors.Clone(),
+ withCategory: pq.withCategory.Clone(),
+ // clone intermediate query.
+ sql: pq.sql.Clone(),
+ path: pq.path,
+ }
+}
+
+// WithContents tells the query-builder to eager-load the nodes that are connected to
+// the "contents" edge. The optional arguments are used to configure the query builder of the edge.
+func (pq *PostQuery) WithContents(opts ...func(*PostContentQuery)) *PostQuery {
+ query := (&PostContentClient{config: pq.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ pq.withContents = query
+ return pq
+}
+
+// WithContributors tells the query-builder to eager-load the nodes that are connected to
+// the "contributors" edge. The optional arguments are used to configure the query builder of the edge.
+func (pq *PostQuery) WithContributors(opts ...func(*PostContributorQuery)) *PostQuery {
+ query := (&PostContributorClient{config: pq.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ pq.withContributors = query
+ return pq
+}
+
+// WithCategory tells the query-builder to eager-load the nodes that are connected to
+// the "category" edge. The optional arguments are used to configure the query builder of the edge.
+func (pq *PostQuery) WithCategory(opts ...func(*CategoryQuery)) *PostQuery {
+ query := (&CategoryClient{config: pq.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ pq.withCategory = query
+ return pq
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+// var v []struct {
+// Status post.Status `json:"status,omitempty"`
+// Count int `json:"count,omitempty"`
+// }
+//
+// client.Post.Query().
+// GroupBy(post.FieldStatus).
+// Aggregate(ent.Count()).
+// Scan(ctx, &v)
+func (pq *PostQuery) GroupBy(field string, fields ...string) *PostGroupBy {
+ pq.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &PostGroupBy{build: pq}
+ grbuild.flds = &pq.ctx.Fields
+ grbuild.label = post.Label
+ grbuild.scan = grbuild.Scan
+ return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+// var v []struct {
+// Status post.Status `json:"status,omitempty"`
+// }
+//
+// client.Post.Query().
+// Select(post.FieldStatus).
+// Scan(ctx, &v)
+func (pq *PostQuery) Select(fields ...string) *PostSelect {
+ pq.ctx.Fields = append(pq.ctx.Fields, fields...)
+ sbuild := &PostSelect{PostQuery: pq}
+ sbuild.label = post.Label
+ sbuild.flds, sbuild.scan = &pq.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a PostSelect configured with the given aggregations.
+func (pq *PostQuery) Aggregate(fns ...AggregateFunc) *PostSelect {
+ return pq.Select().Aggregate(fns...)
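+ // Example (illustrative sketch): the With* builders above combine with All
+ // to eager-load edges in the same query round; sqlAll below assigns the
+ // results onto Edges:
+ //
+ // posts, err := client.Post.Query().
+ // WithContents().
+ // WithContributors().
+ // WithCategory().
+ // All(ctx)
+ // // posts[i].Edges.Contents, .Contributors and .Category are now populated.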
+} + +func (pq *PostQuery) prepareQuery(ctx context.Context) error { + for _, inter := range pq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, pq); err != nil { + return err + } + } + } + for _, f := range pq.ctx.Fields { + if !post.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if pq.path != nil { + prev, err := pq.path(ctx) + if err != nil { + return err + } + pq.sql = prev + } + return nil +} + +func (pq *PostQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Post, error) { + var ( + nodes = []*Post{} + withFKs = pq.withFKs + _spec = pq.querySpec() + loadedTypes = [3]bool{ + pq.withContents != nil, + pq.withContributors != nil, + pq.withCategory != nil, + } + ) + if pq.withCategory != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, post.ForeignKeys...) + } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Post).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Post{config: pq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, pq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := pq.withContents; query != nil { + if err := pq.loadContents(ctx, query, nodes, + func(n *Post) { n.Edges.Contents = []*PostContent{} }, + func(n *Post, e *PostContent) { n.Edges.Contents = append(n.Edges.Contents, e) }); err != nil { + return nil, err + } + } + if query := pq.withContributors; query != nil { + if err := pq.loadContributors(ctx, query, nodes, + func(n *Post) { n.Edges.Contributors = []*PostContributor{} }, + func(n *Post, e *PostContributor) { n.Edges.Contributors = append(n.Edges.Contributors, e) }); err != nil { + return nil, err + } + } + if query := pq.withCategory; query != nil { + if err := pq.loadCategory(ctx, query, nodes, nil, + func(n *Post, e *Category) { n.Edges.Category = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (pq *PostQuery) loadContents(ctx context.Context, query *PostContentQuery, nodes []*Post, init func(*Post), assign func(*Post, *PostContent)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Post) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.PostContent(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(post.ContentsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.post_contents + if fk == nil { + return fmt.Errorf(`foreign-key "post_contents" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "post_contents" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (pq *PostQuery) loadContributors(ctx context.Context, query *PostContributorQuery, nodes []*Post, init func(*Post), assign func(*Post, *PostContributor)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*Post) + for i := range nodes { + fks = 
append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.PostContributor(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(post.ContributorsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.post_contributors + if fk == nil { + return fmt.Errorf(`foreign-key "post_contributors" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "post_contributors" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (pq *PostQuery) loadCategory(ctx context.Context, query *CategoryQuery, nodes []*Post, init func(*Post), assign func(*Post, *Category)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*Post) + for i := range nodes { + if nodes[i].category_posts == nil { + continue + } + fk := *nodes[i].category_posts + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(category.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "category_posts" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (pq *PostQuery) sqlCount(ctx context.Context) (int, error) { + _spec := pq.querySpec() + _spec.Node.Columns = pq.ctx.Fields + if len(pq.ctx.Fields) > 0 { + _spec.Unique = pq.ctx.Unique != nil && *pq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, pq.driver, _spec) +} + +func (pq *PostQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(post.Table, post.Columns, sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt)) + _spec.From = pq.sql + if unique := pq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if pq.path != nil { + _spec.Unique = true + } + if fields := pq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, post.FieldID) + for i := range fields { + if fields[i] != post.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := pq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := pq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := pq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := pq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (pq *PostQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(pq.driver.Dialect()) + t1 := builder.Table(post.Table) + columns := pq.ctx.Fields + if len(columns) == 0 { + columns = post.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if pq.sql != nil { + selector = pq.sql + selector.Select(selector.Columns(columns...)...) + } + if pq.ctx.Unique != nil && *pq.ctx.Unique { + selector.Distinct() + } + for _, p := range pq.predicates { + p(selector) + } + for _, p := range pq.order { + p(selector) + } + if offset := pq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := pq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// PostGroupBy is the group-by builder for Post entities. +type PostGroupBy struct { + selector + build *PostQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (pgb *PostGroupBy) Aggregate(fns ...AggregateFunc) *PostGroupBy { + pgb.fns = append(pgb.fns, fns...) + return pgb +} + +// Scan applies the selector query and scans the result into the given value. +func (pgb *PostGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pgb.build.ctx, ent.OpQueryGroupBy) + if err := pgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PostQuery, *PostGroupBy](ctx, pgb.build, pgb, pgb.build.inters, v) +} + +func (pgb *PostGroupBy) sqlScan(ctx context.Context, root *PostQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(pgb.fns)) + for _, fn := range pgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*pgb.flds)+len(pgb.fns)) + for _, f := range *pgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*pgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := pgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// PostSelect is the builder for selecting fields of Post entities. +type PostSelect struct { + *PostQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ps *PostSelect) Aggregate(fns ...AggregateFunc) *PostSelect { + ps.fns = append(ps.fns, fns...) + return ps +} + +// Scan applies the selector query and scans the result into the given value. +func (ps *PostSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ps.ctx, ent.OpQuerySelect) + if err := ps.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PostQuery, *PostSelect](ctx, ps.PostQuery, ps, ps.inters, v) +} + +func (ps *PostSelect) sqlScan(ctx context.Context, root *PostQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ps.fns)) + for _, fn := range ps.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ps.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ps.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/post_update.go b/backend/ent/post_update.go new file mode 100644 index 0000000..d936a6a --- /dev/null +++ b/backend/ent/post_update.go @@ -0,0 +1,785 @@ +// Code generated by ent, DO NOT EDIT. 
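+//
+// Example (illustrative sketch, assuming the standard generated Client and a
+// post.StatusEQ predicate from post/where.go; the "draft" enum value is also
+// assumed): a predicate-scoped bulk update looks like:
+//
+// n, err := client.Post.Update().
+// Where(post.StatusEQ(post.Status("draft"))).
+// SetSlug("renamed-slug").
+// Save(ctx) // n rows updated; updated_at falls back to post.UpdateDefaultUpdatedAt() via defaults()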
+
+package ent
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+ "tss-rocks-be/ent/category"
+ "tss-rocks-be/ent/post"
+ "tss-rocks-be/ent/postcontent"
+ "tss-rocks-be/ent/postcontributor"
+ "tss-rocks-be/ent/predicate"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+)
+
+// PostUpdate is the builder for updating Post entities.
+type PostUpdate struct {
+ config
+ hooks []Hook
+ mutation *PostMutation
+}
+
+// Where appends a list of predicates to the PostUpdate builder.
+func (pu *PostUpdate) Where(ps ...predicate.Post) *PostUpdate {
+ pu.mutation.Where(ps...)
+ return pu
+}
+
+// SetStatus sets the "status" field.
+func (pu *PostUpdate) SetStatus(po post.Status) *PostUpdate {
+ pu.mutation.SetStatus(po)
+ return pu
+}
+
+// SetNillableStatus sets the "status" field if the given value is not nil.
+func (pu *PostUpdate) SetNillableStatus(po *post.Status) *PostUpdate {
+ if po != nil {
+ pu.SetStatus(*po)
+ }
+ return pu
+}
+
+// SetSlug sets the "slug" field.
+func (pu *PostUpdate) SetSlug(s string) *PostUpdate {
+ pu.mutation.SetSlug(s)
+ return pu
+}
+
+// SetNillableSlug sets the "slug" field if the given value is not nil.
+func (pu *PostUpdate) SetNillableSlug(s *string) *PostUpdate {
+ if s != nil {
+ pu.SetSlug(*s)
+ }
+ return pu
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (pu *PostUpdate) SetCreatedAt(t time.Time) *PostUpdate {
+ pu.mutation.SetCreatedAt(t)
+ return pu
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (pu *PostUpdate) SetNillableCreatedAt(t *time.Time) *PostUpdate {
+ if t != nil {
+ pu.SetCreatedAt(*t)
+ }
+ return pu
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (pu *PostUpdate) SetUpdatedAt(t time.Time) *PostUpdate {
+ pu.mutation.SetUpdatedAt(t)
+ return pu
+}
+
+// AddContentIDs adds the "contents" edge to the PostContent entity by IDs.
+func (pu *PostUpdate) AddContentIDs(ids ...int) *PostUpdate {
+ pu.mutation.AddContentIDs(ids...)
+ return pu
+}
+
+// AddContents adds the "contents" edges to the PostContent entity.
+func (pu *PostUpdate) AddContents(p ...*PostContent) *PostUpdate {
+ ids := make([]int, len(p))
+ for i := range p {
+ ids[i] = p[i].ID
+ }
+ return pu.AddContentIDs(ids...)
+}
+
+// AddContributorIDs adds the "contributors" edge to the PostContributor entity by IDs.
+func (pu *PostUpdate) AddContributorIDs(ids ...int) *PostUpdate {
+ pu.mutation.AddContributorIDs(ids...)
+ return pu
+}
+
+// AddContributors adds the "contributors" edges to the PostContributor entity.
+func (pu *PostUpdate) AddContributors(p ...*PostContributor) *PostUpdate {
+ ids := make([]int, len(p))
+ for i := range p {
+ ids[i] = p[i].ID
+ }
+ return pu.AddContributorIDs(ids...)
+}
+
+// SetCategoryID sets the "category" edge to the Category entity by ID.
+func (pu *PostUpdate) SetCategoryID(id int) *PostUpdate {
+ pu.mutation.SetCategoryID(id)
+ return pu
+}
+
+// SetNillableCategoryID sets the "category" edge to the Category entity by ID if the given value is not nil.
+func (pu *PostUpdate) SetNillableCategoryID(id *int) *PostUpdate {
+ if id != nil {
+ pu = pu.SetCategoryID(*id)
+ }
+ return pu
+}
+
+// SetCategory sets the "category" edge to the Category entity.
+func (pu *PostUpdate) SetCategory(c *Category) *PostUpdate {
+ return pu.SetCategoryID(c.ID)
+}
+
+// Mutation returns the PostMutation object of the builder.
+func (pu *PostUpdate) Mutation() *PostMutation { + return pu.mutation +} + +// ClearContents clears all "contents" edges to the PostContent entity. +func (pu *PostUpdate) ClearContents() *PostUpdate { + pu.mutation.ClearContents() + return pu +} + +// RemoveContentIDs removes the "contents" edge to PostContent entities by IDs. +func (pu *PostUpdate) RemoveContentIDs(ids ...int) *PostUpdate { + pu.mutation.RemoveContentIDs(ids...) + return pu +} + +// RemoveContents removes "contents" edges to PostContent entities. +func (pu *PostUpdate) RemoveContents(p ...*PostContent) *PostUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return pu.RemoveContentIDs(ids...) +} + +// ClearContributors clears all "contributors" edges to the PostContributor entity. +func (pu *PostUpdate) ClearContributors() *PostUpdate { + pu.mutation.ClearContributors() + return pu +} + +// RemoveContributorIDs removes the "contributors" edge to PostContributor entities by IDs. +func (pu *PostUpdate) RemoveContributorIDs(ids ...int) *PostUpdate { + pu.mutation.RemoveContributorIDs(ids...) + return pu +} + +// RemoveContributors removes "contributors" edges to PostContributor entities. +func (pu *PostUpdate) RemoveContributors(p ...*PostContributor) *PostUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return pu.RemoveContributorIDs(ids...) +} + +// ClearCategory clears the "category" edge to the Category entity. +func (pu *PostUpdate) ClearCategory() *PostUpdate { + pu.mutation.ClearCategory() + return pu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (pu *PostUpdate) Save(ctx context.Context) (int, error) { + pu.defaults() + return withHooks(ctx, pu.sqlSave, pu.mutation, pu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (pu *PostUpdate) SaveX(ctx context.Context) int { + affected, err := pu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (pu *PostUpdate) Exec(ctx context.Context) error { + _, err := pu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (pu *PostUpdate) ExecX(ctx context.Context) { + if err := pu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (pu *PostUpdate) defaults() { + if _, ok := pu.mutation.UpdatedAt(); !ok { + v := post.UpdateDefaultUpdatedAt() + pu.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (pu *PostUpdate) check() error { + if v, ok := pu.mutation.Status(); ok { + if err := post.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Post.status": %w`, err)} + } + } + if v, ok := pu.mutation.Slug(); ok { + if err := post.SlugValidator(v); err != nil { + return &ValidationError{Name: "slug", err: fmt.Errorf(`ent: validator failed for field "Post.slug": %w`, err)} + } + } + return nil +} + +func (pu *PostUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := pu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(post.Table, post.Columns, sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt)) + if ps := pu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := pu.mutation.Status(); ok { + _spec.SetField(post.FieldStatus, field.TypeEnum, value) + } + if value, ok := pu.mutation.Slug(); ok { + _spec.SetField(post.FieldSlug, field.TypeString, value) + } + if value, ok := pu.mutation.CreatedAt(); ok { + _spec.SetField(post.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := pu.mutation.UpdatedAt(); ok { + _spec.SetField(post.FieldUpdatedAt, field.TypeTime, value) + } + if pu.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContentsTable, + Columns: []string{post.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontent.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pu.mutation.RemovedContentsIDs(); len(nodes) > 0 && !pu.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContentsTable, + Columns: []string{post.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pu.mutation.ContentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContentsTable, + Columns: []string{post.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if pu.mutation.ContributorsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContributorsTable, + Columns: []string{post.ContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pu.mutation.RemovedContributorsIDs(); len(nodes) > 0 && !pu.mutation.ContributorsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContributorsTable, + Columns: []string{post.ContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := 
pu.mutation.ContributorsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContributorsTable, + Columns: []string{post.ContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if pu.mutation.CategoryCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: post.CategoryTable, + Columns: []string{post.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pu.mutation.CategoryIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: post.CategoryTable, + Columns: []string{post.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, pu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{post.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + pu.mutation.done = true + return n, nil +} + +// PostUpdateOne is the builder for updating a single Post entity. +type PostUpdateOne struct { + config + fields []string + hooks []Hook + mutation *PostMutation +} + +// SetStatus sets the "status" field. +func (puo *PostUpdateOne) SetStatus(po post.Status) *PostUpdateOne { + puo.mutation.SetStatus(po) + return puo +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (puo *PostUpdateOne) SetNillableStatus(po *post.Status) *PostUpdateOne { + if po != nil { + puo.SetStatus(*po) + } + return puo +} + +// SetSlug sets the "slug" field. +func (puo *PostUpdateOne) SetSlug(s string) *PostUpdateOne { + puo.mutation.SetSlug(s) + return puo +} + +// SetNillableSlug sets the "slug" field if the given value is not nil. +func (puo *PostUpdateOne) SetNillableSlug(s *string) *PostUpdateOne { + if s != nil { + puo.SetSlug(*s) + } + return puo +} + +// SetCreatedAt sets the "created_at" field. +func (puo *PostUpdateOne) SetCreatedAt(t time.Time) *PostUpdateOne { + puo.mutation.SetCreatedAt(t) + return puo +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (puo *PostUpdateOne) SetNillableCreatedAt(t *time.Time) *PostUpdateOne { + if t != nil { + puo.SetCreatedAt(*t) + } + return puo +} + +// SetUpdatedAt sets the "updated_at" field. +func (puo *PostUpdateOne) SetUpdatedAt(t time.Time) *PostUpdateOne { + puo.mutation.SetUpdatedAt(t) + return puo +} + +// AddContentIDs adds the "contents" edge to the PostContent entity by IDs. +func (puo *PostUpdateOne) AddContentIDs(ids ...int) *PostUpdateOne { + puo.mutation.AddContentIDs(ids...) + return puo +} + +// AddContents adds the "contents" edges to the PostContent entity. +func (puo *PostUpdateOne) AddContents(p ...*PostContent) *PostUpdateOne { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return puo.AddContentIDs(ids...) 
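+ // Example (illustrative sketch, UpdateOneID assumed from client.go): the
+ // Remove* methods drop specific edges while Clear* drops them all; sqlSave
+ // renders both as _spec.Edges.Clear entries:
+ //
+ // p, err := client.Post.UpdateOneID(id).
+ // RemoveContributorIDs(3, 4). // detach two PostContributor rows
+ // ClearCategory(). // null out the category foreign key
+ // Save(ctx)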
+}
+
+// AddContributorIDs adds the "contributors" edge to the PostContributor entity by IDs.
+func (puo *PostUpdateOne) AddContributorIDs(ids ...int) *PostUpdateOne {
+ puo.mutation.AddContributorIDs(ids...)
+ return puo
+}
+
+// AddContributors adds the "contributors" edges to the PostContributor entity.
+func (puo *PostUpdateOne) AddContributors(p ...*PostContributor) *PostUpdateOne {
+ ids := make([]int, len(p))
+ for i := range p {
+ ids[i] = p[i].ID
+ }
+ return puo.AddContributorIDs(ids...)
+}
+
+// SetCategoryID sets the "category" edge to the Category entity by ID.
+func (puo *PostUpdateOne) SetCategoryID(id int) *PostUpdateOne {
+ puo.mutation.SetCategoryID(id)
+ return puo
+}
+
+// SetNillableCategoryID sets the "category" edge to the Category entity by ID if the given value is not nil.
+func (puo *PostUpdateOne) SetNillableCategoryID(id *int) *PostUpdateOne {
+ if id != nil {
+ puo = puo.SetCategoryID(*id)
+ }
+ return puo
+}
+
+// SetCategory sets the "category" edge to the Category entity.
+func (puo *PostUpdateOne) SetCategory(c *Category) *PostUpdateOne {
+ return puo.SetCategoryID(c.ID)
+}
+
+// Mutation returns the PostMutation object of the builder.
+func (puo *PostUpdateOne) Mutation() *PostMutation {
+ return puo.mutation
+}
+
+// ClearContents clears all "contents" edges to the PostContent entity.
+func (puo *PostUpdateOne) ClearContents() *PostUpdateOne {
+ puo.mutation.ClearContents()
+ return puo
+}
+
+// RemoveContentIDs removes the "contents" edge to PostContent entities by IDs.
+func (puo *PostUpdateOne) RemoveContentIDs(ids ...int) *PostUpdateOne {
+ puo.mutation.RemoveContentIDs(ids...)
+ return puo
+}
+
+// RemoveContents removes "contents" edges to PostContent entities.
+func (puo *PostUpdateOne) RemoveContents(p ...*PostContent) *PostUpdateOne {
+ ids := make([]int, len(p))
+ for i := range p {
+ ids[i] = p[i].ID
+ }
+ return puo.RemoveContentIDs(ids...)
+}
+
+// ClearContributors clears all "contributors" edges to the PostContributor entity.
+func (puo *PostUpdateOne) ClearContributors() *PostUpdateOne {
+ puo.mutation.ClearContributors()
+ return puo
+}
+
+// RemoveContributorIDs removes the "contributors" edge to PostContributor entities by IDs.
+func (puo *PostUpdateOne) RemoveContributorIDs(ids ...int) *PostUpdateOne {
+ puo.mutation.RemoveContributorIDs(ids...)
+ return puo
+}
+
+// RemoveContributors removes "contributors" edges to PostContributor entities.
+func (puo *PostUpdateOne) RemoveContributors(p ...*PostContributor) *PostUpdateOne {
+ ids := make([]int, len(p))
+ for i := range p {
+ ids[i] = p[i].ID
+ }
+ return puo.RemoveContributorIDs(ids...)
+}
+
+// ClearCategory clears the "category" edge to the Category entity.
+func (puo *PostUpdateOne) ClearCategory() *PostUpdateOne {
+ puo.mutation.ClearCategory()
+ return puo
+}
+
+// Where appends a list of predicates to the PostUpdateOne builder.
+func (puo *PostUpdateOne) Where(ps ...predicate.Post) *PostUpdateOne {
+ puo.mutation.Where(ps...)
+ return puo
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (puo *PostUpdateOne) Select(field string, fields ...string) *PostUpdateOne {
+ puo.fields = append([]string{field}, fields...)
+ return puo
+}
+
+// Save executes the query and returns the updated Post entity.
+func (puo *PostUpdateOne) Save(ctx context.Context) (*Post, error) { + puo.defaults() + return withHooks(ctx, puo.sqlSave, puo.mutation, puo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (puo *PostUpdateOne) SaveX(ctx context.Context) *Post { + node, err := puo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (puo *PostUpdateOne) Exec(ctx context.Context) error { + _, err := puo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (puo *PostUpdateOne) ExecX(ctx context.Context) { + if err := puo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (puo *PostUpdateOne) defaults() { + if _, ok := puo.mutation.UpdatedAt(); !ok { + v := post.UpdateDefaultUpdatedAt() + puo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (puo *PostUpdateOne) check() error { + if v, ok := puo.mutation.Status(); ok { + if err := post.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Post.status": %w`, err)} + } + } + if v, ok := puo.mutation.Slug(); ok { + if err := post.SlugValidator(v); err != nil { + return &ValidationError{Name: "slug", err: fmt.Errorf(`ent: validator failed for field "Post.slug": %w`, err)} + } + } + return nil +} + +func (puo *PostUpdateOne) sqlSave(ctx context.Context) (_node *Post, err error) { + if err := puo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(post.Table, post.Columns, sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt)) + id, ok := puo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Post.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := puo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, post.FieldID) + for _, f := range fields { + if !post.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != post.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := puo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := puo.mutation.Status(); ok { + _spec.SetField(post.FieldStatus, field.TypeEnum, value) + } + if value, ok := puo.mutation.Slug(); ok { + _spec.SetField(post.FieldSlug, field.TypeString, value) + } + if value, ok := puo.mutation.CreatedAt(); ok { + _spec.SetField(post.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := puo.mutation.UpdatedAt(); ok { + _spec.SetField(post.FieldUpdatedAt, field.TypeTime, value) + } + if puo.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContentsTable, + Columns: []string{post.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontent.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := puo.mutation.RemovedContentsIDs(); len(nodes) > 0 && !puo.mutation.ContentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContentsTable, + Columns: []string{post.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: 
sqlgraph.NewFieldSpec(postcontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := puo.mutation.ContentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContentsTable, + Columns: []string{post.ContentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if puo.mutation.ContributorsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContributorsTable, + Columns: []string{post.ContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := puo.mutation.RemovedContributorsIDs(); len(nodes) > 0 && !puo.mutation.ContributorsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContributorsTable, + Columns: []string{post.ContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := puo.mutation.ContributorsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: post.ContributorsTable, + Columns: []string{post.ContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if puo.mutation.CategoryCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: post.CategoryTable, + Columns: []string{post.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := puo.mutation.CategoryIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: post.CategoryTable, + Columns: []string{post.CategoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(category.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Post{config: puo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, puo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{post.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + puo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/postcontent.go b/backend/ent/postcontent.go new file mode 100644 index 0000000..4e8debe --- /dev/null +++ b/backend/ent/postcontent.go @@ -0,0 +1,208 @@ +// Code generated by ent, DO NOT EDIT. 
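+//
+// Example (illustrative sketch, assuming the generated PostContent client and
+// a WithPost eager-loading option mirroring PostQuery.WithContents): the
+// PostOrErr accessor below distinguishes a not-loaded edge from a missing row:
+//
+// pc, err := client.PostContent.Query().WithPost().First(ctx)
+// if err == nil {
+// if p, perr := pc.Edges.PostOrErr(); perr == nil {
+// fmt.Println(p.Slug, pc.Title)
+// }
+// }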
+ +package ent + +import ( + "fmt" + "strings" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/postcontent" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// PostContent is the model entity for the PostContent schema. +type PostContent struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // LanguageCode holds the value of the "language_code" field. + LanguageCode postcontent.LanguageCode `json:"language_code,omitempty"` + // Title holds the value of the "title" field. + Title string `json:"title,omitempty"` + // ContentMarkdown holds the value of the "content_markdown" field. + ContentMarkdown string `json:"content_markdown,omitempty"` + // Summary holds the value of the "summary" field. + Summary string `json:"summary,omitempty"` + // MetaKeywords holds the value of the "meta_keywords" field. + MetaKeywords string `json:"meta_keywords,omitempty"` + // MetaDescription holds the value of the "meta_description" field. + MetaDescription string `json:"meta_description,omitempty"` + // Slug holds the value of the "slug" field. + Slug string `json:"slug,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the PostContentQuery when eager-loading is set. + Edges PostContentEdges `json:"edges"` + post_contents *int + selectValues sql.SelectValues +} + +// PostContentEdges holds the relations/edges for other nodes in the graph. +type PostContentEdges struct { + // Post holds the value of the post edge. + Post *Post `json:"post,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// PostOrErr returns the Post value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e PostContentEdges) PostOrErr() (*Post, error) { + if e.Post != nil { + return e.Post, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: post.Label} + } + return nil, &NotLoadedError{edge: "post"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*PostContent) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case postcontent.FieldID: + values[i] = new(sql.NullInt64) + case postcontent.FieldLanguageCode, postcontent.FieldTitle, postcontent.FieldContentMarkdown, postcontent.FieldSummary, postcontent.FieldMetaKeywords, postcontent.FieldMetaDescription, postcontent.FieldSlug: + values[i] = new(sql.NullString) + case postcontent.ForeignKeys[0]: // post_contents + values[i] = new(sql.NullInt64) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the PostContent fields. 
+func (pc *PostContent) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case postcontent.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + pc.ID = int(value.Int64) + case postcontent.FieldLanguageCode: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field language_code", values[i]) + } else if value.Valid { + pc.LanguageCode = postcontent.LanguageCode(value.String) + } + case postcontent.FieldTitle: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field title", values[i]) + } else if value.Valid { + pc.Title = value.String + } + case postcontent.FieldContentMarkdown: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field content_markdown", values[i]) + } else if value.Valid { + pc.ContentMarkdown = value.String + } + case postcontent.FieldSummary: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field summary", values[i]) + } else if value.Valid { + pc.Summary = value.String + } + case postcontent.FieldMetaKeywords: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field meta_keywords", values[i]) + } else if value.Valid { + pc.MetaKeywords = value.String + } + case postcontent.FieldMetaDescription: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field meta_description", values[i]) + } else if value.Valid { + pc.MetaDescription = value.String + } + case postcontent.FieldSlug: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field slug", values[i]) + } else if value.Valid { + pc.Slug = value.String + } + case postcontent.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field post_contents", value) + } else if value.Valid { + pc.post_contents = new(int) + *pc.post_contents = int(value.Int64) + } + default: + pc.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the PostContent. +// This includes values selected through modifiers, order, etc. +func (pc *PostContent) Value(name string) (ent.Value, error) { + return pc.selectValues.Get(name) +} + +// QueryPost queries the "post" edge of the PostContent entity. +func (pc *PostContent) QueryPost() *PostQuery { + return NewPostContentClient(pc.config).QueryPost(pc) +} + +// Update returns a builder for updating this PostContent. +// Note that you need to call PostContent.Unwrap() before calling this method if this PostContent +// was returned from a transaction, and the transaction was committed or rolled back. +func (pc *PostContent) Update() *PostContentUpdateOne { + return NewPostContentClient(pc.config).UpdateOne(pc) +} + +// Unwrap unwraps the PostContent entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. 
+func (pc *PostContent) Unwrap() *PostContent { + _tx, ok := pc.config.driver.(*txDriver) + if !ok { + panic("ent: PostContent is not a transactional entity") + } + pc.config.driver = _tx.drv + return pc +} + +// String implements the fmt.Stringer. +func (pc *PostContent) String() string { + var builder strings.Builder + builder.WriteString("PostContent(") + builder.WriteString(fmt.Sprintf("id=%v, ", pc.ID)) + builder.WriteString("language_code=") + builder.WriteString(fmt.Sprintf("%v", pc.LanguageCode)) + builder.WriteString(", ") + builder.WriteString("title=") + builder.WriteString(pc.Title) + builder.WriteString(", ") + builder.WriteString("content_markdown=") + builder.WriteString(pc.ContentMarkdown) + builder.WriteString(", ") + builder.WriteString("summary=") + builder.WriteString(pc.Summary) + builder.WriteString(", ") + builder.WriteString("meta_keywords=") + builder.WriteString(pc.MetaKeywords) + builder.WriteString(", ") + builder.WriteString("meta_description=") + builder.WriteString(pc.MetaDescription) + builder.WriteString(", ") + builder.WriteString("slug=") + builder.WriteString(pc.Slug) + builder.WriteByte(')') + return builder.String() +} + +// PostContents is a parsable slice of PostContent. +type PostContents []*PostContent diff --git a/backend/ent/postcontent/postcontent.go b/backend/ent/postcontent/postcontent.go new file mode 100644 index 0000000..c7ea621 --- /dev/null +++ b/backend/ent/postcontent/postcontent.go @@ -0,0 +1,167 @@ +// Code generated by ent, DO NOT EDIT. + +package postcontent + +import ( + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the postcontent type in the database. + Label = "post_content" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldLanguageCode holds the string denoting the language_code field in the database. + FieldLanguageCode = "language_code" + // FieldTitle holds the string denoting the title field in the database. + FieldTitle = "title" + // FieldContentMarkdown holds the string denoting the content_markdown field in the database. + FieldContentMarkdown = "content_markdown" + // FieldSummary holds the string denoting the summary field in the database. + FieldSummary = "summary" + // FieldMetaKeywords holds the string denoting the meta_keywords field in the database. + FieldMetaKeywords = "meta_keywords" + // FieldMetaDescription holds the string denoting the meta_description field in the database. + FieldMetaDescription = "meta_description" + // FieldSlug holds the string denoting the slug field in the database. + FieldSlug = "slug" + // EdgePost holds the string denoting the post edge name in mutations. + EdgePost = "post" + // Table holds the table name of the postcontent in the database. + Table = "post_contents" + // PostTable is the table that holds the post relation/edge. + PostTable = "post_contents" + // PostInverseTable is the table name for the Post entity. + // It exists in this package in order to avoid circular dependency with the "post" package. + PostInverseTable = "posts" + // PostColumn is the table column denoting the post relation/edge. + PostColumn = "post_contents" +) + +// Columns holds all SQL columns for postcontent fields. 
+var Columns = []string{ + FieldID, + FieldLanguageCode, + FieldTitle, + FieldContentMarkdown, + FieldSummary, + FieldMetaKeywords, + FieldMetaDescription, + FieldSlug, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "post_contents" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "post_contents", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // TitleValidator is a validator for the "title" field. It is called by the builders before save. + TitleValidator func(string) error + // ContentMarkdownValidator is a validator for the "content_markdown" field. It is called by the builders before save. + ContentMarkdownValidator func(string) error + // SummaryValidator is a validator for the "summary" field. It is called by the builders before save. + SummaryValidator func(string) error + // SlugValidator is a validator for the "slug" field. It is called by the builders before save. + SlugValidator func(string) error +) + +// LanguageCode defines the type for the "language_code" enum field. +type LanguageCode string + +// LanguageCode values. +const ( + LanguageCodeEN LanguageCode = "en" + LanguageCodeZH_HANS LanguageCode = "zh-Hans" + LanguageCodeZH_HANT LanguageCode = "zh-Hant" +) + +func (lc LanguageCode) String() string { + return string(lc) +} + +// LanguageCodeValidator is a validator for the "language_code" field enum values. It is called by the builders before save. +func LanguageCodeValidator(lc LanguageCode) error { + switch lc { + case LanguageCodeEN, LanguageCodeZH_HANS, LanguageCodeZH_HANT: + return nil + default: + return fmt.Errorf("postcontent: invalid enum value for language_code field: %q", lc) + } +} + +// OrderOption defines the ordering options for the PostContent queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByLanguageCode orders the results by the language_code field. +func ByLanguageCode(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLanguageCode, opts...).ToFunc() +} + +// ByTitle orders the results by the title field. +func ByTitle(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTitle, opts...).ToFunc() +} + +// ByContentMarkdown orders the results by the content_markdown field. +func ByContentMarkdown(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldContentMarkdown, opts...).ToFunc() +} + +// BySummary orders the results by the summary field. +func BySummary(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSummary, opts...).ToFunc() +} + +// ByMetaKeywords orders the results by the meta_keywords field. +func ByMetaKeywords(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMetaKeywords, opts...).ToFunc() +} + +// ByMetaDescription orders the results by the meta_description field. +func ByMetaDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMetaDescription, opts...).ToFunc() +} + +// BySlug orders the results by the slug field. 
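+//
+// For example (an illustrative sketch; "client" and "ctx" are assumed, and
+// sql here is entgo.io/ent/dialect/sql):
+//
+//	client.PostContent.Query().
+//		Order(postcontent.BySlug(sql.OrderDesc())).
+//		All(ctx)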
+func BySlug(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSlug, opts...).ToFunc() +} + +// ByPostField orders the results by post field. +func ByPostField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPostStep(), sql.OrderByField(field, opts...)) + } +} +func newPostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, PostTable, PostColumn), + ) +} diff --git a/backend/ent/postcontent/where.go b/backend/ent/postcontent/where.go new file mode 100644 index 0000000..61ebd36 --- /dev/null +++ b/backend/ent/postcontent/where.go @@ -0,0 +1,553 @@ +// Code generated by ent, DO NOT EDIT. + +package postcontent + +import ( + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.PostContent { + return predicate.PostContent(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.PostContent { + return predicate.PostContent(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.PostContent { + return predicate.PostContent(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.PostContent { + return predicate.PostContent(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.PostContent { + return predicate.PostContent(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.PostContent { + return predicate.PostContent(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.PostContent { + return predicate.PostContent(sql.FieldLTE(FieldID, id)) +} + +// Title applies equality check predicate on the "title" field. It's identical to TitleEQ. +func Title(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldTitle, v)) +} + +// ContentMarkdown applies equality check predicate on the "content_markdown" field. It's identical to ContentMarkdownEQ. +func ContentMarkdown(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldContentMarkdown, v)) +} + +// Summary applies equality check predicate on the "summary" field. It's identical to SummaryEQ. +func Summary(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldSummary, v)) +} + +// MetaKeywords applies equality check predicate on the "meta_keywords" field. It's identical to MetaKeywordsEQ. +func MetaKeywords(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldMetaKeywords, v)) +} + +// MetaDescription applies equality check predicate on the "meta_description" field. It's identical to MetaDescriptionEQ. 
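+//
+// Shorthand predicates such as this are passed to Where; an illustrative
+// sketch ("client" and "ctx" are assumed from the caller's scope):
+//
+//	ok, err := client.PostContent.Query().
+//		Where(postcontent.MetaDescription("A short page summary.")).
+//		Exist(ctx)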
+func MetaDescription(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldMetaDescription, v)) +} + +// Slug applies equality check predicate on the "slug" field. It's identical to SlugEQ. +func Slug(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldSlug, v)) +} + +// LanguageCodeEQ applies the EQ predicate on the "language_code" field. +func LanguageCodeEQ(v LanguageCode) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldLanguageCode, v)) +} + +// LanguageCodeNEQ applies the NEQ predicate on the "language_code" field. +func LanguageCodeNEQ(v LanguageCode) predicate.PostContent { + return predicate.PostContent(sql.FieldNEQ(FieldLanguageCode, v)) +} + +// LanguageCodeIn applies the In predicate on the "language_code" field. +func LanguageCodeIn(vs ...LanguageCode) predicate.PostContent { + return predicate.PostContent(sql.FieldIn(FieldLanguageCode, vs...)) +} + +// LanguageCodeNotIn applies the NotIn predicate on the "language_code" field. +func LanguageCodeNotIn(vs ...LanguageCode) predicate.PostContent { + return predicate.PostContent(sql.FieldNotIn(FieldLanguageCode, vs...)) +} + +// TitleEQ applies the EQ predicate on the "title" field. +func TitleEQ(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldTitle, v)) +} + +// TitleNEQ applies the NEQ predicate on the "title" field. +func TitleNEQ(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldNEQ(FieldTitle, v)) +} + +// TitleIn applies the In predicate on the "title" field. +func TitleIn(vs ...string) predicate.PostContent { + return predicate.PostContent(sql.FieldIn(FieldTitle, vs...)) +} + +// TitleNotIn applies the NotIn predicate on the "title" field. +func TitleNotIn(vs ...string) predicate.PostContent { + return predicate.PostContent(sql.FieldNotIn(FieldTitle, vs...)) +} + +// TitleGT applies the GT predicate on the "title" field. +func TitleGT(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldGT(FieldTitle, v)) +} + +// TitleGTE applies the GTE predicate on the "title" field. +func TitleGTE(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldGTE(FieldTitle, v)) +} + +// TitleLT applies the LT predicate on the "title" field. +func TitleLT(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldLT(FieldTitle, v)) +} + +// TitleLTE applies the LTE predicate on the "title" field. +func TitleLTE(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldLTE(FieldTitle, v)) +} + +// TitleContains applies the Contains predicate on the "title" field. +func TitleContains(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldContains(FieldTitle, v)) +} + +// TitleHasPrefix applies the HasPrefix predicate on the "title" field. +func TitleHasPrefix(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldHasPrefix(FieldTitle, v)) +} + +// TitleHasSuffix applies the HasSuffix predicate on the "title" field. +func TitleHasSuffix(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldHasSuffix(FieldTitle, v)) +} + +// TitleEqualFold applies the EqualFold predicate on the "title" field. +func TitleEqualFold(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEqualFold(FieldTitle, v)) +} + +// TitleContainsFold applies the ContainsFold predicate on the "title" field. 
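+//
+// Useful for case-insensitive search; for example (illustrative only):
+//
+//	postcontent.TitleContainsFold("hello") // matches "Hello, world" and "HELLO"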
+func TitleContainsFold(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldContainsFold(FieldTitle, v)) +} + +// ContentMarkdownEQ applies the EQ predicate on the "content_markdown" field. +func ContentMarkdownEQ(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldContentMarkdown, v)) +} + +// ContentMarkdownNEQ applies the NEQ predicate on the "content_markdown" field. +func ContentMarkdownNEQ(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldNEQ(FieldContentMarkdown, v)) +} + +// ContentMarkdownIn applies the In predicate on the "content_markdown" field. +func ContentMarkdownIn(vs ...string) predicate.PostContent { + return predicate.PostContent(sql.FieldIn(FieldContentMarkdown, vs...)) +} + +// ContentMarkdownNotIn applies the NotIn predicate on the "content_markdown" field. +func ContentMarkdownNotIn(vs ...string) predicate.PostContent { + return predicate.PostContent(sql.FieldNotIn(FieldContentMarkdown, vs...)) +} + +// ContentMarkdownGT applies the GT predicate on the "content_markdown" field. +func ContentMarkdownGT(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldGT(FieldContentMarkdown, v)) +} + +// ContentMarkdownGTE applies the GTE predicate on the "content_markdown" field. +func ContentMarkdownGTE(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldGTE(FieldContentMarkdown, v)) +} + +// ContentMarkdownLT applies the LT predicate on the "content_markdown" field. +func ContentMarkdownLT(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldLT(FieldContentMarkdown, v)) +} + +// ContentMarkdownLTE applies the LTE predicate on the "content_markdown" field. +func ContentMarkdownLTE(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldLTE(FieldContentMarkdown, v)) +} + +// ContentMarkdownContains applies the Contains predicate on the "content_markdown" field. +func ContentMarkdownContains(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldContains(FieldContentMarkdown, v)) +} + +// ContentMarkdownHasPrefix applies the HasPrefix predicate on the "content_markdown" field. +func ContentMarkdownHasPrefix(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldHasPrefix(FieldContentMarkdown, v)) +} + +// ContentMarkdownHasSuffix applies the HasSuffix predicate on the "content_markdown" field. +func ContentMarkdownHasSuffix(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldHasSuffix(FieldContentMarkdown, v)) +} + +// ContentMarkdownEqualFold applies the EqualFold predicate on the "content_markdown" field. +func ContentMarkdownEqualFold(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEqualFold(FieldContentMarkdown, v)) +} + +// ContentMarkdownContainsFold applies the ContainsFold predicate on the "content_markdown" field. +func ContentMarkdownContainsFold(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldContainsFold(FieldContentMarkdown, v)) +} + +// SummaryEQ applies the EQ predicate on the "summary" field. +func SummaryEQ(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldSummary, v)) +} + +// SummaryNEQ applies the NEQ predicate on the "summary" field. +func SummaryNEQ(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldNEQ(FieldSummary, v)) +} + +// SummaryIn applies the In predicate on the "summary" field. 
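+//
+// For example (illustrative only), matching any of several values:
+//
+//	postcontent.SummaryIn("draft", "placeholder")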
+func SummaryIn(vs ...string) predicate.PostContent { + return predicate.PostContent(sql.FieldIn(FieldSummary, vs...)) +} + +// SummaryNotIn applies the NotIn predicate on the "summary" field. +func SummaryNotIn(vs ...string) predicate.PostContent { + return predicate.PostContent(sql.FieldNotIn(FieldSummary, vs...)) +} + +// SummaryGT applies the GT predicate on the "summary" field. +func SummaryGT(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldGT(FieldSummary, v)) +} + +// SummaryGTE applies the GTE predicate on the "summary" field. +func SummaryGTE(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldGTE(FieldSummary, v)) +} + +// SummaryLT applies the LT predicate on the "summary" field. +func SummaryLT(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldLT(FieldSummary, v)) +} + +// SummaryLTE applies the LTE predicate on the "summary" field. +func SummaryLTE(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldLTE(FieldSummary, v)) +} + +// SummaryContains applies the Contains predicate on the "summary" field. +func SummaryContains(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldContains(FieldSummary, v)) +} + +// SummaryHasPrefix applies the HasPrefix predicate on the "summary" field. +func SummaryHasPrefix(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldHasPrefix(FieldSummary, v)) +} + +// SummaryHasSuffix applies the HasSuffix predicate on the "summary" field. +func SummaryHasSuffix(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldHasSuffix(FieldSummary, v)) +} + +// SummaryEqualFold applies the EqualFold predicate on the "summary" field. +func SummaryEqualFold(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEqualFold(FieldSummary, v)) +} + +// SummaryContainsFold applies the ContainsFold predicate on the "summary" field. +func SummaryContainsFold(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldContainsFold(FieldSummary, v)) +} + +// MetaKeywordsEQ applies the EQ predicate on the "meta_keywords" field. +func MetaKeywordsEQ(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldMetaKeywords, v)) +} + +// MetaKeywordsNEQ applies the NEQ predicate on the "meta_keywords" field. +func MetaKeywordsNEQ(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldNEQ(FieldMetaKeywords, v)) +} + +// MetaKeywordsIn applies the In predicate on the "meta_keywords" field. +func MetaKeywordsIn(vs ...string) predicate.PostContent { + return predicate.PostContent(sql.FieldIn(FieldMetaKeywords, vs...)) +} + +// MetaKeywordsNotIn applies the NotIn predicate on the "meta_keywords" field. +func MetaKeywordsNotIn(vs ...string) predicate.PostContent { + return predicate.PostContent(sql.FieldNotIn(FieldMetaKeywords, vs...)) +} + +// MetaKeywordsGT applies the GT predicate on the "meta_keywords" field. +func MetaKeywordsGT(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldGT(FieldMetaKeywords, v)) +} + +// MetaKeywordsGTE applies the GTE predicate on the "meta_keywords" field. +func MetaKeywordsGTE(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldGTE(FieldMetaKeywords, v)) +} + +// MetaKeywordsLT applies the LT predicate on the "meta_keywords" field. 
+func MetaKeywordsLT(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldLT(FieldMetaKeywords, v)) +} + +// MetaKeywordsLTE applies the LTE predicate on the "meta_keywords" field. +func MetaKeywordsLTE(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldLTE(FieldMetaKeywords, v)) +} + +// MetaKeywordsContains applies the Contains predicate on the "meta_keywords" field. +func MetaKeywordsContains(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldContains(FieldMetaKeywords, v)) +} + +// MetaKeywordsHasPrefix applies the HasPrefix predicate on the "meta_keywords" field. +func MetaKeywordsHasPrefix(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldHasPrefix(FieldMetaKeywords, v)) +} + +// MetaKeywordsHasSuffix applies the HasSuffix predicate on the "meta_keywords" field. +func MetaKeywordsHasSuffix(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldHasSuffix(FieldMetaKeywords, v)) +} + +// MetaKeywordsIsNil applies the IsNil predicate on the "meta_keywords" field. +func MetaKeywordsIsNil() predicate.PostContent { + return predicate.PostContent(sql.FieldIsNull(FieldMetaKeywords)) +} + +// MetaKeywordsNotNil applies the NotNil predicate on the "meta_keywords" field. +func MetaKeywordsNotNil() predicate.PostContent { + return predicate.PostContent(sql.FieldNotNull(FieldMetaKeywords)) +} + +// MetaKeywordsEqualFold applies the EqualFold predicate on the "meta_keywords" field. +func MetaKeywordsEqualFold(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEqualFold(FieldMetaKeywords, v)) +} + +// MetaKeywordsContainsFold applies the ContainsFold predicate on the "meta_keywords" field. +func MetaKeywordsContainsFold(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldContainsFold(FieldMetaKeywords, v)) +} + +// MetaDescriptionEQ applies the EQ predicate on the "meta_description" field. +func MetaDescriptionEQ(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldMetaDescription, v)) +} + +// MetaDescriptionNEQ applies the NEQ predicate on the "meta_description" field. +func MetaDescriptionNEQ(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldNEQ(FieldMetaDescription, v)) +} + +// MetaDescriptionIn applies the In predicate on the "meta_description" field. +func MetaDescriptionIn(vs ...string) predicate.PostContent { + return predicate.PostContent(sql.FieldIn(FieldMetaDescription, vs...)) +} + +// MetaDescriptionNotIn applies the NotIn predicate on the "meta_description" field. +func MetaDescriptionNotIn(vs ...string) predicate.PostContent { + return predicate.PostContent(sql.FieldNotIn(FieldMetaDescription, vs...)) +} + +// MetaDescriptionGT applies the GT predicate on the "meta_description" field. +func MetaDescriptionGT(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldGT(FieldMetaDescription, v)) +} + +// MetaDescriptionGTE applies the GTE predicate on the "meta_description" field. +func MetaDescriptionGTE(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldGTE(FieldMetaDescription, v)) +} + +// MetaDescriptionLT applies the LT predicate on the "meta_description" field. +func MetaDescriptionLT(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldLT(FieldMetaDescription, v)) +} + +// MetaDescriptionLTE applies the LTE predicate on the "meta_description" field. 
+func MetaDescriptionLTE(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldLTE(FieldMetaDescription, v)) +} + +// MetaDescriptionContains applies the Contains predicate on the "meta_description" field. +func MetaDescriptionContains(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldContains(FieldMetaDescription, v)) +} + +// MetaDescriptionHasPrefix applies the HasPrefix predicate on the "meta_description" field. +func MetaDescriptionHasPrefix(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldHasPrefix(FieldMetaDescription, v)) +} + +// MetaDescriptionHasSuffix applies the HasSuffix predicate on the "meta_description" field. +func MetaDescriptionHasSuffix(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldHasSuffix(FieldMetaDescription, v)) +} + +// MetaDescriptionIsNil applies the IsNil predicate on the "meta_description" field. +func MetaDescriptionIsNil() predicate.PostContent { + return predicate.PostContent(sql.FieldIsNull(FieldMetaDescription)) +} + +// MetaDescriptionNotNil applies the NotNil predicate on the "meta_description" field. +func MetaDescriptionNotNil() predicate.PostContent { + return predicate.PostContent(sql.FieldNotNull(FieldMetaDescription)) +} + +// MetaDescriptionEqualFold applies the EqualFold predicate on the "meta_description" field. +func MetaDescriptionEqualFold(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEqualFold(FieldMetaDescription, v)) +} + +// MetaDescriptionContainsFold applies the ContainsFold predicate on the "meta_description" field. +func MetaDescriptionContainsFold(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldContainsFold(FieldMetaDescription, v)) +} + +// SlugEQ applies the EQ predicate on the "slug" field. +func SlugEQ(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEQ(FieldSlug, v)) +} + +// SlugNEQ applies the NEQ predicate on the "slug" field. +func SlugNEQ(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldNEQ(FieldSlug, v)) +} + +// SlugIn applies the In predicate on the "slug" field. +func SlugIn(vs ...string) predicate.PostContent { + return predicate.PostContent(sql.FieldIn(FieldSlug, vs...)) +} + +// SlugNotIn applies the NotIn predicate on the "slug" field. +func SlugNotIn(vs ...string) predicate.PostContent { + return predicate.PostContent(sql.FieldNotIn(FieldSlug, vs...)) +} + +// SlugGT applies the GT predicate on the "slug" field. +func SlugGT(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldGT(FieldSlug, v)) +} + +// SlugGTE applies the GTE predicate on the "slug" field. +func SlugGTE(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldGTE(FieldSlug, v)) +} + +// SlugLT applies the LT predicate on the "slug" field. +func SlugLT(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldLT(FieldSlug, v)) +} + +// SlugLTE applies the LTE predicate on the "slug" field. +func SlugLTE(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldLTE(FieldSlug, v)) +} + +// SlugContains applies the Contains predicate on the "slug" field. +func SlugContains(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldContains(FieldSlug, v)) +} + +// SlugHasPrefix applies the HasPrefix predicate on the "slug" field. 
+func SlugHasPrefix(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldHasPrefix(FieldSlug, v)) +} + +// SlugHasSuffix applies the HasSuffix predicate on the "slug" field. +func SlugHasSuffix(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldHasSuffix(FieldSlug, v)) +} + +// SlugEqualFold applies the EqualFold predicate on the "slug" field. +func SlugEqualFold(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldEqualFold(FieldSlug, v)) +} + +// SlugContainsFold applies the ContainsFold predicate on the "slug" field. +func SlugContainsFold(v string) predicate.PostContent { + return predicate.PostContent(sql.FieldContainsFold(FieldSlug, v)) +} + +// HasPost applies the HasEdge predicate on the "post" edge. +func HasPost() predicate.PostContent { + return predicate.PostContent(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, PostTable, PostColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasPostWith applies the HasEdge predicate on the "post" edge with a given conditions (other predicates). +func HasPostWith(preds ...predicate.Post) predicate.PostContent { + return predicate.PostContent(func(s *sql.Selector) { + step := newPostStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.PostContent) predicate.PostContent { + return predicate.PostContent(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.PostContent) predicate.PostContent { + return predicate.PostContent(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.PostContent) predicate.PostContent { + return predicate.PostContent(sql.NotPredicates(p)) +} diff --git a/backend/ent/postcontent_create.go b/backend/ent/postcontent_create.go new file mode 100644 index 0000000..ef2b548 --- /dev/null +++ b/backend/ent/postcontent_create.go @@ -0,0 +1,333 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/postcontent" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PostContentCreate is the builder for creating a PostContent entity. +type PostContentCreate struct { + config + mutation *PostContentMutation + hooks []Hook +} + +// SetLanguageCode sets the "language_code" field. +func (pcc *PostContentCreate) SetLanguageCode(pc postcontent.LanguageCode) *PostContentCreate { + pcc.mutation.SetLanguageCode(pc) + return pcc +} + +// SetTitle sets the "title" field. +func (pcc *PostContentCreate) SetTitle(s string) *PostContentCreate { + pcc.mutation.SetTitle(s) + return pcc +} + +// SetContentMarkdown sets the "content_markdown" field. +func (pcc *PostContentCreate) SetContentMarkdown(s string) *PostContentCreate { + pcc.mutation.SetContentMarkdown(s) + return pcc +} + +// SetSummary sets the "summary" field. +func (pcc *PostContentCreate) SetSummary(s string) *PostContentCreate { + pcc.mutation.SetSummary(s) + return pcc +} + +// SetMetaKeywords sets the "meta_keywords" field. 
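+//
+// A typical create chains the setters and finishes with Save; an illustrative
+// sketch (not generated code; "client" and "ctx" are assumed):
+//
+//	pc, err := client.PostContent.Create().
+//		SetLanguageCode(postcontent.LanguageCodeEN).
+//		SetTitle("Hello").
+//		SetContentMarkdown("# Hello").
+//		SetSummary("A greeting.").
+//		SetSlug("hello").
+//		SetMetaKeywords("hello,greeting").
+//		Save(ctx)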
+func (pcc *PostContentCreate) SetMetaKeywords(s string) *PostContentCreate { + pcc.mutation.SetMetaKeywords(s) + return pcc +} + +// SetNillableMetaKeywords sets the "meta_keywords" field if the given value is not nil. +func (pcc *PostContentCreate) SetNillableMetaKeywords(s *string) *PostContentCreate { + if s != nil { + pcc.SetMetaKeywords(*s) + } + return pcc +} + +// SetMetaDescription sets the "meta_description" field. +func (pcc *PostContentCreate) SetMetaDescription(s string) *PostContentCreate { + pcc.mutation.SetMetaDescription(s) + return pcc +} + +// SetNillableMetaDescription sets the "meta_description" field if the given value is not nil. +func (pcc *PostContentCreate) SetNillableMetaDescription(s *string) *PostContentCreate { + if s != nil { + pcc.SetMetaDescription(*s) + } + return pcc +} + +// SetSlug sets the "slug" field. +func (pcc *PostContentCreate) SetSlug(s string) *PostContentCreate { + pcc.mutation.SetSlug(s) + return pcc +} + +// SetPostID sets the "post" edge to the Post entity by ID. +func (pcc *PostContentCreate) SetPostID(id int) *PostContentCreate { + pcc.mutation.SetPostID(id) + return pcc +} + +// SetNillablePostID sets the "post" edge to the Post entity by ID if the given value is not nil. +func (pcc *PostContentCreate) SetNillablePostID(id *int) *PostContentCreate { + if id != nil { + pcc = pcc.SetPostID(*id) + } + return pcc +} + +// SetPost sets the "post" edge to the Post entity. +func (pcc *PostContentCreate) SetPost(p *Post) *PostContentCreate { + return pcc.SetPostID(p.ID) +} + +// Mutation returns the PostContentMutation object of the builder. +func (pcc *PostContentCreate) Mutation() *PostContentMutation { + return pcc.mutation +} + +// Save creates the PostContent in the database. +func (pcc *PostContentCreate) Save(ctx context.Context) (*PostContent, error) { + return withHooks(ctx, pcc.sqlSave, pcc.mutation, pcc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (pcc *PostContentCreate) SaveX(ctx context.Context) *PostContent { + v, err := pcc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (pcc *PostContentCreate) Exec(ctx context.Context) error { + _, err := pcc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (pcc *PostContentCreate) ExecX(ctx context.Context) { + if err := pcc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (pcc *PostContentCreate) check() error { + if _, ok := pcc.mutation.LanguageCode(); !ok { + return &ValidationError{Name: "language_code", err: errors.New(`ent: missing required field "PostContent.language_code"`)} + } + if v, ok := pcc.mutation.LanguageCode(); ok { + if err := postcontent.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "PostContent.language_code": %w`, err)} + } + } + if _, ok := pcc.mutation.Title(); !ok { + return &ValidationError{Name: "title", err: errors.New(`ent: missing required field "PostContent.title"`)} + } + if v, ok := pcc.mutation.Title(); ok { + if err := postcontent.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "PostContent.title": %w`, err)} + } + } + if _, ok := pcc.mutation.ContentMarkdown(); !ok { + return &ValidationError{Name: "content_markdown", err: errors.New(`ent: missing required field "PostContent.content_markdown"`)} + } + if v, ok := pcc.mutation.ContentMarkdown(); ok { + if err := postcontent.ContentMarkdownValidator(v); err != nil { + return &ValidationError{Name: "content_markdown", err: fmt.Errorf(`ent: validator failed for field "PostContent.content_markdown": %w`, err)} + } + } + if _, ok := pcc.mutation.Summary(); !ok { + return &ValidationError{Name: "summary", err: errors.New(`ent: missing required field "PostContent.summary"`)} + } + if v, ok := pcc.mutation.Summary(); ok { + if err := postcontent.SummaryValidator(v); err != nil { + return &ValidationError{Name: "summary", err: fmt.Errorf(`ent: validator failed for field "PostContent.summary": %w`, err)} + } + } + if _, ok := pcc.mutation.Slug(); !ok { + return &ValidationError{Name: "slug", err: errors.New(`ent: missing required field "PostContent.slug"`)} + } + if v, ok := pcc.mutation.Slug(); ok { + if err := postcontent.SlugValidator(v); err != nil { + return &ValidationError{Name: "slug", err: fmt.Errorf(`ent: validator failed for field "PostContent.slug": %w`, err)} + } + } + return nil +} + +func (pcc *PostContentCreate) sqlSave(ctx context.Context) (*PostContent, error) { + if err := pcc.check(); err != nil { + return nil, err + } + _node, _spec := pcc.createSpec() + if err := sqlgraph.CreateNode(ctx, pcc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + pcc.mutation.id = &_node.ID + pcc.mutation.done = true + return _node, nil +} + +func (pcc *PostContentCreate) createSpec() (*PostContent, *sqlgraph.CreateSpec) { + var ( + _node = &PostContent{config: pcc.config} + _spec = sqlgraph.NewCreateSpec(postcontent.Table, sqlgraph.NewFieldSpec(postcontent.FieldID, field.TypeInt)) + ) + if value, ok := pcc.mutation.LanguageCode(); ok { + _spec.SetField(postcontent.FieldLanguageCode, field.TypeEnum, value) + _node.LanguageCode = value + } + if value, ok := pcc.mutation.Title(); ok { + _spec.SetField(postcontent.FieldTitle, field.TypeString, value) + _node.Title = value + } + if value, ok := pcc.mutation.ContentMarkdown(); ok { + _spec.SetField(postcontent.FieldContentMarkdown, field.TypeString, value) + _node.ContentMarkdown = value + } + if value, ok := pcc.mutation.Summary(); ok { + _spec.SetField(postcontent.FieldSummary, field.TypeString, value) + _node.Summary = value + } + if value, ok := pcc.mutation.MetaKeywords(); ok { + 
_spec.SetField(postcontent.FieldMetaKeywords, field.TypeString, value) + _node.MetaKeywords = value + } + if value, ok := pcc.mutation.MetaDescription(); ok { + _spec.SetField(postcontent.FieldMetaDescription, field.TypeString, value) + _node.MetaDescription = value + } + if value, ok := pcc.mutation.Slug(); ok { + _spec.SetField(postcontent.FieldSlug, field.TypeString, value) + _node.Slug = value + } + if nodes := pcc.mutation.PostIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontent.PostTable, + Columns: []string{postcontent.PostColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.post_contents = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// PostContentCreateBulk is the builder for creating many PostContent entities in bulk. +type PostContentCreateBulk struct { + config + err error + builders []*PostContentCreate +} + +// Save creates the PostContent entities in the database. +func (pccb *PostContentCreateBulk) Save(ctx context.Context) ([]*PostContent, error) { + if pccb.err != nil { + return nil, pccb.err + } + specs := make([]*sqlgraph.CreateSpec, len(pccb.builders)) + nodes := make([]*PostContent, len(pccb.builders)) + mutators := make([]Mutator, len(pccb.builders)) + for i := range pccb.builders { + func(i int, root context.Context) { + builder := pccb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*PostContentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, pccb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, pccb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, pccb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (pccb *PostContentCreateBulk) SaveX(ctx context.Context) []*PostContent { + v, err := pccb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (pccb *PostContentCreateBulk) Exec(ctx context.Context) error { + _, err := pccb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
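+//
+// A bulk-creation sketch (illustrative only; "client", "ctx", and the
+// hypothetical "inputs" slice are assumptions, not part of this package):
+//
+//	builders := make([]*PostContentCreate, len(inputs))
+//	for i, in := range inputs {
+//		builders[i] = client.PostContent.Create().
+//			SetLanguageCode(in.Lang).
+//			SetTitle(in.Title).
+//			SetContentMarkdown(in.Body).
+//			SetSummary(in.Summary).
+//			SetSlug(in.Slug)
+//	}
+//	client.PostContent.CreateBulk(builders...).ExecX(ctx)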
+func (pccb *PostContentCreateBulk) ExecX(ctx context.Context) { + if err := pccb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/postcontent_delete.go b/backend/ent/postcontent_delete.go new file mode 100644 index 0000000..140d57e --- /dev/null +++ b/backend/ent/postcontent_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "tss-rocks-be/ent/postcontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PostContentDelete is the builder for deleting a PostContent entity. +type PostContentDelete struct { + config + hooks []Hook + mutation *PostContentMutation +} + +// Where appends a list predicates to the PostContentDelete builder. +func (pcd *PostContentDelete) Where(ps ...predicate.PostContent) *PostContentDelete { + pcd.mutation.Where(ps...) + return pcd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (pcd *PostContentDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, pcd.sqlExec, pcd.mutation, pcd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (pcd *PostContentDelete) ExecX(ctx context.Context) int { + n, err := pcd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (pcd *PostContentDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(postcontent.Table, sqlgraph.NewFieldSpec(postcontent.FieldID, field.TypeInt)) + if ps := pcd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, pcd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + pcd.mutation.done = true + return affected, err +} + +// PostContentDeleteOne is the builder for deleting a single PostContent entity. +type PostContentDeleteOne struct { + pcd *PostContentDelete +} + +// Where appends a list predicates to the PostContentDelete builder. +func (pcdo *PostContentDeleteOne) Where(ps ...predicate.PostContent) *PostContentDeleteOne { + pcdo.pcd.mutation.Where(ps...) + return pcdo +} + +// Exec executes the deletion query. +func (pcdo *PostContentDeleteOne) Exec(ctx context.Context) error { + n, err := pcdo.pcd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{postcontent.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (pcdo *PostContentDeleteOne) ExecX(ctx context.Context) { + if err := pcdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/postcontent_query.go b/backend/ent/postcontent_query.go new file mode 100644 index 0000000..a6ff5d8 --- /dev/null +++ b/backend/ent/postcontent_query.go @@ -0,0 +1,614 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/postcontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PostContentQuery is the builder for querying PostContent entities. +type PostContentQuery struct { + config + ctx *QueryContext + order []postcontent.OrderOption + inters []Interceptor + predicates []predicate.PostContent + withPost *PostQuery + withFKs bool + // intermediate query (i.e. traversal path). 
+ sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the PostContentQuery builder. +func (pcq *PostContentQuery) Where(ps ...predicate.PostContent) *PostContentQuery { + pcq.predicates = append(pcq.predicates, ps...) + return pcq +} + +// Limit the number of records to be returned by this query. +func (pcq *PostContentQuery) Limit(limit int) *PostContentQuery { + pcq.ctx.Limit = &limit + return pcq +} + +// Offset to start from. +func (pcq *PostContentQuery) Offset(offset int) *PostContentQuery { + pcq.ctx.Offset = &offset + return pcq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (pcq *PostContentQuery) Unique(unique bool) *PostContentQuery { + pcq.ctx.Unique = &unique + return pcq +} + +// Order specifies how the records should be ordered. +func (pcq *PostContentQuery) Order(o ...postcontent.OrderOption) *PostContentQuery { + pcq.order = append(pcq.order, o...) + return pcq +} + +// QueryPost chains the current query on the "post" edge. +func (pcq *PostContentQuery) QueryPost() *PostQuery { + query := (&PostClient{config: pcq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := pcq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := pcq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(postcontent.Table, postcontent.FieldID, selector), + sqlgraph.To(post.Table, post.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, postcontent.PostTable, postcontent.PostColumn), + ) + fromU = sqlgraph.SetNeighbors(pcq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first PostContent entity from the query. +// Returns a *NotFoundError when no PostContent was found. +func (pcq *PostContentQuery) First(ctx context.Context) (*PostContent, error) { + nodes, err := pcq.Limit(1).All(setContextOp(ctx, pcq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{postcontent.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (pcq *PostContentQuery) FirstX(ctx context.Context) *PostContent { + node, err := pcq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first PostContent ID from the query. +// Returns a *NotFoundError when no PostContent ID was found. +func (pcq *PostContentQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = pcq.Limit(1).IDs(setContextOp(ctx, pcq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{postcontent.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (pcq *PostContentQuery) FirstIDX(ctx context.Context) int { + id, err := pcq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single PostContent entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one PostContent entity is found. +// Returns a *NotFoundError when no PostContent entities are found. 
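+//
+// For example (an illustrative sketch; "client" and "ctx" are assumed to
+// exist in the caller's scope):
+//
+//	pc, err := client.PostContent.Query().
+//		Where(postcontent.Slug("hello-world")).
+//		Only(ctx)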
+func (pcq *PostContentQuery) Only(ctx context.Context) (*PostContent, error) { + nodes, err := pcq.Limit(2).All(setContextOp(ctx, pcq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{postcontent.Label} + default: + return nil, &NotSingularError{postcontent.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (pcq *PostContentQuery) OnlyX(ctx context.Context) *PostContent { + node, err := pcq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only PostContent ID in the query. +// Returns a *NotSingularError when more than one PostContent ID is found. +// Returns a *NotFoundError when no entities are found. +func (pcq *PostContentQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = pcq.Limit(2).IDs(setContextOp(ctx, pcq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{postcontent.Label} + default: + err = &NotSingularError{postcontent.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (pcq *PostContentQuery) OnlyIDX(ctx context.Context) int { + id, err := pcq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of PostContents. +func (pcq *PostContentQuery) All(ctx context.Context) ([]*PostContent, error) { + ctx = setContextOp(ctx, pcq.ctx, ent.OpQueryAll) + if err := pcq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*PostContent, *PostContentQuery]() + return withInterceptors[[]*PostContent](ctx, pcq, qr, pcq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (pcq *PostContentQuery) AllX(ctx context.Context) []*PostContent { + nodes, err := pcq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of PostContent IDs. +func (pcq *PostContentQuery) IDs(ctx context.Context) (ids []int, err error) { + if pcq.ctx.Unique == nil && pcq.path != nil { + pcq.Unique(true) + } + ctx = setContextOp(ctx, pcq.ctx, ent.OpQueryIDs) + if err = pcq.Select(postcontent.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (pcq *PostContentQuery) IDsX(ctx context.Context) []int { + ids, err := pcq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (pcq *PostContentQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, pcq.ctx, ent.OpQueryCount) + if err := pcq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, pcq, querierCount[*PostContentQuery](), pcq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (pcq *PostContentQuery) CountX(ctx context.Context) int { + count, err := pcq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (pcq *PostContentQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, pcq.ctx, ent.OpQueryExist) + switch _, err := pcq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. 
+func (pcq *PostContentQuery) ExistX(ctx context.Context) bool { + exist, err := pcq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the PostContentQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (pcq *PostContentQuery) Clone() *PostContentQuery { + if pcq == nil { + return nil + } + return &PostContentQuery{ + config: pcq.config, + ctx: pcq.ctx.Clone(), + order: append([]postcontent.OrderOption{}, pcq.order...), + inters: append([]Interceptor{}, pcq.inters...), + predicates: append([]predicate.PostContent{}, pcq.predicates...), + withPost: pcq.withPost.Clone(), + // clone intermediate query. + sql: pcq.sql.Clone(), + path: pcq.path, + } +} + +// WithPost tells the query-builder to eager-load the nodes that are connected to +// the "post" edge. The optional arguments are used to configure the query builder of the edge. +func (pcq *PostContentQuery) WithPost(opts ...func(*PostQuery)) *PostContentQuery { + query := (&PostClient{config: pcq.config}).Query() + for _, opt := range opts { + opt(query) + } + pcq.withPost = query + return pcq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// LanguageCode postcontent.LanguageCode `json:"language_code,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.PostContent.Query(). +// GroupBy(postcontent.FieldLanguageCode). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (pcq *PostContentQuery) GroupBy(field string, fields ...string) *PostContentGroupBy { + pcq.ctx.Fields = append([]string{field}, fields...) + grbuild := &PostContentGroupBy{build: pcq} + grbuild.flds = &pcq.ctx.Fields + grbuild.label = postcontent.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// LanguageCode postcontent.LanguageCode `json:"language_code,omitempty"` +// } +// +// client.PostContent.Query(). +// Select(postcontent.FieldLanguageCode). +// Scan(ctx, &v) +func (pcq *PostContentQuery) Select(fields ...string) *PostContentSelect { + pcq.ctx.Fields = append(pcq.ctx.Fields, fields...) + sbuild := &PostContentSelect{PostContentQuery: pcq} + sbuild.label = postcontent.Label + sbuild.flds, sbuild.scan = &pcq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a PostContentSelect configured with the given aggregations. +func (pcq *PostContentQuery) Aggregate(fns ...AggregateFunc) *PostContentSelect { + return pcq.Select().Aggregate(fns...) 
+} + +func (pcq *PostContentQuery) prepareQuery(ctx context.Context) error { + for _, inter := range pcq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, pcq); err != nil { + return err + } + } + } + for _, f := range pcq.ctx.Fields { + if !postcontent.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if pcq.path != nil { + prev, err := pcq.path(ctx) + if err != nil { + return err + } + pcq.sql = prev + } + return nil +} + +func (pcq *PostContentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PostContent, error) { + var ( + nodes = []*PostContent{} + withFKs = pcq.withFKs + _spec = pcq.querySpec() + loadedTypes = [1]bool{ + pcq.withPost != nil, + } + ) + if pcq.withPost != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, postcontent.ForeignKeys...) + } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*PostContent).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &PostContent{config: pcq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, pcq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := pcq.withPost; query != nil { + if err := pcq.loadPost(ctx, query, nodes, nil, + func(n *PostContent, e *Post) { n.Edges.Post = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (pcq *PostContentQuery) loadPost(ctx context.Context, query *PostQuery, nodes []*PostContent, init func(*PostContent), assign func(*PostContent, *Post)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*PostContent) + for i := range nodes { + if nodes[i].post_contents == nil { + continue + } + fk := *nodes[i].post_contents + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(post.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "post_contents" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (pcq *PostContentQuery) sqlCount(ctx context.Context) (int, error) { + _spec := pcq.querySpec() + _spec.Node.Columns = pcq.ctx.Fields + if len(pcq.ctx.Fields) > 0 { + _spec.Unique = pcq.ctx.Unique != nil && *pcq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, pcq.driver, _spec) +} + +func (pcq *PostContentQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(postcontent.Table, postcontent.Columns, sqlgraph.NewFieldSpec(postcontent.FieldID, field.TypeInt)) + _spec.From = pcq.sql + if unique := pcq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if pcq.path != nil { + _spec.Unique = true + } + if fields := pcq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, postcontent.FieldID) + for i := range fields { + if fields[i] != postcontent.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if 
ps := pcq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := pcq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := pcq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := pcq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (pcq *PostContentQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(pcq.driver.Dialect()) + t1 := builder.Table(postcontent.Table) + columns := pcq.ctx.Fields + if len(columns) == 0 { + columns = postcontent.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if pcq.sql != nil { + selector = pcq.sql + selector.Select(selector.Columns(columns...)...) + } + if pcq.ctx.Unique != nil && *pcq.ctx.Unique { + selector.Distinct() + } + for _, p := range pcq.predicates { + p(selector) + } + for _, p := range pcq.order { + p(selector) + } + if offset := pcq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := pcq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// PostContentGroupBy is the group-by builder for PostContent entities. +type PostContentGroupBy struct { + selector + build *PostContentQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (pcgb *PostContentGroupBy) Aggregate(fns ...AggregateFunc) *PostContentGroupBy { + pcgb.fns = append(pcgb.fns, fns...) + return pcgb +} + +// Scan applies the selector query and scans the result into the given value. +func (pcgb *PostContentGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pcgb.build.ctx, ent.OpQueryGroupBy) + if err := pcgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PostContentQuery, *PostContentGroupBy](ctx, pcgb.build, pcgb, pcgb.build.inters, v) +} + +func (pcgb *PostContentGroupBy) sqlScan(ctx context.Context, root *PostContentQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(pcgb.fns)) + for _, fn := range pcgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*pcgb.flds)+len(pcgb.fns)) + for _, f := range *pcgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*pcgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := pcgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// PostContentSelect is the builder for selecting fields of PostContent entities. +type PostContentSelect struct { + *PostContentQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (pcs *PostContentSelect) Aggregate(fns ...AggregateFunc) *PostContentSelect { + pcs.fns = append(pcs.fns, fns...) + return pcs +} + +// Scan applies the selector query and scans the result into the given value. 
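+//
+// An illustrative sketch (not generated code; "client" and "ctx" are assumed):
+//
+//	var slugs []string
+//	err := client.PostContent.Query().
+//		Select(postcontent.FieldSlug).
+//		Scan(ctx, &slugs)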
+func (pcs *PostContentSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pcs.ctx, ent.OpQuerySelect) + if err := pcs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PostContentQuery, *PostContentSelect](ctx, pcs.PostContentQuery, pcs, pcs.inters, v) +} + +func (pcs *PostContentSelect) sqlScan(ctx context.Context, root *PostContentQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(pcs.fns)) + for _, fn := range pcs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*pcs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := pcs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/postcontent_update.go b/backend/ent/postcontent_update.go new file mode 100644 index 0000000..002d4f9 --- /dev/null +++ b/backend/ent/postcontent_update.go @@ -0,0 +1,624 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/postcontent" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PostContentUpdate is the builder for updating PostContent entities. +type PostContentUpdate struct { + config + hooks []Hook + mutation *PostContentMutation +} + +// Where appends a list predicates to the PostContentUpdate builder. +func (pcu *PostContentUpdate) Where(ps ...predicate.PostContent) *PostContentUpdate { + pcu.mutation.Where(ps...) + return pcu +} + +// SetLanguageCode sets the "language_code" field. +func (pcu *PostContentUpdate) SetLanguageCode(pc postcontent.LanguageCode) *PostContentUpdate { + pcu.mutation.SetLanguageCode(pc) + return pcu +} + +// SetNillableLanguageCode sets the "language_code" field if the given value is not nil. +func (pcu *PostContentUpdate) SetNillableLanguageCode(pc *postcontent.LanguageCode) *PostContentUpdate { + if pc != nil { + pcu.SetLanguageCode(*pc) + } + return pcu +} + +// SetTitle sets the "title" field. +func (pcu *PostContentUpdate) SetTitle(s string) *PostContentUpdate { + pcu.mutation.SetTitle(s) + return pcu +} + +// SetNillableTitle sets the "title" field if the given value is not nil. +func (pcu *PostContentUpdate) SetNillableTitle(s *string) *PostContentUpdate { + if s != nil { + pcu.SetTitle(*s) + } + return pcu +} + +// SetContentMarkdown sets the "content_markdown" field. +func (pcu *PostContentUpdate) SetContentMarkdown(s string) *PostContentUpdate { + pcu.mutation.SetContentMarkdown(s) + return pcu +} + +// SetNillableContentMarkdown sets the "content_markdown" field if the given value is not nil. +func (pcu *PostContentUpdate) SetNillableContentMarkdown(s *string) *PostContentUpdate { + if s != nil { + pcu.SetContentMarkdown(*s) + } + return pcu +} + +// SetSummary sets the "summary" field. +func (pcu *PostContentUpdate) SetSummary(s string) *PostContentUpdate { + pcu.mutation.SetSummary(s) + return pcu +} + +// SetNillableSummary sets the "summary" field if the given value is not nil. +func (pcu *PostContentUpdate) SetNillableSummary(s *string) *PostContentUpdate { + if s != nil { + pcu.SetSummary(*s) + } + return pcu +} + +// SetMetaKeywords sets the "meta_keywords" field. 
+func (pcu *PostContentUpdate) SetMetaKeywords(s string) *PostContentUpdate { + pcu.mutation.SetMetaKeywords(s) + return pcu +} + +// SetNillableMetaKeywords sets the "meta_keywords" field if the given value is not nil. +func (pcu *PostContentUpdate) SetNillableMetaKeywords(s *string) *PostContentUpdate { + if s != nil { + pcu.SetMetaKeywords(*s) + } + return pcu +} + +// ClearMetaKeywords clears the value of the "meta_keywords" field. +func (pcu *PostContentUpdate) ClearMetaKeywords() *PostContentUpdate { + pcu.mutation.ClearMetaKeywords() + return pcu +} + +// SetMetaDescription sets the "meta_description" field. +func (pcu *PostContentUpdate) SetMetaDescription(s string) *PostContentUpdate { + pcu.mutation.SetMetaDescription(s) + return pcu +} + +// SetNillableMetaDescription sets the "meta_description" field if the given value is not nil. +func (pcu *PostContentUpdate) SetNillableMetaDescription(s *string) *PostContentUpdate { + if s != nil { + pcu.SetMetaDescription(*s) + } + return pcu +} + +// ClearMetaDescription clears the value of the "meta_description" field. +func (pcu *PostContentUpdate) ClearMetaDescription() *PostContentUpdate { + pcu.mutation.ClearMetaDescription() + return pcu +} + +// SetSlug sets the "slug" field. +func (pcu *PostContentUpdate) SetSlug(s string) *PostContentUpdate { + pcu.mutation.SetSlug(s) + return pcu +} + +// SetNillableSlug sets the "slug" field if the given value is not nil. +func (pcu *PostContentUpdate) SetNillableSlug(s *string) *PostContentUpdate { + if s != nil { + pcu.SetSlug(*s) + } + return pcu +} + +// SetPostID sets the "post" edge to the Post entity by ID. +func (pcu *PostContentUpdate) SetPostID(id int) *PostContentUpdate { + pcu.mutation.SetPostID(id) + return pcu +} + +// SetNillablePostID sets the "post" edge to the Post entity by ID if the given value is not nil. +func (pcu *PostContentUpdate) SetNillablePostID(id *int) *PostContentUpdate { + if id != nil { + pcu = pcu.SetPostID(*id) + } + return pcu +} + +// SetPost sets the "post" edge to the Post entity. +func (pcu *PostContentUpdate) SetPost(p *Post) *PostContentUpdate { + return pcu.SetPostID(p.ID) +} + +// Mutation returns the PostContentMutation object of the builder. +func (pcu *PostContentUpdate) Mutation() *PostContentMutation { + return pcu.mutation +} + +// ClearPost clears the "post" edge to the Post entity. +func (pcu *PostContentUpdate) ClearPost() *PostContentUpdate { + pcu.mutation.ClearPost() + return pcu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (pcu *PostContentUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, pcu.sqlSave, pcu.mutation, pcu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (pcu *PostContentUpdate) SaveX(ctx context.Context) int { + affected, err := pcu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (pcu *PostContentUpdate) Exec(ctx context.Context) error { + _, err := pcu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (pcu *PostContentUpdate) ExecX(ctx context.Context) { + if err := pcu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (pcu *PostContentUpdate) check() error { + if v, ok := pcu.mutation.LanguageCode(); ok { + if err := postcontent.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "PostContent.language_code": %w`, err)} + } + } + if v, ok := pcu.mutation.Title(); ok { + if err := postcontent.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "PostContent.title": %w`, err)} + } + } + if v, ok := pcu.mutation.ContentMarkdown(); ok { + if err := postcontent.ContentMarkdownValidator(v); err != nil { + return &ValidationError{Name: "content_markdown", err: fmt.Errorf(`ent: validator failed for field "PostContent.content_markdown": %w`, err)} + } + } + if v, ok := pcu.mutation.Summary(); ok { + if err := postcontent.SummaryValidator(v); err != nil { + return &ValidationError{Name: "summary", err: fmt.Errorf(`ent: validator failed for field "PostContent.summary": %w`, err)} + } + } + if v, ok := pcu.mutation.Slug(); ok { + if err := postcontent.SlugValidator(v); err != nil { + return &ValidationError{Name: "slug", err: fmt.Errorf(`ent: validator failed for field "PostContent.slug": %w`, err)} + } + } + return nil +} + +func (pcu *PostContentUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := pcu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(postcontent.Table, postcontent.Columns, sqlgraph.NewFieldSpec(postcontent.FieldID, field.TypeInt)) + if ps := pcu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := pcu.mutation.LanguageCode(); ok { + _spec.SetField(postcontent.FieldLanguageCode, field.TypeEnum, value) + } + if value, ok := pcu.mutation.Title(); ok { + _spec.SetField(postcontent.FieldTitle, field.TypeString, value) + } + if value, ok := pcu.mutation.ContentMarkdown(); ok { + _spec.SetField(postcontent.FieldContentMarkdown, field.TypeString, value) + } + if value, ok := pcu.mutation.Summary(); ok { + _spec.SetField(postcontent.FieldSummary, field.TypeString, value) + } + if value, ok := pcu.mutation.MetaKeywords(); ok { + _spec.SetField(postcontent.FieldMetaKeywords, field.TypeString, value) + } + if pcu.mutation.MetaKeywordsCleared() { + _spec.ClearField(postcontent.FieldMetaKeywords, field.TypeString) + } + if value, ok := pcu.mutation.MetaDescription(); ok { + _spec.SetField(postcontent.FieldMetaDescription, field.TypeString, value) + } + if pcu.mutation.MetaDescriptionCleared() { + _spec.ClearField(postcontent.FieldMetaDescription, field.TypeString) + } + if value, ok := pcu.mutation.Slug(); ok { + _spec.SetField(postcontent.FieldSlug, field.TypeString, value) + } + if pcu.mutation.PostCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontent.PostTable, + Columns: []string{postcontent.PostColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pcu.mutation.PostIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontent.PostTable, + Columns: []string{postcontent.PostColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = 
append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, pcu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{postcontent.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + pcu.mutation.done = true + return n, nil +} + +// PostContentUpdateOne is the builder for updating a single PostContent entity. +type PostContentUpdateOne struct { + config + fields []string + hooks []Hook + mutation *PostContentMutation +} + +// SetLanguageCode sets the "language_code" field. +func (pcuo *PostContentUpdateOne) SetLanguageCode(pc postcontent.LanguageCode) *PostContentUpdateOne { + pcuo.mutation.SetLanguageCode(pc) + return pcuo +} + +// SetNillableLanguageCode sets the "language_code" field if the given value is not nil. +func (pcuo *PostContentUpdateOne) SetNillableLanguageCode(pc *postcontent.LanguageCode) *PostContentUpdateOne { + if pc != nil { + pcuo.SetLanguageCode(*pc) + } + return pcuo +} + +// SetTitle sets the "title" field. +func (pcuo *PostContentUpdateOne) SetTitle(s string) *PostContentUpdateOne { + pcuo.mutation.SetTitle(s) + return pcuo +} + +// SetNillableTitle sets the "title" field if the given value is not nil. +func (pcuo *PostContentUpdateOne) SetNillableTitle(s *string) *PostContentUpdateOne { + if s != nil { + pcuo.SetTitle(*s) + } + return pcuo +} + +// SetContentMarkdown sets the "content_markdown" field. +func (pcuo *PostContentUpdateOne) SetContentMarkdown(s string) *PostContentUpdateOne { + pcuo.mutation.SetContentMarkdown(s) + return pcuo +} + +// SetNillableContentMarkdown sets the "content_markdown" field if the given value is not nil. +func (pcuo *PostContentUpdateOne) SetNillableContentMarkdown(s *string) *PostContentUpdateOne { + if s != nil { + pcuo.SetContentMarkdown(*s) + } + return pcuo +} + +// SetSummary sets the "summary" field. +func (pcuo *PostContentUpdateOne) SetSummary(s string) *PostContentUpdateOne { + pcuo.mutation.SetSummary(s) + return pcuo +} + +// SetNillableSummary sets the "summary" field if the given value is not nil. +func (pcuo *PostContentUpdateOne) SetNillableSummary(s *string) *PostContentUpdateOne { + if s != nil { + pcuo.SetSummary(*s) + } + return pcuo +} + +// SetMetaKeywords sets the "meta_keywords" field. +func (pcuo *PostContentUpdateOne) SetMetaKeywords(s string) *PostContentUpdateOne { + pcuo.mutation.SetMetaKeywords(s) + return pcuo +} + +// SetNillableMetaKeywords sets the "meta_keywords" field if the given value is not nil. +func (pcuo *PostContentUpdateOne) SetNillableMetaKeywords(s *string) *PostContentUpdateOne { + if s != nil { + pcuo.SetMetaKeywords(*s) + } + return pcuo +} + +// ClearMetaKeywords clears the value of the "meta_keywords" field. +func (pcuo *PostContentUpdateOne) ClearMetaKeywords() *PostContentUpdateOne { + pcuo.mutation.ClearMetaKeywords() + return pcuo +} + +// SetMetaDescription sets the "meta_description" field. +func (pcuo *PostContentUpdateOne) SetMetaDescription(s string) *PostContentUpdateOne { + pcuo.mutation.SetMetaDescription(s) + return pcuo +} + +// SetNillableMetaDescription sets the "meta_description" field if the given value is not nil. +func (pcuo *PostContentUpdateOne) SetNillableMetaDescription(s *string) *PostContentUpdateOne { + if s != nil { + pcuo.SetMetaDescription(*s) + } + return pcuo +} + +// ClearMetaDescription clears the value of the "meta_description" field. 
+func (pcuo *PostContentUpdateOne) ClearMetaDescription() *PostContentUpdateOne { + pcuo.mutation.ClearMetaDescription() + return pcuo +} + +// SetSlug sets the "slug" field. +func (pcuo *PostContentUpdateOne) SetSlug(s string) *PostContentUpdateOne { + pcuo.mutation.SetSlug(s) + return pcuo +} + +// SetNillableSlug sets the "slug" field if the given value is not nil. +func (pcuo *PostContentUpdateOne) SetNillableSlug(s *string) *PostContentUpdateOne { + if s != nil { + pcuo.SetSlug(*s) + } + return pcuo +} + +// SetPostID sets the "post" edge to the Post entity by ID. +func (pcuo *PostContentUpdateOne) SetPostID(id int) *PostContentUpdateOne { + pcuo.mutation.SetPostID(id) + return pcuo +} + +// SetNillablePostID sets the "post" edge to the Post entity by ID if the given value is not nil. +func (pcuo *PostContentUpdateOne) SetNillablePostID(id *int) *PostContentUpdateOne { + if id != nil { + pcuo = pcuo.SetPostID(*id) + } + return pcuo +} + +// SetPost sets the "post" edge to the Post entity. +func (pcuo *PostContentUpdateOne) SetPost(p *Post) *PostContentUpdateOne { + return pcuo.SetPostID(p.ID) +} + +// Mutation returns the PostContentMutation object of the builder. +func (pcuo *PostContentUpdateOne) Mutation() *PostContentMutation { + return pcuo.mutation +} + +// ClearPost clears the "post" edge to the Post entity. +func (pcuo *PostContentUpdateOne) ClearPost() *PostContentUpdateOne { + pcuo.mutation.ClearPost() + return pcuo +} + +// Where appends a list predicates to the PostContentUpdate builder. +func (pcuo *PostContentUpdateOne) Where(ps ...predicate.PostContent) *PostContentUpdateOne { + pcuo.mutation.Where(ps...) + return pcuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (pcuo *PostContentUpdateOne) Select(field string, fields ...string) *PostContentUpdateOne { + pcuo.fields = append([]string{field}, fields...) + return pcuo +} + +// Save executes the query and returns the updated PostContent entity. +func (pcuo *PostContentUpdateOne) Save(ctx context.Context) (*PostContent, error) { + return withHooks(ctx, pcuo.sqlSave, pcuo.mutation, pcuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (pcuo *PostContentUpdateOne) SaveX(ctx context.Context) *PostContent { + node, err := pcuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (pcuo *PostContentUpdateOne) Exec(ctx context.Context) error { + _, err := pcuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (pcuo *PostContentUpdateOne) ExecX(ctx context.Context) { + if err := pcuo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (pcuo *PostContentUpdateOne) check() error { + if v, ok := pcuo.mutation.LanguageCode(); ok { + if err := postcontent.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "PostContent.language_code": %w`, err)} + } + } + if v, ok := pcuo.mutation.Title(); ok { + if err := postcontent.TitleValidator(v); err != nil { + return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "PostContent.title": %w`, err)} + } + } + if v, ok := pcuo.mutation.ContentMarkdown(); ok { + if err := postcontent.ContentMarkdownValidator(v); err != nil { + return &ValidationError{Name: "content_markdown", err: fmt.Errorf(`ent: validator failed for field "PostContent.content_markdown": %w`, err)} + } + } + if v, ok := pcuo.mutation.Summary(); ok { + if err := postcontent.SummaryValidator(v); err != nil { + return &ValidationError{Name: "summary", err: fmt.Errorf(`ent: validator failed for field "PostContent.summary": %w`, err)} + } + } + if v, ok := pcuo.mutation.Slug(); ok { + if err := postcontent.SlugValidator(v); err != nil { + return &ValidationError{Name: "slug", err: fmt.Errorf(`ent: validator failed for field "PostContent.slug": %w`, err)} + } + } + return nil +} + +func (pcuo *PostContentUpdateOne) sqlSave(ctx context.Context) (_node *PostContent, err error) { + if err := pcuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(postcontent.Table, postcontent.Columns, sqlgraph.NewFieldSpec(postcontent.FieldID, field.TypeInt)) + id, ok := pcuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PostContent.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := pcuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, postcontent.FieldID) + for _, f := range fields { + if !postcontent.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != postcontent.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := pcuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := pcuo.mutation.LanguageCode(); ok { + _spec.SetField(postcontent.FieldLanguageCode, field.TypeEnum, value) + } + if value, ok := pcuo.mutation.Title(); ok { + _spec.SetField(postcontent.FieldTitle, field.TypeString, value) + } + if value, ok := pcuo.mutation.ContentMarkdown(); ok { + _spec.SetField(postcontent.FieldContentMarkdown, field.TypeString, value) + } + if value, ok := pcuo.mutation.Summary(); ok { + _spec.SetField(postcontent.FieldSummary, field.TypeString, value) + } + if value, ok := pcuo.mutation.MetaKeywords(); ok { + _spec.SetField(postcontent.FieldMetaKeywords, field.TypeString, value) + } + if pcuo.mutation.MetaKeywordsCleared() { + _spec.ClearField(postcontent.FieldMetaKeywords, field.TypeString) + } + if value, ok := pcuo.mutation.MetaDescription(); ok { + _spec.SetField(postcontent.FieldMetaDescription, field.TypeString, value) + } + if pcuo.mutation.MetaDescriptionCleared() { + _spec.ClearField(postcontent.FieldMetaDescription, field.TypeString) + } + if value, ok := pcuo.mutation.Slug(); ok { + _spec.SetField(postcontent.FieldSlug, field.TypeString, value) + } + if pcuo.mutation.PostCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: 
true, + Table: postcontent.PostTable, + Columns: []string{postcontent.PostColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pcuo.mutation.PostIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontent.PostTable, + Columns: []string{postcontent.PostColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &PostContent{config: pcuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, pcuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{postcontent.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + pcuo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/postcontributor.go b/backend/ent/postcontributor.go new file mode 100644 index 0000000..f5c6eba --- /dev/null +++ b/backend/ent/postcontributor.go @@ -0,0 +1,217 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/contributorrole" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/postcontributor" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// PostContributor is the model entity for the PostContributor schema. +type PostContributor struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // LanguageCode holds the value of the "language_code" field. + LanguageCode *postcontributor.LanguageCode `json:"language_code,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the PostContributorQuery when eager-loading is set. + Edges PostContributorEdges `json:"edges"` + contributor_posts *int + contributor_role_post_contributors *int + post_contributors *int + selectValues sql.SelectValues +} + +// PostContributorEdges holds the relations/edges for other nodes in the graph. +type PostContributorEdges struct { + // Post holds the value of the post edge. + Post *Post `json:"post,omitempty"` + // Contributor holds the value of the contributor edge. + Contributor *Contributor `json:"contributor,omitempty"` + // Role holds the value of the role edge. + Role *ContributorRole `json:"role,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [3]bool +} + +// PostOrErr returns the Post value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e PostContributorEdges) PostOrErr() (*Post, error) { + if e.Post != nil { + return e.Post, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: post.Label} + } + return nil, &NotLoadedError{edge: "post"} +} + +// ContributorOrErr returns the Contributor value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. 
+func (e PostContributorEdges) ContributorOrErr() (*Contributor, error) { + if e.Contributor != nil { + return e.Contributor, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: contributor.Label} + } + return nil, &NotLoadedError{edge: "contributor"} +} + +// RoleOrErr returns the Role value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e PostContributorEdges) RoleOrErr() (*ContributorRole, error) { + if e.Role != nil { + return e.Role, nil + } else if e.loadedTypes[2] { + return nil, &NotFoundError{label: contributorrole.Label} + } + return nil, &NotLoadedError{edge: "role"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*PostContributor) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case postcontributor.FieldID: + values[i] = new(sql.NullInt64) + case postcontributor.FieldLanguageCode: + values[i] = new(sql.NullString) + case postcontributor.FieldCreatedAt: + values[i] = new(sql.NullTime) + case postcontributor.ForeignKeys[0]: // contributor_posts + values[i] = new(sql.NullInt64) + case postcontributor.ForeignKeys[1]: // contributor_role_post_contributors + values[i] = new(sql.NullInt64) + case postcontributor.ForeignKeys[2]: // post_contributors + values[i] = new(sql.NullInt64) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the PostContributor fields. +func (pc *PostContributor) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case postcontributor.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + pc.ID = int(value.Int64) + case postcontributor.FieldLanguageCode: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field language_code", values[i]) + } else if value.Valid { + pc.LanguageCode = new(postcontributor.LanguageCode) + *pc.LanguageCode = postcontributor.LanguageCode(value.String) + } + case postcontributor.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + pc.CreatedAt = value.Time + } + case postcontributor.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field contributor_posts", value) + } else if value.Valid { + pc.contributor_posts = new(int) + *pc.contributor_posts = int(value.Int64) + } + case postcontributor.ForeignKeys[1]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field contributor_role_post_contributors", value) + } else if value.Valid { + pc.contributor_role_post_contributors = new(int) + *pc.contributor_role_post_contributors = int(value.Int64) + } + case postcontributor.ForeignKeys[2]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field post_contributors", value) + } else if value.Valid { + pc.post_contributors = new(int) + *pc.post_contributors = int(value.Int64) + } + default: + pc.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value 
returns the ent.Value that was dynamically selected and assigned to the PostContributor. +// This includes values selected through modifiers, order, etc. +func (pc *PostContributor) Value(name string) (ent.Value, error) { + return pc.selectValues.Get(name) +} + +// QueryPost queries the "post" edge of the PostContributor entity. +func (pc *PostContributor) QueryPost() *PostQuery { + return NewPostContributorClient(pc.config).QueryPost(pc) +} + +// QueryContributor queries the "contributor" edge of the PostContributor entity. +func (pc *PostContributor) QueryContributor() *ContributorQuery { + return NewPostContributorClient(pc.config).QueryContributor(pc) +} + +// QueryRole queries the "role" edge of the PostContributor entity. +func (pc *PostContributor) QueryRole() *ContributorRoleQuery { + return NewPostContributorClient(pc.config).QueryRole(pc) +} + +// Update returns a builder for updating this PostContributor. +// Note that you need to call PostContributor.Unwrap() before calling this method if this PostContributor +// was returned from a transaction, and the transaction was committed or rolled back. +func (pc *PostContributor) Update() *PostContributorUpdateOne { + return NewPostContributorClient(pc.config).UpdateOne(pc) +} + +// Unwrap unwraps the PostContributor entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (pc *PostContributor) Unwrap() *PostContributor { + _tx, ok := pc.config.driver.(*txDriver) + if !ok { + panic("ent: PostContributor is not a transactional entity") + } + pc.config.driver = _tx.drv + return pc +} + +// String implements the fmt.Stringer. +func (pc *PostContributor) String() string { + var builder strings.Builder + builder.WriteString("PostContributor(") + builder.WriteString(fmt.Sprintf("id=%v, ", pc.ID)) + if v := pc.LanguageCode; v != nil { + builder.WriteString("language_code=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(pc.CreatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// PostContributors is a parsable slice of PostContributor. +type PostContributors []*PostContributor diff --git a/backend/ent/postcontributor/postcontributor.go b/backend/ent/postcontributor/postcontributor.go new file mode 100644 index 0000000..b2d5b3c --- /dev/null +++ b/backend/ent/postcontributor/postcontributor.go @@ -0,0 +1,170 @@ +// Code generated by ent, DO NOT EDIT. + +package postcontributor + +import ( + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the postcontributor type in the database. + Label = "post_contributor" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldLanguageCode holds the string denoting the language_code field in the database. + FieldLanguageCode = "language_code" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // EdgePost holds the string denoting the post edge name in mutations. + EdgePost = "post" + // EdgeContributor holds the string denoting the contributor edge name in mutations. + EdgeContributor = "contributor" + // EdgeRole holds the string denoting the role edge name in mutations. + EdgeRole = "role" + // Table holds the table name of the postcontributor in the database. 
+ Table = "post_contributors" + // PostTable is the table that holds the post relation/edge. + PostTable = "post_contributors" + // PostInverseTable is the table name for the Post entity. + // It exists in this package in order to avoid circular dependency with the "post" package. + PostInverseTable = "posts" + // PostColumn is the table column denoting the post relation/edge. + PostColumn = "post_contributors" + // ContributorTable is the table that holds the contributor relation/edge. + ContributorTable = "post_contributors" + // ContributorInverseTable is the table name for the Contributor entity. + // It exists in this package in order to avoid circular dependency with the "contributor" package. + ContributorInverseTable = "contributors" + // ContributorColumn is the table column denoting the contributor relation/edge. + ContributorColumn = "contributor_posts" + // RoleTable is the table that holds the role relation/edge. + RoleTable = "post_contributors" + // RoleInverseTable is the table name for the ContributorRole entity. + // It exists in this package in order to avoid circular dependency with the "contributorrole" package. + RoleInverseTable = "contributor_roles" + // RoleColumn is the table column denoting the role relation/edge. + RoleColumn = "contributor_role_post_contributors" +) + +// Columns holds all SQL columns for postcontributor fields. +var Columns = []string{ + FieldID, + FieldLanguageCode, + FieldCreatedAt, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "post_contributors" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "contributor_posts", + "contributor_role_post_contributors", + "post_contributors", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time +) + +// LanguageCode defines the type for the "language_code" enum field. +type LanguageCode string + +// LanguageCode values. +const ( + LanguageCodeEn LanguageCode = "en" + LanguageCodeZhHans LanguageCode = "zh-Hans" + LanguageCodeZhHant LanguageCode = "zh-Hant" +) + +func (lc LanguageCode) String() string { + return string(lc) +} + +// LanguageCodeValidator is a validator for the "language_code" field enum values. It is called by the builders before save. +func LanguageCodeValidator(lc LanguageCode) error { + switch lc { + case LanguageCodeEn, LanguageCodeZhHans, LanguageCodeZhHant: + return nil + default: + return fmt.Errorf("postcontributor: invalid enum value for language_code field: %q", lc) + } +} + +// OrderOption defines the ordering options for the PostContributor queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByLanguageCode orders the results by the language_code field. +func ByLanguageCode(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLanguageCode, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. 
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByPostField orders the results by post field. +func ByPostField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPostStep(), sql.OrderByField(field, opts...)) + } +} + +// ByContributorField orders the results by contributor field. +func ByContributorField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newContributorStep(), sql.OrderByField(field, opts...)) + } +} + +// ByRoleField orders the results by role field. +func ByRoleField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newRoleStep(), sql.OrderByField(field, opts...)) + } +} +func newPostStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PostInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, PostTable, PostColumn), + ) +} +func newContributorStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ContributorInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ContributorTable, ContributorColumn), + ) +} +func newRoleStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(RoleInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, RoleTable, RoleColumn), + ) +} diff --git a/backend/ent/postcontributor/where.go b/backend/ent/postcontributor/where.go new file mode 100644 index 0000000..acdf328 --- /dev/null +++ b/backend/ent/postcontributor/where.go @@ -0,0 +1,215 @@ +// Code generated by ent, DO NOT EDIT. + +package postcontributor + +import ( + "time" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.PostContributor { + return predicate.PostContributor(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.PostContributor { + return predicate.PostContributor(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.PostContributor { + return predicate.PostContributor(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.PostContributor { + return predicate.PostContributor(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.PostContributor { + return predicate.PostContributor(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.PostContributor { + return predicate.PostContributor(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.PostContributor { + return predicate.PostContributor(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.PostContributor { + return predicate.PostContributor(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.PostContributor { + return predicate.PostContributor(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. 
It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.PostContributor { + return predicate.PostContributor(sql.FieldEQ(FieldCreatedAt, v)) +} + +// LanguageCodeEQ applies the EQ predicate on the "language_code" field. +func LanguageCodeEQ(v LanguageCode) predicate.PostContributor { + return predicate.PostContributor(sql.FieldEQ(FieldLanguageCode, v)) +} + +// LanguageCodeNEQ applies the NEQ predicate on the "language_code" field. +func LanguageCodeNEQ(v LanguageCode) predicate.PostContributor { + return predicate.PostContributor(sql.FieldNEQ(FieldLanguageCode, v)) +} + +// LanguageCodeIn applies the In predicate on the "language_code" field. +func LanguageCodeIn(vs ...LanguageCode) predicate.PostContributor { + return predicate.PostContributor(sql.FieldIn(FieldLanguageCode, vs...)) +} + +// LanguageCodeNotIn applies the NotIn predicate on the "language_code" field. +func LanguageCodeNotIn(vs ...LanguageCode) predicate.PostContributor { + return predicate.PostContributor(sql.FieldNotIn(FieldLanguageCode, vs...)) +} + +// LanguageCodeIsNil applies the IsNil predicate on the "language_code" field. +func LanguageCodeIsNil() predicate.PostContributor { + return predicate.PostContributor(sql.FieldIsNull(FieldLanguageCode)) +} + +// LanguageCodeNotNil applies the NotNil predicate on the "language_code" field. +func LanguageCodeNotNil() predicate.PostContributor { + return predicate.PostContributor(sql.FieldNotNull(FieldLanguageCode)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.PostContributor { + return predicate.PostContributor(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.PostContributor { + return predicate.PostContributor(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.PostContributor { + return predicate.PostContributor(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.PostContributor { + return predicate.PostContributor(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.PostContributor { + return predicate.PostContributor(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.PostContributor { + return predicate.PostContributor(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.PostContributor { + return predicate.PostContributor(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.PostContributor { + return predicate.PostContributor(sql.FieldLTE(FieldCreatedAt, v)) +} + +// HasPost applies the HasEdge predicate on the "post" edge. +func HasPost() predicate.PostContributor { + return predicate.PostContributor(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, PostTable, PostColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasPostWith applies the HasEdge predicate on the "post" edge with a given conditions (other predicates). 
+func HasPostWith(preds ...predicate.Post) predicate.PostContributor { + return predicate.PostContributor(func(s *sql.Selector) { + step := newPostStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasContributor applies the HasEdge predicate on the "contributor" edge. +func HasContributor() predicate.PostContributor { + return predicate.PostContributor(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ContributorTable, ContributorColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasContributorWith applies the HasEdge predicate on the "contributor" edge with a given conditions (other predicates). +func HasContributorWith(preds ...predicate.Contributor) predicate.PostContributor { + return predicate.PostContributor(func(s *sql.Selector) { + step := newContributorStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasRole applies the HasEdge predicate on the "role" edge. +func HasRole() predicate.PostContributor { + return predicate.PostContributor(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, RoleTable, RoleColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasRoleWith applies the HasEdge predicate on the "role" edge with a given conditions (other predicates). +func HasRoleWith(preds ...predicate.ContributorRole) predicate.PostContributor { + return predicate.PostContributor(func(s *sql.Selector) { + step := newRoleStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.PostContributor) predicate.PostContributor { + return predicate.PostContributor(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.PostContributor) predicate.PostContributor { + return predicate.PostContributor(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.PostContributor) predicate.PostContributor { + return predicate.PostContributor(sql.NotPredicates(p)) +} diff --git a/backend/ent/postcontributor_create.go b/backend/ent/postcontributor_create.go new file mode 100644 index 0000000..4830cab --- /dev/null +++ b/backend/ent/postcontributor_create.go @@ -0,0 +1,336 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/contributorrole" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/postcontributor" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PostContributorCreate is the builder for creating a PostContributor entity. +type PostContributorCreate struct { + config + mutation *PostContributorMutation + hooks []Hook +} + +// SetLanguageCode sets the "language_code" field. +func (pcc *PostContributorCreate) SetLanguageCode(pc postcontributor.LanguageCode) *PostContributorCreate { + pcc.mutation.SetLanguageCode(pc) + return pcc +} + +// SetNillableLanguageCode sets the "language_code" field if the given value is not nil. 
+func (pcc *PostContributorCreate) SetNillableLanguageCode(pc *postcontributor.LanguageCode) *PostContributorCreate { + if pc != nil { + pcc.SetLanguageCode(*pc) + } + return pcc +} + +// SetCreatedAt sets the "created_at" field. +func (pcc *PostContributorCreate) SetCreatedAt(t time.Time) *PostContributorCreate { + pcc.mutation.SetCreatedAt(t) + return pcc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (pcc *PostContributorCreate) SetNillableCreatedAt(t *time.Time) *PostContributorCreate { + if t != nil { + pcc.SetCreatedAt(*t) + } + return pcc +} + +// SetPostID sets the "post" edge to the Post entity by ID. +func (pcc *PostContributorCreate) SetPostID(id int) *PostContributorCreate { + pcc.mutation.SetPostID(id) + return pcc +} + +// SetNillablePostID sets the "post" edge to the Post entity by ID if the given value is not nil. +func (pcc *PostContributorCreate) SetNillablePostID(id *int) *PostContributorCreate { + if id != nil { + pcc = pcc.SetPostID(*id) + } + return pcc +} + +// SetPost sets the "post" edge to the Post entity. +func (pcc *PostContributorCreate) SetPost(p *Post) *PostContributorCreate { + return pcc.SetPostID(p.ID) +} + +// SetContributorID sets the "contributor" edge to the Contributor entity by ID. +func (pcc *PostContributorCreate) SetContributorID(id int) *PostContributorCreate { + pcc.mutation.SetContributorID(id) + return pcc +} + +// SetNillableContributorID sets the "contributor" edge to the Contributor entity by ID if the given value is not nil. +func (pcc *PostContributorCreate) SetNillableContributorID(id *int) *PostContributorCreate { + if id != nil { + pcc = pcc.SetContributorID(*id) + } + return pcc +} + +// SetContributor sets the "contributor" edge to the Contributor entity. +func (pcc *PostContributorCreate) SetContributor(c *Contributor) *PostContributorCreate { + return pcc.SetContributorID(c.ID) +} + +// SetRoleID sets the "role" edge to the ContributorRole entity by ID. +func (pcc *PostContributorCreate) SetRoleID(id int) *PostContributorCreate { + pcc.mutation.SetRoleID(id) + return pcc +} + +// SetNillableRoleID sets the "role" edge to the ContributorRole entity by ID if the given value is not nil. +func (pcc *PostContributorCreate) SetNillableRoleID(id *int) *PostContributorCreate { + if id != nil { + pcc = pcc.SetRoleID(*id) + } + return pcc +} + +// SetRole sets the "role" edge to the ContributorRole entity. +func (pcc *PostContributorCreate) SetRole(c *ContributorRole) *PostContributorCreate { + return pcc.SetRoleID(c.ID) +} + +// Mutation returns the PostContributorMutation object of the builder. +func (pcc *PostContributorCreate) Mutation() *PostContributorMutation { + return pcc.mutation +} + +// Save creates the PostContributor in the database. +func (pcc *PostContributorCreate) Save(ctx context.Context) (*PostContributor, error) { + pcc.defaults() + return withHooks(ctx, pcc.sqlSave, pcc.mutation, pcc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (pcc *PostContributorCreate) SaveX(ctx context.Context) *PostContributor { + v, err := pcc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (pcc *PostContributorCreate) Exec(ctx context.Context) error { + _, err := pcc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (pcc *PostContributorCreate) ExecX(ctx context.Context) { + if err := pcc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (pcc *PostContributorCreate) defaults() { + if _, ok := pcc.mutation.CreatedAt(); !ok { + v := postcontributor.DefaultCreatedAt() + pcc.mutation.SetCreatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (pcc *PostContributorCreate) check() error { + if v, ok := pcc.mutation.LanguageCode(); ok { + if err := postcontributor.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "PostContributor.language_code": %w`, err)} + } + } + if _, ok := pcc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "PostContributor.created_at"`)} + } + return nil +} + +func (pcc *PostContributorCreate) sqlSave(ctx context.Context) (*PostContributor, error) { + if err := pcc.check(); err != nil { + return nil, err + } + _node, _spec := pcc.createSpec() + if err := sqlgraph.CreateNode(ctx, pcc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + pcc.mutation.id = &_node.ID + pcc.mutation.done = true + return _node, nil +} + +func (pcc *PostContributorCreate) createSpec() (*PostContributor, *sqlgraph.CreateSpec) { + var ( + _node = &PostContributor{config: pcc.config} + _spec = sqlgraph.NewCreateSpec(postcontributor.Table, sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt)) + ) + if value, ok := pcc.mutation.LanguageCode(); ok { + _spec.SetField(postcontributor.FieldLanguageCode, field.TypeEnum, value) + _node.LanguageCode = &value + } + if value, ok := pcc.mutation.CreatedAt(); ok { + _spec.SetField(postcontributor.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if nodes := pcc.mutation.PostIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.PostTable, + Columns: []string{postcontributor.PostColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.post_contributors = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := pcc.mutation.ContributorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.ContributorTable, + Columns: []string{postcontributor.ContributorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.contributor_posts = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := pcc.mutation.RoleIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.RoleTable, + Columns: []string{postcontributor.RoleColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributorrole.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.contributor_role_post_contributors = &nodes[0] + _spec.Edges = 
append(_spec.Edges, edge) + } + return _node, _spec +} + +// PostContributorCreateBulk is the builder for creating many PostContributor entities in bulk. +type PostContributorCreateBulk struct { + config + err error + builders []*PostContributorCreate +} + +// Save creates the PostContributor entities in the database. +func (pccb *PostContributorCreateBulk) Save(ctx context.Context) ([]*PostContributor, error) { + if pccb.err != nil { + return nil, pccb.err + } + specs := make([]*sqlgraph.CreateSpec, len(pccb.builders)) + nodes := make([]*PostContributor, len(pccb.builders)) + mutators := make([]Mutator, len(pccb.builders)) + for i := range pccb.builders { + func(i int, root context.Context) { + builder := pccb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*PostContributorMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, pccb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, pccb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, pccb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (pccb *PostContributorCreateBulk) SaveX(ctx context.Context) []*PostContributor { + v, err := pccb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (pccb *PostContributorCreateBulk) Exec(ctx context.Context) error { + _, err := pccb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (pccb *PostContributorCreateBulk) ExecX(ctx context.Context) { + if err := pccb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/postcontributor_delete.go b/backend/ent/postcontributor_delete.go new file mode 100644 index 0000000..7bd922a --- /dev/null +++ b/backend/ent/postcontributor_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "tss-rocks-be/ent/postcontributor" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PostContributorDelete is the builder for deleting a PostContributor entity. +type PostContributorDelete struct { + config + hooks []Hook + mutation *PostContributorMutation +} + +// Where appends a list predicates to the PostContributorDelete builder. +func (pcd *PostContributorDelete) Where(ps ...predicate.PostContributor) *PostContributorDelete { + pcd.mutation.Where(ps...) + return pcd +} + +// Exec executes the deletion query and returns how many vertices were deleted. 
+func (pcd *PostContributorDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, pcd.sqlExec, pcd.mutation, pcd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (pcd *PostContributorDelete) ExecX(ctx context.Context) int { + n, err := pcd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (pcd *PostContributorDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(postcontributor.Table, sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt)) + if ps := pcd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, pcd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + pcd.mutation.done = true + return affected, err +} + +// PostContributorDeleteOne is the builder for deleting a single PostContributor entity. +type PostContributorDeleteOne struct { + pcd *PostContributorDelete +} + +// Where appends a list predicates to the PostContributorDelete builder. +func (pcdo *PostContributorDeleteOne) Where(ps ...predicate.PostContributor) *PostContributorDeleteOne { + pcdo.pcd.mutation.Where(ps...) + return pcdo +} + +// Exec executes the deletion query. +func (pcdo *PostContributorDeleteOne) Exec(ctx context.Context) error { + n, err := pcdo.pcd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{postcontributor.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (pcdo *PostContributorDeleteOne) ExecX(ctx context.Context) { + if err := pcdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/postcontributor_query.go b/backend/ent/postcontributor_query.go new file mode 100644 index 0000000..008b9d3 --- /dev/null +++ b/backend/ent/postcontributor_query.go @@ -0,0 +1,764 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/contributorrole" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/postcontributor" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PostContributorQuery is the builder for querying PostContributor entities. +type PostContributorQuery struct { + config + ctx *QueryContext + order []postcontributor.OrderOption + inters []Interceptor + predicates []predicate.PostContributor + withPost *PostQuery + withContributor *ContributorQuery + withRole *ContributorRoleQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the PostContributorQuery builder. +func (pcq *PostContributorQuery) Where(ps ...predicate.PostContributor) *PostContributorQuery { + pcq.predicates = append(pcq.predicates, ps...) + return pcq +} + +// Limit the number of records to be returned by this query. +func (pcq *PostContributorQuery) Limit(limit int) *PostContributorQuery { + pcq.ctx.Limit = &limit + return pcq +} + +// Offset to start from. +func (pcq *PostContributorQuery) Offset(offset int) *PostContributorQuery { + pcq.ctx.Offset = &offset + return pcq +} + +// Unique configures the query builder to filter duplicate records on query. 
+// By default, unique is set to true, and can be disabled using this method. +func (pcq *PostContributorQuery) Unique(unique bool) *PostContributorQuery { + pcq.ctx.Unique = &unique + return pcq +} + +// Order specifies how the records should be ordered. +func (pcq *PostContributorQuery) Order(o ...postcontributor.OrderOption) *PostContributorQuery { + pcq.order = append(pcq.order, o...) + return pcq +} + +// QueryPost chains the current query on the "post" edge. +func (pcq *PostContributorQuery) QueryPost() *PostQuery { + query := (&PostClient{config: pcq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := pcq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := pcq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(postcontributor.Table, postcontributor.FieldID, selector), + sqlgraph.To(post.Table, post.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, postcontributor.PostTable, postcontributor.PostColumn), + ) + fromU = sqlgraph.SetNeighbors(pcq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryContributor chains the current query on the "contributor" edge. +func (pcq *PostContributorQuery) QueryContributor() *ContributorQuery { + query := (&ContributorClient{config: pcq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := pcq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := pcq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(postcontributor.Table, postcontributor.FieldID, selector), + sqlgraph.To(contributor.Table, contributor.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, postcontributor.ContributorTable, postcontributor.ContributorColumn), + ) + fromU = sqlgraph.SetNeighbors(pcq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryRole chains the current query on the "role" edge. +func (pcq *PostContributorQuery) QueryRole() *ContributorRoleQuery { + query := (&ContributorRoleClient{config: pcq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := pcq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := pcq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(postcontributor.Table, postcontributor.FieldID, selector), + sqlgraph.To(contributorrole.Table, contributorrole.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, postcontributor.RoleTable, postcontributor.RoleColumn), + ) + fromU = sqlgraph.SetNeighbors(pcq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first PostContributor entity from the query. +// Returns a *NotFoundError when no PostContributor was found. +func (pcq *PostContributorQuery) First(ctx context.Context) (*PostContributor, error) { + nodes, err := pcq.Limit(1).All(setContextOp(ctx, pcq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{postcontributor.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (pcq *PostContributorQuery) FirstX(ctx context.Context) *PostContributor { + node, err := pcq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first PostContributor ID from the query. 
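QueryPost, QueryContributor and QueryRole each graft an M2O step onto the current selector, so edge traversal composes directly with ordinary predicates. A sketch, assuming `client`, `ctx`, and a known row `id` (IDEQ is assumed from the generated where.go pattern for this package):

    // Hop from a PostContributor row to its Post in one composed query.
    p, err := client.PostContributor.Query().
        Where(postcontributor.IDEQ(id)).
        QueryPost().
        First(ctx)
    if ent.IsNotFound(err) {
        // either the row is missing or its "post" edge is unset
    }
    _ = p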
+// Returns a *NotFoundError when no PostContributor ID was found. +func (pcq *PostContributorQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = pcq.Limit(1).IDs(setContextOp(ctx, pcq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{postcontributor.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (pcq *PostContributorQuery) FirstIDX(ctx context.Context) int { + id, err := pcq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single PostContributor entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one PostContributor entity is found. +// Returns a *NotFoundError when no PostContributor entities are found. +func (pcq *PostContributorQuery) Only(ctx context.Context) (*PostContributor, error) { + nodes, err := pcq.Limit(2).All(setContextOp(ctx, pcq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{postcontributor.Label} + default: + return nil, &NotSingularError{postcontributor.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (pcq *PostContributorQuery) OnlyX(ctx context.Context) *PostContributor { + node, err := pcq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only PostContributor ID in the query. +// Returns a *NotSingularError when more than one PostContributor ID is found. +// Returns a *NotFoundError when no entities are found. +func (pcq *PostContributorQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = pcq.Limit(2).IDs(setContextOp(ctx, pcq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{postcontributor.Label} + default: + err = &NotSingularError{postcontributor.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (pcq *PostContributorQuery) OnlyIDX(ctx context.Context) int { + id, err := pcq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of PostContributors. +func (pcq *PostContributorQuery) All(ctx context.Context) ([]*PostContributor, error) { + ctx = setContextOp(ctx, pcq.ctx, ent.OpQueryAll) + if err := pcq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*PostContributor, *PostContributorQuery]() + return withInterceptors[[]*PostContributor](ctx, pcq, qr, pcq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (pcq *PostContributorQuery) AllX(ctx context.Context) []*PostContributor { + nodes, err := pcq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of PostContributor IDs. +func (pcq *PostContributorQuery) IDs(ctx context.Context) (ids []int, err error) { + if pcq.ctx.Unique == nil && pcq.path != nil { + pcq.Unique(true) + } + ctx = setContextOp(ctx, pcq.ctx, ent.OpQueryIDs) + if err = pcq.Select(postcontributor.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. 
+func (pcq *PostContributorQuery) IDsX(ctx context.Context) []int { + ids, err := pcq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (pcq *PostContributorQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, pcq.ctx, ent.OpQueryCount) + if err := pcq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, pcq, querierCount[*PostContributorQuery](), pcq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (pcq *PostContributorQuery) CountX(ctx context.Context) int { + count, err := pcq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (pcq *PostContributorQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, pcq.ctx, ent.OpQueryExist) + switch _, err := pcq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (pcq *PostContributorQuery) ExistX(ctx context.Context) bool { + exist, err := pcq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the PostContributorQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (pcq *PostContributorQuery) Clone() *PostContributorQuery { + if pcq == nil { + return nil + } + return &PostContributorQuery{ + config: pcq.config, + ctx: pcq.ctx.Clone(), + order: append([]postcontributor.OrderOption{}, pcq.order...), + inters: append([]Interceptor{}, pcq.inters...), + predicates: append([]predicate.PostContributor{}, pcq.predicates...), + withPost: pcq.withPost.Clone(), + withContributor: pcq.withContributor.Clone(), + withRole: pcq.withRole.Clone(), + // clone intermediate query. + sql: pcq.sql.Clone(), + path: pcq.path, + } +} + +// WithPost tells the query-builder to eager-load the nodes that are connected to +// the "post" edge. The optional arguments are used to configure the query builder of the edge. +func (pcq *PostContributorQuery) WithPost(opts ...func(*PostQuery)) *PostContributorQuery { + query := (&PostClient{config: pcq.config}).Query() + for _, opt := range opts { + opt(query) + } + pcq.withPost = query + return pcq +} + +// WithContributor tells the query-builder to eager-load the nodes that are connected to +// the "contributor" edge. The optional arguments are used to configure the query builder of the edge. +func (pcq *PostContributorQuery) WithContributor(opts ...func(*ContributorQuery)) *PostContributorQuery { + query := (&ContributorClient{config: pcq.config}).Query() + for _, opt := range opts { + opt(query) + } + pcq.withContributor = query + return pcq +} + +// WithRole tells the query-builder to eager-load the nodes that are connected to +// the "role" edge. The optional arguments are used to configure the query builder of the edge. +func (pcq *PostContributorQuery) WithRole(opts ...func(*ContributorRoleQuery)) *PostContributorQuery { + query := (&ContributorRoleClient{config: pcq.config}).Query() + for _, opt := range opts { + opt(query) + } + pcq.withRole = query + return pcq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. 
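The With* methods flip the corresponding loadedTypes flags so the edges are fetched alongside the main query and exposed on Edges. A sketch, assuming `client`, `ctx`, and the standard library "log" import:

    items, err := client.PostContributor.Query().
        WithPost().
        WithContributor().
        WithRole().
        All(ctx)
    if err != nil {
        return err
    }
    for _, it := range items {
        // Populated by the eager-loading above; a nil edge means the FK was unset.
        if it.Edges.Post != nil && it.Edges.Contributor != nil {
            log.Printf("post=%d contributor=%d", it.Edges.Post.ID, it.Edges.Contributor.ID)
        }
    }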
+// +// Example: +// +// var v []struct { +// LanguageCode postcontributor.LanguageCode `json:"language_code,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.PostContributor.Query(). +// GroupBy(postcontributor.FieldLanguageCode). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (pcq *PostContributorQuery) GroupBy(field string, fields ...string) *PostContributorGroupBy { + pcq.ctx.Fields = append([]string{field}, fields...) + grbuild := &PostContributorGroupBy{build: pcq} + grbuild.flds = &pcq.ctx.Fields + grbuild.label = postcontributor.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// LanguageCode postcontributor.LanguageCode `json:"language_code,omitempty"` +// } +// +// client.PostContributor.Query(). +// Select(postcontributor.FieldLanguageCode). +// Scan(ctx, &v) +func (pcq *PostContributorQuery) Select(fields ...string) *PostContributorSelect { + pcq.ctx.Fields = append(pcq.ctx.Fields, fields...) + sbuild := &PostContributorSelect{PostContributorQuery: pcq} + sbuild.label = postcontributor.Label + sbuild.flds, sbuild.scan = &pcq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a PostContributorSelect configured with the given aggregations. +func (pcq *PostContributorQuery) Aggregate(fns ...AggregateFunc) *PostContributorSelect { + return pcq.Select().Aggregate(fns...) +} + +func (pcq *PostContributorQuery) prepareQuery(ctx context.Context) error { + for _, inter := range pcq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, pcq); err != nil { + return err + } + } + } + for _, f := range pcq.ctx.Fields { + if !postcontributor.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if pcq.path != nil { + prev, err := pcq.path(ctx) + if err != nil { + return err + } + pcq.sql = prev + } + return nil +} + +func (pcq *PostContributorQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PostContributor, error) { + var ( + nodes = []*PostContributor{} + withFKs = pcq.withFKs + _spec = pcq.querySpec() + loadedTypes = [3]bool{ + pcq.withPost != nil, + pcq.withContributor != nil, + pcq.withRole != nil, + } + ) + if pcq.withPost != nil || pcq.withContributor != nil || pcq.withRole != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, postcontributor.ForeignKeys...) 
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*PostContributor).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &PostContributor{config: pcq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, pcq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := pcq.withPost; query != nil { + if err := pcq.loadPost(ctx, query, nodes, nil, + func(n *PostContributor, e *Post) { n.Edges.Post = e }); err != nil { + return nil, err + } + } + if query := pcq.withContributor; query != nil { + if err := pcq.loadContributor(ctx, query, nodes, nil, + func(n *PostContributor, e *Contributor) { n.Edges.Contributor = e }); err != nil { + return nil, err + } + } + if query := pcq.withRole; query != nil { + if err := pcq.loadRole(ctx, query, nodes, nil, + func(n *PostContributor, e *ContributorRole) { n.Edges.Role = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (pcq *PostContributorQuery) loadPost(ctx context.Context, query *PostQuery, nodes []*PostContributor, init func(*PostContributor), assign func(*PostContributor, *Post)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*PostContributor) + for i := range nodes { + if nodes[i].post_contributors == nil { + continue + } + fk := *nodes[i].post_contributors + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(post.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "post_contributors" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (pcq *PostContributorQuery) loadContributor(ctx context.Context, query *ContributorQuery, nodes []*PostContributor, init func(*PostContributor), assign func(*PostContributor, *Contributor)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*PostContributor) + for i := range nodes { + if nodes[i].contributor_posts == nil { + continue + } + fk := *nodes[i].contributor_posts + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(contributor.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "contributor_posts" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (pcq *PostContributorQuery) loadRole(ctx context.Context, query *ContributorRoleQuery, nodes []*PostContributor, init func(*PostContributor), assign func(*PostContributor, *ContributorRole)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*PostContributor) + for i := range nodes { + if nodes[i].contributor_role_post_contributors == nil { + continue + } + fk := *nodes[i].contributor_role_post_contributors + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + 
query.Where(contributorrole.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "contributor_role_post_contributors" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (pcq *PostContributorQuery) sqlCount(ctx context.Context) (int, error) { + _spec := pcq.querySpec() + _spec.Node.Columns = pcq.ctx.Fields + if len(pcq.ctx.Fields) > 0 { + _spec.Unique = pcq.ctx.Unique != nil && *pcq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, pcq.driver, _spec) +} + +func (pcq *PostContributorQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(postcontributor.Table, postcontributor.Columns, sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt)) + _spec.From = pcq.sql + if unique := pcq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if pcq.path != nil { + _spec.Unique = true + } + if fields := pcq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, postcontributor.FieldID) + for i := range fields { + if fields[i] != postcontributor.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := pcq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := pcq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := pcq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := pcq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (pcq *PostContributorQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(pcq.driver.Dialect()) + t1 := builder.Table(postcontributor.Table) + columns := pcq.ctx.Fields + if len(columns) == 0 { + columns = postcontributor.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if pcq.sql != nil { + selector = pcq.sql + selector.Select(selector.Columns(columns...)...) + } + if pcq.ctx.Unique != nil && *pcq.ctx.Unique { + selector.Distinct() + } + for _, p := range pcq.predicates { + p(selector) + } + for _, p := range pcq.order { + p(selector) + } + if offset := pcq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := pcq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// PostContributorGroupBy is the group-by builder for PostContributor entities. +type PostContributorGroupBy struct { + selector + build *PostContributorQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (pcgb *PostContributorGroupBy) Aggregate(fns ...AggregateFunc) *PostContributorGroupBy { + pcgb.fns = append(pcgb.fns, fns...) + return pcgb +} + +// Scan applies the selector query and scans the result into the given value. 
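As the sqlQuery helper notes, an Offset without a Limit gets padded with math.MaxInt32, so page-based listing should always set both. A sketch, where `sql` is entgo.io/ent/dialect/sql and postcontributor.ByCreatedAt is assumed to exist, mirroring the order options in the role package later in this patch:

    page, err := client.PostContributor.Query().
        Order(postcontributor.ByCreatedAt(sql.OrderDesc())).
        Offset(pageIndex * pageSize).
        Limit(pageSize).
        All(ctx)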
+func (pcgb *PostContributorGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pcgb.build.ctx, ent.OpQueryGroupBy) + if err := pcgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PostContributorQuery, *PostContributorGroupBy](ctx, pcgb.build, pcgb, pcgb.build.inters, v) +} + +func (pcgb *PostContributorGroupBy) sqlScan(ctx context.Context, root *PostContributorQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(pcgb.fns)) + for _, fn := range pcgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*pcgb.flds)+len(pcgb.fns)) + for _, f := range *pcgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*pcgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := pcgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// PostContributorSelect is the builder for selecting fields of PostContributor entities. +type PostContributorSelect struct { + *PostContributorQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (pcs *PostContributorSelect) Aggregate(fns ...AggregateFunc) *PostContributorSelect { + pcs.fns = append(pcs.fns, fns...) + return pcs +} + +// Scan applies the selector query and scans the result into the given value. +func (pcs *PostContributorSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, pcs.ctx, ent.OpQuerySelect) + if err := pcs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PostContributorQuery, *PostContributorSelect](ctx, pcs.PostContributorQuery, pcs, pcs.inters, v) +} + +func (pcs *PostContributorSelect) sqlScan(ctx context.Context, root *PostContributorQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(pcs.fns)) + for _, fn := range pcs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*pcs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := pcs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/postcontributor_update.go b/backend/ent/postcontributor_update.go new file mode 100644 index 0000000..b7763f8 --- /dev/null +++ b/backend/ent/postcontributor_update.go @@ -0,0 +1,615 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/contributorrole" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/postcontributor" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// PostContributorUpdate is the builder for updating PostContributor entities. +type PostContributorUpdate struct { + config + hooks []Hook + mutation *PostContributorMutation +} + +// Where appends a list predicates to the PostContributorUpdate builder. 
+func (pcu *PostContributorUpdate) Where(ps ...predicate.PostContributor) *PostContributorUpdate { + pcu.mutation.Where(ps...) + return pcu +} + +// SetLanguageCode sets the "language_code" field. +func (pcu *PostContributorUpdate) SetLanguageCode(pc postcontributor.LanguageCode) *PostContributorUpdate { + pcu.mutation.SetLanguageCode(pc) + return pcu +} + +// SetNillableLanguageCode sets the "language_code" field if the given value is not nil. +func (pcu *PostContributorUpdate) SetNillableLanguageCode(pc *postcontributor.LanguageCode) *PostContributorUpdate { + if pc != nil { + pcu.SetLanguageCode(*pc) + } + return pcu +} + +// ClearLanguageCode clears the value of the "language_code" field. +func (pcu *PostContributorUpdate) ClearLanguageCode() *PostContributorUpdate { + pcu.mutation.ClearLanguageCode() + return pcu +} + +// SetCreatedAt sets the "created_at" field. +func (pcu *PostContributorUpdate) SetCreatedAt(t time.Time) *PostContributorUpdate { + pcu.mutation.SetCreatedAt(t) + return pcu +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (pcu *PostContributorUpdate) SetNillableCreatedAt(t *time.Time) *PostContributorUpdate { + if t != nil { + pcu.SetCreatedAt(*t) + } + return pcu +} + +// SetPostID sets the "post" edge to the Post entity by ID. +func (pcu *PostContributorUpdate) SetPostID(id int) *PostContributorUpdate { + pcu.mutation.SetPostID(id) + return pcu +} + +// SetNillablePostID sets the "post" edge to the Post entity by ID if the given value is not nil. +func (pcu *PostContributorUpdate) SetNillablePostID(id *int) *PostContributorUpdate { + if id != nil { + pcu = pcu.SetPostID(*id) + } + return pcu +} + +// SetPost sets the "post" edge to the Post entity. +func (pcu *PostContributorUpdate) SetPost(p *Post) *PostContributorUpdate { + return pcu.SetPostID(p.ID) +} + +// SetContributorID sets the "contributor" edge to the Contributor entity by ID. +func (pcu *PostContributorUpdate) SetContributorID(id int) *PostContributorUpdate { + pcu.mutation.SetContributorID(id) + return pcu +} + +// SetNillableContributorID sets the "contributor" edge to the Contributor entity by ID if the given value is not nil. +func (pcu *PostContributorUpdate) SetNillableContributorID(id *int) *PostContributorUpdate { + if id != nil { + pcu = pcu.SetContributorID(*id) + } + return pcu +} + +// SetContributor sets the "contributor" edge to the Contributor entity. +func (pcu *PostContributorUpdate) SetContributor(c *Contributor) *PostContributorUpdate { + return pcu.SetContributorID(c.ID) +} + +// SetRoleID sets the "role" edge to the ContributorRole entity by ID. +func (pcu *PostContributorUpdate) SetRoleID(id int) *PostContributorUpdate { + pcu.mutation.SetRoleID(id) + return pcu +} + +// SetNillableRoleID sets the "role" edge to the ContributorRole entity by ID if the given value is not nil. +func (pcu *PostContributorUpdate) SetNillableRoleID(id *int) *PostContributorUpdate { + if id != nil { + pcu = pcu.SetRoleID(*id) + } + return pcu +} + +// SetRole sets the "role" edge to the ContributorRole entity. +func (pcu *PostContributorUpdate) SetRole(c *ContributorRole) *PostContributorUpdate { + return pcu.SetRoleID(c.ID) +} + +// Mutation returns the PostContributorMutation object of the builder. +func (pcu *PostContributorUpdate) Mutation() *PostContributorMutation { + return pcu.mutation +} + +// ClearPost clears the "post" edge to the Post entity. 
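The update builder applies its Set* values and edge mutations to every row matched by Where and returns the affected count. A sketch, assuming `client`, `ctx`, `postID` and `newRoleID`; HasPostWith is an assumption based on ent's usual edge-predicate convention, not shown in this file:

    // Reassign the role on all contributor links of one post.
    n, err := client.PostContributor.Update().
        Where(postcontributor.HasPostWith(post.IDEQ(postID))). // edge predicate assumed
        SetRoleID(newRoleID).
        Save(ctx)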
+func (pcu *PostContributorUpdate) ClearPost() *PostContributorUpdate { + pcu.mutation.ClearPost() + return pcu +} + +// ClearContributor clears the "contributor" edge to the Contributor entity. +func (pcu *PostContributorUpdate) ClearContributor() *PostContributorUpdate { + pcu.mutation.ClearContributor() + return pcu +} + +// ClearRole clears the "role" edge to the ContributorRole entity. +func (pcu *PostContributorUpdate) ClearRole() *PostContributorUpdate { + pcu.mutation.ClearRole() + return pcu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (pcu *PostContributorUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, pcu.sqlSave, pcu.mutation, pcu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (pcu *PostContributorUpdate) SaveX(ctx context.Context) int { + affected, err := pcu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (pcu *PostContributorUpdate) Exec(ctx context.Context) error { + _, err := pcu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (pcu *PostContributorUpdate) ExecX(ctx context.Context) { + if err := pcu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (pcu *PostContributorUpdate) check() error { + if v, ok := pcu.mutation.LanguageCode(); ok { + if err := postcontributor.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "PostContributor.language_code": %w`, err)} + } + } + return nil +} + +func (pcu *PostContributorUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := pcu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(postcontributor.Table, postcontributor.Columns, sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt)) + if ps := pcu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := pcu.mutation.LanguageCode(); ok { + _spec.SetField(postcontributor.FieldLanguageCode, field.TypeEnum, value) + } + if pcu.mutation.LanguageCodeCleared() { + _spec.ClearField(postcontributor.FieldLanguageCode, field.TypeEnum) + } + if value, ok := pcu.mutation.CreatedAt(); ok { + _spec.SetField(postcontributor.FieldCreatedAt, field.TypeTime, value) + } + if pcu.mutation.PostCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.PostTable, + Columns: []string{postcontributor.PostColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pcu.mutation.PostIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.PostTable, + Columns: []string{postcontributor.PostColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if pcu.mutation.ContributorCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.ContributorTable, + Columns: []string{postcontributor.ContributorColumn}, + Bidi: false, + 
Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pcu.mutation.ContributorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.ContributorTable, + Columns: []string{postcontributor.ContributorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if pcu.mutation.RoleCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.RoleTable, + Columns: []string{postcontributor.RoleColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributorrole.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pcu.mutation.RoleIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.RoleTable, + Columns: []string{postcontributor.RoleColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributorrole.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, pcu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{postcontributor.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + pcu.mutation.done = true + return n, nil +} + +// PostContributorUpdateOne is the builder for updating a single PostContributor entity. +type PostContributorUpdateOne struct { + config + fields []string + hooks []Hook + mutation *PostContributorMutation +} + +// SetLanguageCode sets the "language_code" field. +func (pcuo *PostContributorUpdateOne) SetLanguageCode(pc postcontributor.LanguageCode) *PostContributorUpdateOne { + pcuo.mutation.SetLanguageCode(pc) + return pcuo +} + +// SetNillableLanguageCode sets the "language_code" field if the given value is not nil. +func (pcuo *PostContributorUpdateOne) SetNillableLanguageCode(pc *postcontributor.LanguageCode) *PostContributorUpdateOne { + if pc != nil { + pcuo.SetLanguageCode(*pc) + } + return pcuo +} + +// ClearLanguageCode clears the value of the "language_code" field. +func (pcuo *PostContributorUpdateOne) ClearLanguageCode() *PostContributorUpdateOne { + pcuo.mutation.ClearLanguageCode() + return pcuo +} + +// SetCreatedAt sets the "created_at" field. +func (pcuo *PostContributorUpdateOne) SetCreatedAt(t time.Time) *PostContributorUpdateOne { + pcuo.mutation.SetCreatedAt(t) + return pcuo +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (pcuo *PostContributorUpdateOne) SetNillableCreatedAt(t *time.Time) *PostContributorUpdateOne { + if t != nil { + pcuo.SetCreatedAt(*t) + } + return pcuo +} + +// SetPostID sets the "post" edge to the Post entity by ID. +func (pcuo *PostContributorUpdateOne) SetPostID(id int) *PostContributorUpdateOne { + pcuo.mutation.SetPostID(id) + return pcuo +} + +// SetNillablePostID sets the "post" edge to the Post entity by ID if the given value is not nil. 
+func (pcuo *PostContributorUpdateOne) SetNillablePostID(id *int) *PostContributorUpdateOne { + if id != nil { + pcuo = pcuo.SetPostID(*id) + } + return pcuo +} + +// SetPost sets the "post" edge to the Post entity. +func (pcuo *PostContributorUpdateOne) SetPost(p *Post) *PostContributorUpdateOne { + return pcuo.SetPostID(p.ID) +} + +// SetContributorID sets the "contributor" edge to the Contributor entity by ID. +func (pcuo *PostContributorUpdateOne) SetContributorID(id int) *PostContributorUpdateOne { + pcuo.mutation.SetContributorID(id) + return pcuo +} + +// SetNillableContributorID sets the "contributor" edge to the Contributor entity by ID if the given value is not nil. +func (pcuo *PostContributorUpdateOne) SetNillableContributorID(id *int) *PostContributorUpdateOne { + if id != nil { + pcuo = pcuo.SetContributorID(*id) + } + return pcuo +} + +// SetContributor sets the "contributor" edge to the Contributor entity. +func (pcuo *PostContributorUpdateOne) SetContributor(c *Contributor) *PostContributorUpdateOne { + return pcuo.SetContributorID(c.ID) +} + +// SetRoleID sets the "role" edge to the ContributorRole entity by ID. +func (pcuo *PostContributorUpdateOne) SetRoleID(id int) *PostContributorUpdateOne { + pcuo.mutation.SetRoleID(id) + return pcuo +} + +// SetNillableRoleID sets the "role" edge to the ContributorRole entity by ID if the given value is not nil. +func (pcuo *PostContributorUpdateOne) SetNillableRoleID(id *int) *PostContributorUpdateOne { + if id != nil { + pcuo = pcuo.SetRoleID(*id) + } + return pcuo +} + +// SetRole sets the "role" edge to the ContributorRole entity. +func (pcuo *PostContributorUpdateOne) SetRole(c *ContributorRole) *PostContributorUpdateOne { + return pcuo.SetRoleID(c.ID) +} + +// Mutation returns the PostContributorMutation object of the builder. +func (pcuo *PostContributorUpdateOne) Mutation() *PostContributorMutation { + return pcuo.mutation +} + +// ClearPost clears the "post" edge to the Post entity. +func (pcuo *PostContributorUpdateOne) ClearPost() *PostContributorUpdateOne { + pcuo.mutation.ClearPost() + return pcuo +} + +// ClearContributor clears the "contributor" edge to the Contributor entity. +func (pcuo *PostContributorUpdateOne) ClearContributor() *PostContributorUpdateOne { + pcuo.mutation.ClearContributor() + return pcuo +} + +// ClearRole clears the "role" edge to the ContributorRole entity. +func (pcuo *PostContributorUpdateOne) ClearRole() *PostContributorUpdateOne { + pcuo.mutation.ClearRole() + return pcuo +} + +// Where appends a list predicates to the PostContributorUpdate builder. +func (pcuo *PostContributorUpdateOne) Where(ps ...predicate.PostContributor) *PostContributorUpdateOne { + pcuo.mutation.Where(ps...) + return pcuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (pcuo *PostContributorUpdateOne) Select(field string, fields ...string) *PostContributorUpdateOne { + pcuo.fields = append([]string{field}, fields...) + return pcuo +} + +// Save executes the query and returns the updated PostContributor entity. +func (pcuo *PostContributorUpdateOne) Save(ctx context.Context) (*PostContributor, error) { + return withHooks(ctx, pcuo.sqlSave, pcuo.mutation, pcuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
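UpdateOne targets a single row through its mutation ID and returns the updated entity rather than a count. A sketch using UpdateOneID, which ent's generated client conventionally exposes alongside UpdateOne (assumed here, as the client file is elsewhere in this patch):

    pc, err := client.PostContributor.UpdateOneID(id).
        ClearLanguageCode(). // the optional enum field can be nulled out
        SetRoleID(roleID).
        Save(ctx)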
+func (pcuo *PostContributorUpdateOne) SaveX(ctx context.Context) *PostContributor { + node, err := pcuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (pcuo *PostContributorUpdateOne) Exec(ctx context.Context) error { + _, err := pcuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (pcuo *PostContributorUpdateOne) ExecX(ctx context.Context) { + if err := pcuo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (pcuo *PostContributorUpdateOne) check() error { + if v, ok := pcuo.mutation.LanguageCode(); ok { + if err := postcontributor.LanguageCodeValidator(v); err != nil { + return &ValidationError{Name: "language_code", err: fmt.Errorf(`ent: validator failed for field "PostContributor.language_code": %w`, err)} + } + } + return nil +} + +func (pcuo *PostContributorUpdateOne) sqlSave(ctx context.Context) (_node *PostContributor, err error) { + if err := pcuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(postcontributor.Table, postcontributor.Columns, sqlgraph.NewFieldSpec(postcontributor.FieldID, field.TypeInt)) + id, ok := pcuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PostContributor.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := pcuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, postcontributor.FieldID) + for _, f := range fields { + if !postcontributor.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != postcontributor.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := pcuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := pcuo.mutation.LanguageCode(); ok { + _spec.SetField(postcontributor.FieldLanguageCode, field.TypeEnum, value) + } + if pcuo.mutation.LanguageCodeCleared() { + _spec.ClearField(postcontributor.FieldLanguageCode, field.TypeEnum) + } + if value, ok := pcuo.mutation.CreatedAt(); ok { + _spec.SetField(postcontributor.FieldCreatedAt, field.TypeTime, value) + } + if pcuo.mutation.PostCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.PostTable, + Columns: []string{postcontributor.PostColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pcuo.mutation.PostIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.PostTable, + Columns: []string{postcontributor.PostColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(post.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if pcuo.mutation.ContributorCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.ContributorTable, + Columns: []string{postcontributor.ContributorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } 
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pcuo.mutation.ContributorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.ContributorTable, + Columns: []string{postcontributor.ContributorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if pcuo.mutation.RoleCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.RoleTable, + Columns: []string{postcontributor.RoleColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributorrole.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := pcuo.mutation.RoleIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: postcontributor.RoleTable, + Columns: []string{postcontributor.RoleColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributorrole.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &PostContributor{config: pcuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, pcuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{postcontributor.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + pcuo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go new file mode 100644 index 0000000..fc2a60b --- /dev/null +++ b/backend/ent/predicate/predicate.go @@ -0,0 +1,55 @@ +// Code generated by ent, DO NOT EDIT. + +package predicate + +import ( + "entgo.io/ent/dialect/sql" +) + +// Category is the predicate function for category builders. +type Category func(*sql.Selector) + +// CategoryContent is the predicate function for categorycontent builders. +type CategoryContent func(*sql.Selector) + +// Contributor is the predicate function for contributor builders. +type Contributor func(*sql.Selector) + +// ContributorRole is the predicate function for contributorrole builders. +type ContributorRole func(*sql.Selector) + +// ContributorSocialLink is the predicate function for contributorsociallink builders. +type ContributorSocialLink func(*sql.Selector) + +// Daily is the predicate function for daily builders. +type Daily func(*sql.Selector) + +// DailyCategory is the predicate function for dailycategory builders. +type DailyCategory func(*sql.Selector) + +// DailyCategoryContent is the predicate function for dailycategorycontent builders. +type DailyCategoryContent func(*sql.Selector) + +// DailyContent is the predicate function for dailycontent builders. +type DailyContent func(*sql.Selector) + +// Media is the predicate function for media builders. +type Media func(*sql.Selector) + +// Permission is the predicate function for permission builders. +type Permission func(*sql.Selector) + +// Post is the predicate function for post builders. 
+type Post func(*sql.Selector) + +// PostContent is the predicate function for postcontent builders. +type PostContent func(*sql.Selector) + +// PostContributor is the predicate function for postcontributor builders. +type PostContributor func(*sql.Selector) + +// Role is the predicate function for role builders. +type Role func(*sql.Selector) + +// User is the predicate function for user builders. +type User func(*sql.Selector) diff --git a/backend/ent/role.go b/backend/ent/role.go new file mode 100644 index 0000000..e502bab --- /dev/null +++ b/backend/ent/role.go @@ -0,0 +1,181 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + "tss-rocks-be/ent/role" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// Role is the model entity for the Role schema. +type Role struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Role name, e.g., 'admin', 'editor' + Name string `json:"name,omitempty"` + // Human readable description of the role + Description string `json:"description,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the RoleQuery when eager-loading is set. + Edges RoleEdges `json:"edges"` + selectValues sql.SelectValues +} + +// RoleEdges holds the relations/edges for other nodes in the graph. +type RoleEdges struct { + // Permissions holds the value of the permissions edge. + Permissions []*Permission `json:"permissions,omitempty"` + // Users holds the value of the users edge. + Users []*User `json:"users,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// PermissionsOrErr returns the Permissions value or an error if the edge +// was not loaded in eager-loading. +func (e RoleEdges) PermissionsOrErr() ([]*Permission, error) { + if e.loadedTypes[0] { + return e.Permissions, nil + } + return nil, &NotLoadedError{edge: "permissions"} +} + +// UsersOrErr returns the Users value or an error if the edge +// was not loaded in eager-loading. +func (e RoleEdges) UsersOrErr() ([]*User, error) { + if e.loadedTypes[1] { + return e.Users, nil + } + return nil, &NotLoadedError{edge: "users"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Role) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case role.FieldID: + values[i] = new(sql.NullInt64) + case role.FieldName, role.FieldDescription: + values[i] = new(sql.NullString) + case role.FieldCreatedAt, role.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Role fields. 
+func (r *Role) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case role.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + r.ID = int(value.Int64) + case role.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + r.Name = value.String + } + case role.FieldDescription: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field description", values[i]) + } else if value.Valid { + r.Description = value.String + } + case role.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + r.CreatedAt = value.Time + } + case role.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + r.UpdatedAt = value.Time + } + default: + r.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Role. +// This includes values selected through modifiers, order, etc. +func (r *Role) Value(name string) (ent.Value, error) { + return r.selectValues.Get(name) +} + +// QueryPermissions queries the "permissions" edge of the Role entity. +func (r *Role) QueryPermissions() *PermissionQuery { + return NewRoleClient(r.config).QueryPermissions(r) +} + +// QueryUsers queries the "users" edge of the Role entity. +func (r *Role) QueryUsers() *UserQuery { + return NewRoleClient(r.config).QueryUsers(r) +} + +// Update returns a builder for updating this Role. +// Note that you need to call Role.Unwrap() before calling this method if this Role +// was returned from a transaction, and the transaction was committed or rolled back. +func (r *Role) Update() *RoleUpdateOne { + return NewRoleClient(r.config).UpdateOne(r) +} + +// Unwrap unwraps the Role entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (r *Role) Unwrap() *Role { + _tx, ok := r.config.driver.(*txDriver) + if !ok { + panic("ent: Role is not a transactional entity") + } + r.config.driver = _tx.drv + return r +} + +// String implements the fmt.Stringer. +func (r *Role) String() string { + var builder strings.Builder + builder.WriteString("Role(") + builder.WriteString(fmt.Sprintf("id=%v, ", r.ID)) + builder.WriteString("name=") + builder.WriteString(r.Name) + builder.WriteString(", ") + builder.WriteString("description=") + builder.WriteString(r.Description) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(r.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(r.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// Roles is a parsable slice of Role. +type Roles []*Role diff --git a/backend/ent/role/role.go b/backend/ent/role/role.go new file mode 100644 index 0000000..c281a04 --- /dev/null +++ b/backend/ent/role/role.go @@ -0,0 +1,150 @@ +// Code generated by ent, DO NOT EDIT. 
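Role.Unwrap matters once an entity created inside a transaction outlives it: it swaps the tx-bound driver back to the root one so later traversals do not hit a closed transaction. A sketch, assuming `client` and `ctx`:

    tx, err := client.Tx(ctx)
    if err != nil {
        return err
    }
    r, err := tx.Role.Create().SetName("editor").Save(ctx)
    if err != nil {
        _ = tx.Rollback()
        return err
    }
    if err := tx.Commit(); err != nil {
        return err
    }
    // The tx driver is gone after Commit; Unwrap rebinds to the root driver.
    users, err := r.Unwrap().QueryUsers().All(ctx)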
+ +package role + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the role type in the database. + Label = "role" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldDescription holds the string denoting the description field in the database. + FieldDescription = "description" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // EdgePermissions holds the string denoting the permissions edge name in mutations. + EdgePermissions = "permissions" + // EdgeUsers holds the string denoting the users edge name in mutations. + EdgeUsers = "users" + // Table holds the table name of the role in the database. + Table = "roles" + // PermissionsTable is the table that holds the permissions relation/edge. The primary key declared below. + PermissionsTable = "role_permissions" + // PermissionsInverseTable is the table name for the Permission entity. + // It exists in this package in order to avoid circular dependency with the "permission" package. + PermissionsInverseTable = "permissions" + // UsersTable is the table that holds the users relation/edge. The primary key declared below. + UsersTable = "user_roles" + // UsersInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UsersInverseTable = "users" +) + +// Columns holds all SQL columns for role fields. +var Columns = []string{ + FieldID, + FieldName, + FieldDescription, + FieldCreatedAt, + FieldUpdatedAt, +} + +var ( + // PermissionsPrimaryKey and PermissionsColumn2 are the table columns denoting the + // primary key for the permissions relation (M2M). + PermissionsPrimaryKey = []string{"role_id", "permission_id"} + // UsersPrimaryKey and UsersColumn2 are the table columns denoting the + // primary key for the users relation (M2M). + UsersPrimaryKey = []string{"user_id", "role_id"} +) + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// OrderOption defines the ordering options for the Role queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. 
+func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByPermissionsCount orders the results by permissions count. +func ByPermissionsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newPermissionsStep(), opts...) + } +} + +// ByPermissions orders the results by permissions terms. +func ByPermissions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPermissionsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByUsersCount orders the results by users count. +func ByUsersCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUsersStep(), opts...) + } +} + +// ByUsers orders the results by users terms. +func ByUsers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUsersStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newPermissionsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PermissionsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, PermissionsTable, PermissionsPrimaryKey...), + ) +} +func newUsersStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UsersInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, UsersTable, UsersPrimaryKey...), + ) +} diff --git a/backend/ent/role/where.go b/backend/ent/role/where.go new file mode 100644 index 0000000..40efd79 --- /dev/null +++ b/backend/ent/role/where.go @@ -0,0 +1,357 @@ +// Code generated by ent, DO NOT EDIT. + +package role + +import ( + "time" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Role { + return predicate.Role(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Role { + return predicate.Role(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Role { + return predicate.Role(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Role { + return predicate.Role(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Role { + return predicate.Role(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Role { + return predicate.Role(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Role { + return predicate.Role(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Role { + return predicate.Role(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. 
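ByPermissionsCount and ByUsersCount delegate to sqlgraph.OrderByNeighborsCount, so roles can be sorted by relationship cardinality without writing the join by hand. A sketch, with `sql` being entgo.io/ent/dialect/sql:

    // Most-assigned roles first, with name as a stable tiebreaker.
    roles, err := client.Role.Query().
        Order(role.ByUsersCount(sql.OrderDesc()), role.ByName()).
        All(ctx)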
+func IDLTE(id int) predicate.Role { + return predicate.Role(sql.FieldLTE(FieldID, id)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Role { + return predicate.Role(sql.FieldEQ(FieldName, v)) +} + +// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. +func Description(v string) predicate.Role { + return predicate.Role(sql.FieldEQ(FieldDescription, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Role { + return predicate.Role(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Role { + return predicate.Role(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Role { + return predicate.Role(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Role { + return predicate.Role(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Role { + return predicate.Role(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Role { + return predicate.Role(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Role { + return predicate.Role(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.Role { + return predicate.Role(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Role { + return predicate.Role(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Role { + return predicate.Role(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Role { + return predicate.Role(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Role { + return predicate.Role(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Role { + return predicate.Role(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Role { + return predicate.Role(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Role { + return predicate.Role(sql.FieldContainsFold(FieldName, v)) +} + +// DescriptionEQ applies the EQ predicate on the "description" field. +func DescriptionEQ(v string) predicate.Role { + return predicate.Role(sql.FieldEQ(FieldDescription, v)) +} + +// DescriptionNEQ applies the NEQ predicate on the "description" field. +func DescriptionNEQ(v string) predicate.Role { + return predicate.Role(sql.FieldNEQ(FieldDescription, v)) +} + +// DescriptionIn applies the In predicate on the "description" field. 
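+//
+// Illustrative usage (editorial sketch; "client"/"ctx" are assumed and the
+// description values are placeholders):
+//
+//	matches, err := client.Role.Query().
+//		Where(role.DescriptionIn("read-only access", "read-write access")).
+//		All(ctx)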
+func DescriptionIn(vs ...string) predicate.Role { + return predicate.Role(sql.FieldIn(FieldDescription, vs...)) +} + +// DescriptionNotIn applies the NotIn predicate on the "description" field. +func DescriptionNotIn(vs ...string) predicate.Role { + return predicate.Role(sql.FieldNotIn(FieldDescription, vs...)) +} + +// DescriptionGT applies the GT predicate on the "description" field. +func DescriptionGT(v string) predicate.Role { + return predicate.Role(sql.FieldGT(FieldDescription, v)) +} + +// DescriptionGTE applies the GTE predicate on the "description" field. +func DescriptionGTE(v string) predicate.Role { + return predicate.Role(sql.FieldGTE(FieldDescription, v)) +} + +// DescriptionLT applies the LT predicate on the "description" field. +func DescriptionLT(v string) predicate.Role { + return predicate.Role(sql.FieldLT(FieldDescription, v)) +} + +// DescriptionLTE applies the LTE predicate on the "description" field. +func DescriptionLTE(v string) predicate.Role { + return predicate.Role(sql.FieldLTE(FieldDescription, v)) +} + +// DescriptionContains applies the Contains predicate on the "description" field. +func DescriptionContains(v string) predicate.Role { + return predicate.Role(sql.FieldContains(FieldDescription, v)) +} + +// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. +func DescriptionHasPrefix(v string) predicate.Role { + return predicate.Role(sql.FieldHasPrefix(FieldDescription, v)) +} + +// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. +func DescriptionHasSuffix(v string) predicate.Role { + return predicate.Role(sql.FieldHasSuffix(FieldDescription, v)) +} + +// DescriptionIsNil applies the IsNil predicate on the "description" field. +func DescriptionIsNil() predicate.Role { + return predicate.Role(sql.FieldIsNull(FieldDescription)) +} + +// DescriptionNotNil applies the NotNil predicate on the "description" field. +func DescriptionNotNil() predicate.Role { + return predicate.Role(sql.FieldNotNull(FieldDescription)) +} + +// DescriptionEqualFold applies the EqualFold predicate on the "description" field. +func DescriptionEqualFold(v string) predicate.Role { + return predicate.Role(sql.FieldEqualFold(FieldDescription, v)) +} + +// DescriptionContainsFold applies the ContainsFold predicate on the "description" field. +func DescriptionContainsFold(v string) predicate.Role { + return predicate.Role(sql.FieldContainsFold(FieldDescription, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Role { + return predicate.Role(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Role { + return predicate.Role(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Role { + return predicate.Role(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Role { + return predicate.Role(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Role { + return predicate.Role(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. 
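+//
+// Illustrative usage (editorial sketch; "client"/"ctx" are assumed): fetch
+// roles created within the last seven days.
+//
+//	recent, err := client.Role.Query().
+//		Where(role.CreatedAtGTE(time.Now().AddDate(0, 0, -7))).
+//		All(ctx)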
+func CreatedAtGTE(v time.Time) predicate.Role {
+	return predicate.Role(sql.FieldGTE(FieldCreatedAt, v))
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.Role {
+	return predicate.Role(sql.FieldLT(FieldCreatedAt, v))
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.Role {
+	return predicate.Role(sql.FieldLTE(FieldCreatedAt, v))
+}
+
+// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
+func UpdatedAtEQ(v time.Time) predicate.Role {
+	return predicate.Role(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
+func UpdatedAtNEQ(v time.Time) predicate.Role {
+	return predicate.Role(sql.FieldNEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtIn applies the In predicate on the "updated_at" field.
+func UpdatedAtIn(vs ...time.Time) predicate.Role {
+	return predicate.Role(sql.FieldIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
+func UpdatedAtNotIn(vs ...time.Time) predicate.Role {
+	return predicate.Role(sql.FieldNotIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtGT applies the GT predicate on the "updated_at" field.
+func UpdatedAtGT(v time.Time) predicate.Role {
+	return predicate.Role(sql.FieldGT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
+func UpdatedAtGTE(v time.Time) predicate.Role {
+	return predicate.Role(sql.FieldGTE(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLT applies the LT predicate on the "updated_at" field.
+func UpdatedAtLT(v time.Time) predicate.Role {
+	return predicate.Role(sql.FieldLT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
+func UpdatedAtLTE(v time.Time) predicate.Role {
+	return predicate.Role(sql.FieldLTE(FieldUpdatedAt, v))
+}
+
+// HasPermissions applies the HasEdge predicate on the "permissions" edge.
+func HasPermissions() predicate.Role {
+	return predicate.Role(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.M2M, false, PermissionsTable, PermissionsPrimaryKey...),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasPermissionsWith applies the HasEdge predicate on the "permissions" edge with given conditions (other predicates).
+func HasPermissionsWith(preds ...predicate.Permission) predicate.Role {
+	return predicate.Role(func(s *sql.Selector) {
+		step := newPermissionsStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// HasUsers applies the HasEdge predicate on the "users" edge.
+func HasUsers() predicate.Role {
+	return predicate.Role(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.M2M, true, UsersTable, UsersPrimaryKey...),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasUsersWith applies the HasEdge predicate on the "users" edge with given conditions (other predicates).
+func HasUsersWith(preds ...predicate.User) predicate.Role {
+	return predicate.Role(func(s *sql.Selector) {
+		step := newUsersStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
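+//
+// Illustrative usage (editorial sketch; "client"/"ctx" are assumed). Note
+// that Where already combines its variadic predicates with AND, so And is
+// mostly useful inside Or/Not compositions:
+//
+//	roles, err := client.Role.Query().
+//		Where(role.Or(
+//			role.NameHasPrefix("admin"),
+//			role.And(role.DescriptionNotNil(), role.NameContains("editor")),
+//		)).
+//		All(ctx)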
+func And(predicates ...predicate.Role) predicate.Role { + return predicate.Role(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Role) predicate.Role { + return predicate.Role(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Role) predicate.Role { + return predicate.Role(sql.NotPredicates(p)) +} diff --git a/backend/ent/role_create.go b/backend/ent/role_create.go new file mode 100644 index 0000000..0c4760c --- /dev/null +++ b/backend/ent/role_create.go @@ -0,0 +1,327 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/permission" + "tss-rocks-be/ent/role" + "tss-rocks-be/ent/user" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// RoleCreate is the builder for creating a Role entity. +type RoleCreate struct { + config + mutation *RoleMutation + hooks []Hook +} + +// SetName sets the "name" field. +func (rc *RoleCreate) SetName(s string) *RoleCreate { + rc.mutation.SetName(s) + return rc +} + +// SetDescription sets the "description" field. +func (rc *RoleCreate) SetDescription(s string) *RoleCreate { + rc.mutation.SetDescription(s) + return rc +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (rc *RoleCreate) SetNillableDescription(s *string) *RoleCreate { + if s != nil { + rc.SetDescription(*s) + } + return rc +} + +// SetCreatedAt sets the "created_at" field. +func (rc *RoleCreate) SetCreatedAt(t time.Time) *RoleCreate { + rc.mutation.SetCreatedAt(t) + return rc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (rc *RoleCreate) SetNillableCreatedAt(t *time.Time) *RoleCreate { + if t != nil { + rc.SetCreatedAt(*t) + } + return rc +} + +// SetUpdatedAt sets the "updated_at" field. +func (rc *RoleCreate) SetUpdatedAt(t time.Time) *RoleCreate { + rc.mutation.SetUpdatedAt(t) + return rc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (rc *RoleCreate) SetNillableUpdatedAt(t *time.Time) *RoleCreate { + if t != nil { + rc.SetUpdatedAt(*t) + } + return rc +} + +// AddPermissionIDs adds the "permissions" edge to the Permission entity by IDs. +func (rc *RoleCreate) AddPermissionIDs(ids ...int) *RoleCreate { + rc.mutation.AddPermissionIDs(ids...) + return rc +} + +// AddPermissions adds the "permissions" edges to the Permission entity. +func (rc *RoleCreate) AddPermissions(p ...*Permission) *RoleCreate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return rc.AddPermissionIDs(ids...) +} + +// AddUserIDs adds the "users" edge to the User entity by IDs. +func (rc *RoleCreate) AddUserIDs(ids ...int) *RoleCreate { + rc.mutation.AddUserIDs(ids...) + return rc +} + +// AddUsers adds the "users" edges to the User entity. +func (rc *RoleCreate) AddUsers(u ...*User) *RoleCreate { + ids := make([]int, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return rc.AddUserIDs(ids...) +} + +// Mutation returns the RoleMutation object of the builder. +func (rc *RoleCreate) Mutation() *RoleMutation { + return rc.mutation +} + +// Save creates the Role in the database. +func (rc *RoleCreate) Save(ctx context.Context) (*Role, error) { + rc.defaults() + return withHooks(ctx, rc.sqlSave, rc.mutation, rc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. 
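+//
+// Illustrative usage (editorial sketch; "client"/"ctx" are assumed and the
+// field values are placeholders):
+//
+//	r := client.Role.Create().
+//		SetName("admin").
+//		SetDescription("full administrative access").
+//		SaveX(ctx)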
+func (rc *RoleCreate) SaveX(ctx context.Context) *Role { + v, err := rc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (rc *RoleCreate) Exec(ctx context.Context) error { + _, err := rc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (rc *RoleCreate) ExecX(ctx context.Context) { + if err := rc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (rc *RoleCreate) defaults() { + if _, ok := rc.mutation.CreatedAt(); !ok { + v := role.DefaultCreatedAt() + rc.mutation.SetCreatedAt(v) + } + if _, ok := rc.mutation.UpdatedAt(); !ok { + v := role.DefaultUpdatedAt() + rc.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (rc *RoleCreate) check() error { + if _, ok := rc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Role.name"`)} + } + if v, ok := rc.mutation.Name(); ok { + if err := role.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Role.name": %w`, err)} + } + } + if _, ok := rc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Role.created_at"`)} + } + if _, ok := rc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Role.updated_at"`)} + } + return nil +} + +func (rc *RoleCreate) sqlSave(ctx context.Context) (*Role, error) { + if err := rc.check(); err != nil { + return nil, err + } + _node, _spec := rc.createSpec() + if err := sqlgraph.CreateNode(ctx, rc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + rc.mutation.id = &_node.ID + rc.mutation.done = true + return _node, nil +} + +func (rc *RoleCreate) createSpec() (*Role, *sqlgraph.CreateSpec) { + var ( + _node = &Role{config: rc.config} + _spec = sqlgraph.NewCreateSpec(role.Table, sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt)) + ) + if value, ok := rc.mutation.Name(); ok { + _spec.SetField(role.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := rc.mutation.Description(); ok { + _spec.SetField(role.FieldDescription, field.TypeString, value) + _node.Description = value + } + if value, ok := rc.mutation.CreatedAt(); ok { + _spec.SetField(role.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := rc.mutation.UpdatedAt(); ok { + _spec.SetField(role.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if nodes := rc.mutation.PermissionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: role.PermissionsTable, + Columns: role.PermissionsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(permission.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := rc.mutation.UsersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: role.UsersTable, + Columns: role.UsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + 
} + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// RoleCreateBulk is the builder for creating many Role entities in bulk. +type RoleCreateBulk struct { + config + err error + builders []*RoleCreate +} + +// Save creates the Role entities in the database. +func (rcb *RoleCreateBulk) Save(ctx context.Context) ([]*Role, error) { + if rcb.err != nil { + return nil, rcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(rcb.builders)) + nodes := make([]*Role, len(rcb.builders)) + mutators := make([]Mutator, len(rcb.builders)) + for i := range rcb.builders { + func(i int, root context.Context) { + builder := rcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*RoleMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, rcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, rcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, rcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (rcb *RoleCreateBulk) SaveX(ctx context.Context) []*Role { + v, err := rcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (rcb *RoleCreateBulk) Exec(ctx context.Context) error { + _, err := rcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (rcb *RoleCreateBulk) ExecX(ctx context.Context) { + if err := rcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/role_delete.go b/backend/ent/role_delete.go new file mode 100644 index 0000000..131945d --- /dev/null +++ b/backend/ent/role_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "tss-rocks-be/ent/predicate" + "tss-rocks-be/ent/role" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// RoleDelete is the builder for deleting a Role entity. +type RoleDelete struct { + config + hooks []Hook + mutation *RoleMutation +} + +// Where appends a list predicates to the RoleDelete builder. +func (rd *RoleDelete) Where(ps ...predicate.Role) *RoleDelete { + rd.mutation.Where(ps...) + return rd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (rd *RoleDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, rd.sqlExec, rd.mutation, rd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
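+//
+// Illustrative usage (editorial sketch; "client"/"ctx" are assumed): delete
+// every role whose name starts with "tmp-" and panic on failure.
+//
+//	n := client.Role.Delete().
+//		Where(role.NameHasPrefix("tmp-")).
+//		ExecX(ctx)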
+func (rd *RoleDelete) ExecX(ctx context.Context) int { + n, err := rd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (rd *RoleDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(role.Table, sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt)) + if ps := rd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, rd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + rd.mutation.done = true + return affected, err +} + +// RoleDeleteOne is the builder for deleting a single Role entity. +type RoleDeleteOne struct { + rd *RoleDelete +} + +// Where appends a list predicates to the RoleDelete builder. +func (rdo *RoleDeleteOne) Where(ps ...predicate.Role) *RoleDeleteOne { + rdo.rd.mutation.Where(ps...) + return rdo +} + +// Exec executes the deletion query. +func (rdo *RoleDeleteOne) Exec(ctx context.Context) error { + n, err := rdo.rd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{role.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (rdo *RoleDeleteOne) ExecX(ctx context.Context) { + if err := rdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/role_query.go b/backend/ent/role_query.go new file mode 100644 index 0000000..8bc9df8 --- /dev/null +++ b/backend/ent/role_query.go @@ -0,0 +1,742 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + "tss-rocks-be/ent/permission" + "tss-rocks-be/ent/predicate" + "tss-rocks-be/ent/role" + "tss-rocks-be/ent/user" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// RoleQuery is the builder for querying Role entities. +type RoleQuery struct { + config + ctx *QueryContext + order []role.OrderOption + inters []Interceptor + predicates []predicate.Role + withPermissions *PermissionQuery + withUsers *UserQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the RoleQuery builder. +func (rq *RoleQuery) Where(ps ...predicate.Role) *RoleQuery { + rq.predicates = append(rq.predicates, ps...) + return rq +} + +// Limit the number of records to be returned by this query. +func (rq *RoleQuery) Limit(limit int) *RoleQuery { + rq.ctx.Limit = &limit + return rq +} + +// Offset to start from. +func (rq *RoleQuery) Offset(offset int) *RoleQuery { + rq.ctx.Offset = &offset + return rq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (rq *RoleQuery) Unique(unique bool) *RoleQuery { + rq.ctx.Unique = &unique + return rq +} + +// Order specifies how the records should be ordered. +func (rq *RoleQuery) Order(o ...role.OrderOption) *RoleQuery { + rq.order = append(rq.order, o...) + return rq +} + +// QueryPermissions chains the current query on the "permissions" edge. 
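+//
+// Illustrative usage (editorial sketch; "client"/"ctx" are assumed): traverse
+// the M2M edge to load the permissions attached to the "editor" role.
+//
+//	perms, err := client.Role.Query().
+//		Where(role.Name("editor")).
+//		QueryPermissions().
+//		All(ctx)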
+func (rq *RoleQuery) QueryPermissions() *PermissionQuery { + query := (&PermissionClient{config: rq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := rq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := rq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(role.Table, role.FieldID, selector), + sqlgraph.To(permission.Table, permission.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, role.PermissionsTable, role.PermissionsPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(rq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUsers chains the current query on the "users" edge. +func (rq *RoleQuery) QueryUsers() *UserQuery { + query := (&UserClient{config: rq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := rq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := rq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(role.Table, role.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, role.UsersTable, role.UsersPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(rq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Role entity from the query. +// Returns a *NotFoundError when no Role was found. +func (rq *RoleQuery) First(ctx context.Context) (*Role, error) { + nodes, err := rq.Limit(1).All(setContextOp(ctx, rq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{role.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (rq *RoleQuery) FirstX(ctx context.Context) *Role { + node, err := rq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Role ID from the query. +// Returns a *NotFoundError when no Role ID was found. +func (rq *RoleQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = rq.Limit(1).IDs(setContextOp(ctx, rq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{role.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (rq *RoleQuery) FirstIDX(ctx context.Context) int { + id, err := rq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Role entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Role entity is found. +// Returns a *NotFoundError when no Role entities are found. +func (rq *RoleQuery) Only(ctx context.Context) (*Role, error) { + nodes, err := rq.Limit(2).All(setContextOp(ctx, rq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{role.Label} + default: + return nil, &NotSingularError{role.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (rq *RoleQuery) OnlyX(ctx context.Context) *Role { + node, err := rq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Role ID in the query. +// Returns a *NotSingularError when more than one Role ID is found. 
+// Returns a *NotFoundError when no entities are found. +func (rq *RoleQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = rq.Limit(2).IDs(setContextOp(ctx, rq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{role.Label} + default: + err = &NotSingularError{role.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (rq *RoleQuery) OnlyIDX(ctx context.Context) int { + id, err := rq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Roles. +func (rq *RoleQuery) All(ctx context.Context) ([]*Role, error) { + ctx = setContextOp(ctx, rq.ctx, ent.OpQueryAll) + if err := rq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Role, *RoleQuery]() + return withInterceptors[[]*Role](ctx, rq, qr, rq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (rq *RoleQuery) AllX(ctx context.Context) []*Role { + nodes, err := rq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Role IDs. +func (rq *RoleQuery) IDs(ctx context.Context) (ids []int, err error) { + if rq.ctx.Unique == nil && rq.path != nil { + rq.Unique(true) + } + ctx = setContextOp(ctx, rq.ctx, ent.OpQueryIDs) + if err = rq.Select(role.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (rq *RoleQuery) IDsX(ctx context.Context) []int { + ids, err := rq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (rq *RoleQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, rq.ctx, ent.OpQueryCount) + if err := rq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, rq, querierCount[*RoleQuery](), rq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (rq *RoleQuery) CountX(ctx context.Context) int { + count, err := rq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (rq *RoleQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, rq.ctx, ent.OpQueryExist) + switch _, err := rq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (rq *RoleQuery) ExistX(ctx context.Context) bool { + exist, err := rq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the RoleQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (rq *RoleQuery) Clone() *RoleQuery { + if rq == nil { + return nil + } + return &RoleQuery{ + config: rq.config, + ctx: rq.ctx.Clone(), + order: append([]role.OrderOption{}, rq.order...), + inters: append([]Interceptor{}, rq.inters...), + predicates: append([]predicate.Role{}, rq.predicates...), + withPermissions: rq.withPermissions.Clone(), + withUsers: rq.withUsers.Clone(), + // clone intermediate query. + sql: rq.sql.Clone(), + path: rq.path, + } +} + +// WithPermissions tells the query-builder to eager-load the nodes that are connected to +// the "permissions" edge. 
The optional arguments are used to configure the query builder of the edge. +func (rq *RoleQuery) WithPermissions(opts ...func(*PermissionQuery)) *RoleQuery { + query := (&PermissionClient{config: rq.config}).Query() + for _, opt := range opts { + opt(query) + } + rq.withPermissions = query + return rq +} + +// WithUsers tells the query-builder to eager-load the nodes that are connected to +// the "users" edge. The optional arguments are used to configure the query builder of the edge. +func (rq *RoleQuery) WithUsers(opts ...func(*UserQuery)) *RoleQuery { + query := (&UserClient{config: rq.config}).Query() + for _, opt := range opts { + opt(query) + } + rq.withUsers = query + return rq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Role.Query(). +// GroupBy(role.FieldName). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (rq *RoleQuery) GroupBy(field string, fields ...string) *RoleGroupBy { + rq.ctx.Fields = append([]string{field}, fields...) + grbuild := &RoleGroupBy{build: rq} + grbuild.flds = &rq.ctx.Fields + grbuild.label = role.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// } +// +// client.Role.Query(). +// Select(role.FieldName). +// Scan(ctx, &v) +func (rq *RoleQuery) Select(fields ...string) *RoleSelect { + rq.ctx.Fields = append(rq.ctx.Fields, fields...) + sbuild := &RoleSelect{RoleQuery: rq} + sbuild.label = role.Label + sbuild.flds, sbuild.scan = &rq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a RoleSelect configured with the given aggregations. +func (rq *RoleQuery) Aggregate(fns ...AggregateFunc) *RoleSelect { + return rq.Select().Aggregate(fns...) 
+} + +func (rq *RoleQuery) prepareQuery(ctx context.Context) error { + for _, inter := range rq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, rq); err != nil { + return err + } + } + } + for _, f := range rq.ctx.Fields { + if !role.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if rq.path != nil { + prev, err := rq.path(ctx) + if err != nil { + return err + } + rq.sql = prev + } + return nil +} + +func (rq *RoleQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Role, error) { + var ( + nodes = []*Role{} + _spec = rq.querySpec() + loadedTypes = [2]bool{ + rq.withPermissions != nil, + rq.withUsers != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Role).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Role{config: rq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, rq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := rq.withPermissions; query != nil { + if err := rq.loadPermissions(ctx, query, nodes, + func(n *Role) { n.Edges.Permissions = []*Permission{} }, + func(n *Role, e *Permission) { n.Edges.Permissions = append(n.Edges.Permissions, e) }); err != nil { + return nil, err + } + } + if query := rq.withUsers; query != nil { + if err := rq.loadUsers(ctx, query, nodes, + func(n *Role) { n.Edges.Users = []*User{} }, + func(n *Role, e *User) { n.Edges.Users = append(n.Edges.Users, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (rq *RoleQuery) loadPermissions(ctx context.Context, query *PermissionQuery, nodes []*Role, init func(*Role), assign func(*Role, *Permission)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int]*Role) + nids := make(map[int]map[*Role]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(role.PermissionsTable) + s.Join(joinT).On(s.C(permission.FieldID), joinT.C(role.PermissionsPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(role.PermissionsPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(role.PermissionsPrimaryKey[0])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := int(values[0].(*sql.NullInt64).Int64) + inValue := int(values[1].(*sql.NullInt64).Int64) + if nids[inValue] == nil { + nids[inValue] = map[*Role]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*Permission](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "permissions" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} +func (rq *RoleQuery) loadUsers(ctx context.Context, query *UserQuery, nodes []*Role, init func(*Role), assign func(*Role, *User)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int]*Role) + nids := make(map[int]map[*Role]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(role.UsersTable) + s.Join(joinT).On(s.C(user.FieldID), joinT.C(role.UsersPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(role.UsersPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(role.UsersPrimaryKey[1])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := int(values[0].(*sql.NullInt64).Int64) + inValue := int(values[1].(*sql.NullInt64).Int64) + if nids[inValue] == nil { + nids[inValue] = map[*Role]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*User](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "users" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} + +func (rq *RoleQuery) sqlCount(ctx context.Context) (int, error) { + _spec := rq.querySpec() + _spec.Node.Columns = rq.ctx.Fields + if len(rq.ctx.Fields) > 0 { + _spec.Unique = rq.ctx.Unique != nil && *rq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, rq.driver, _spec) +} + +func (rq *RoleQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(role.Table, role.Columns, sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt)) + _spec.From = rq.sql + if unique := rq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if rq.path != nil { + _spec.Unique = true + } + if fields := rq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, role.FieldID) + for i := range fields { + if fields[i] != role.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := rq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := rq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := rq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := rq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (rq *RoleQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(rq.driver.Dialect()) + t1 := builder.Table(role.Table) + columns := rq.ctx.Fields + if len(columns) == 0 { + columns = role.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if rq.sql != nil { + selector = rq.sql + selector.Select(selector.Columns(columns...)...) + } + if rq.ctx.Unique != nil && *rq.ctx.Unique { + selector.Distinct() + } + for _, p := range rq.predicates { + p(selector) + } + for _, p := range rq.order { + p(selector) + } + if offset := rq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := rq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// RoleGroupBy is the group-by builder for Role entities. 
+type RoleGroupBy struct { + selector + build *RoleQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (rgb *RoleGroupBy) Aggregate(fns ...AggregateFunc) *RoleGroupBy { + rgb.fns = append(rgb.fns, fns...) + return rgb +} + +// Scan applies the selector query and scans the result into the given value. +func (rgb *RoleGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, rgb.build.ctx, ent.OpQueryGroupBy) + if err := rgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*RoleQuery, *RoleGroupBy](ctx, rgb.build, rgb, rgb.build.inters, v) +} + +func (rgb *RoleGroupBy) sqlScan(ctx context.Context, root *RoleQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(rgb.fns)) + for _, fn := range rgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*rgb.flds)+len(rgb.fns)) + for _, f := range *rgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*rgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := rgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// RoleSelect is the builder for selecting fields of Role entities. +type RoleSelect struct { + *RoleQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (rs *RoleSelect) Aggregate(fns ...AggregateFunc) *RoleSelect { + rs.fns = append(rs.fns, fns...) + return rs +} + +// Scan applies the selector query and scans the result into the given value. +func (rs *RoleSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, rs.ctx, ent.OpQuerySelect) + if err := rs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*RoleQuery, *RoleSelect](ctx, rs.RoleQuery, rs, rs.inters, v) +} + +func (rs *RoleSelect) sqlScan(ctx context.Context, root *RoleQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(rs.fns)) + for _, fn := range rs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*rs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := rs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/role_update.go b/backend/ent/role_update.go new file mode 100644 index 0000000..0b21e81 --- /dev/null +++ b/backend/ent/role_update.go @@ -0,0 +1,650 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/permission" + "tss-rocks-be/ent/predicate" + "tss-rocks-be/ent/role" + "tss-rocks-be/ent/user" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// RoleUpdate is the builder for updating Role entities. +type RoleUpdate struct { + config + hooks []Hook + mutation *RoleMutation +} + +// Where appends a list predicates to the RoleUpdate builder. 
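+//
+// Illustrative usage (editorial sketch; "client"/"ctx" are assumed): a
+// predicate-scoped bulk update that reports the number of affected rows.
+//
+//	n, err := client.Role.Update().
+//		Where(role.NameHasPrefix("legacy-")).
+//		SetDescription("deprecated role").
+//		Save(ctx)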
+func (ru *RoleUpdate) Where(ps ...predicate.Role) *RoleUpdate { + ru.mutation.Where(ps...) + return ru +} + +// SetName sets the "name" field. +func (ru *RoleUpdate) SetName(s string) *RoleUpdate { + ru.mutation.SetName(s) + return ru +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (ru *RoleUpdate) SetNillableName(s *string) *RoleUpdate { + if s != nil { + ru.SetName(*s) + } + return ru +} + +// SetDescription sets the "description" field. +func (ru *RoleUpdate) SetDescription(s string) *RoleUpdate { + ru.mutation.SetDescription(s) + return ru +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (ru *RoleUpdate) SetNillableDescription(s *string) *RoleUpdate { + if s != nil { + ru.SetDescription(*s) + } + return ru +} + +// ClearDescription clears the value of the "description" field. +func (ru *RoleUpdate) ClearDescription() *RoleUpdate { + ru.mutation.ClearDescription() + return ru +} + +// SetUpdatedAt sets the "updated_at" field. +func (ru *RoleUpdate) SetUpdatedAt(t time.Time) *RoleUpdate { + ru.mutation.SetUpdatedAt(t) + return ru +} + +// AddPermissionIDs adds the "permissions" edge to the Permission entity by IDs. +func (ru *RoleUpdate) AddPermissionIDs(ids ...int) *RoleUpdate { + ru.mutation.AddPermissionIDs(ids...) + return ru +} + +// AddPermissions adds the "permissions" edges to the Permission entity. +func (ru *RoleUpdate) AddPermissions(p ...*Permission) *RoleUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return ru.AddPermissionIDs(ids...) +} + +// AddUserIDs adds the "users" edge to the User entity by IDs. +func (ru *RoleUpdate) AddUserIDs(ids ...int) *RoleUpdate { + ru.mutation.AddUserIDs(ids...) + return ru +} + +// AddUsers adds the "users" edges to the User entity. +func (ru *RoleUpdate) AddUsers(u ...*User) *RoleUpdate { + ids := make([]int, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return ru.AddUserIDs(ids...) +} + +// Mutation returns the RoleMutation object of the builder. +func (ru *RoleUpdate) Mutation() *RoleMutation { + return ru.mutation +} + +// ClearPermissions clears all "permissions" edges to the Permission entity. +func (ru *RoleUpdate) ClearPermissions() *RoleUpdate { + ru.mutation.ClearPermissions() + return ru +} + +// RemovePermissionIDs removes the "permissions" edge to Permission entities by IDs. +func (ru *RoleUpdate) RemovePermissionIDs(ids ...int) *RoleUpdate { + ru.mutation.RemovePermissionIDs(ids...) + return ru +} + +// RemovePermissions removes "permissions" edges to Permission entities. +func (ru *RoleUpdate) RemovePermissions(p ...*Permission) *RoleUpdate { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return ru.RemovePermissionIDs(ids...) +} + +// ClearUsers clears all "users" edges to the User entity. +func (ru *RoleUpdate) ClearUsers() *RoleUpdate { + ru.mutation.ClearUsers() + return ru +} + +// RemoveUserIDs removes the "users" edge to User entities by IDs. +func (ru *RoleUpdate) RemoveUserIDs(ids ...int) *RoleUpdate { + ru.mutation.RemoveUserIDs(ids...) + return ru +} + +// RemoveUsers removes "users" edges to User entities. +func (ru *RoleUpdate) RemoveUsers(u ...*User) *RoleUpdate { + ids := make([]int, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return ru.RemoveUserIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
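+//
+// Illustrative usage (editorial sketch; "client"/"ctx" are assumed and "perm"
+// stands for a previously loaded *Permission):
+//
+//	affected, err := client.Role.Update().
+//		Where(role.IDEQ(1)).
+//		AddPermissions(perm).
+//		Save(ctx)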
+func (ru *RoleUpdate) Save(ctx context.Context) (int, error) { + ru.defaults() + return withHooks(ctx, ru.sqlSave, ru.mutation, ru.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (ru *RoleUpdate) SaveX(ctx context.Context) int { + affected, err := ru.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (ru *RoleUpdate) Exec(ctx context.Context) error { + _, err := ru.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ru *RoleUpdate) ExecX(ctx context.Context) { + if err := ru.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (ru *RoleUpdate) defaults() { + if _, ok := ru.mutation.UpdatedAt(); !ok { + v := role.UpdateDefaultUpdatedAt() + ru.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (ru *RoleUpdate) check() error { + if v, ok := ru.mutation.Name(); ok { + if err := role.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Role.name": %w`, err)} + } + } + return nil +} + +func (ru *RoleUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := ru.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(role.Table, role.Columns, sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt)) + if ps := ru.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := ru.mutation.Name(); ok { + _spec.SetField(role.FieldName, field.TypeString, value) + } + if value, ok := ru.mutation.Description(); ok { + _spec.SetField(role.FieldDescription, field.TypeString, value) + } + if ru.mutation.DescriptionCleared() { + _spec.ClearField(role.FieldDescription, field.TypeString) + } + if value, ok := ru.mutation.UpdatedAt(); ok { + _spec.SetField(role.FieldUpdatedAt, field.TypeTime, value) + } + if ru.mutation.PermissionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: role.PermissionsTable, + Columns: role.PermissionsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(permission.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ru.mutation.RemovedPermissionsIDs(); len(nodes) > 0 && !ru.mutation.PermissionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: role.PermissionsTable, + Columns: role.PermissionsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(permission.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ru.mutation.PermissionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: role.PermissionsTable, + Columns: role.PermissionsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(permission.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if ru.mutation.UsersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: role.UsersTable, + Columns: role.UsersPrimaryKey, + Bidi: false, + Target: 
&sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ru.mutation.RemovedUsersIDs(); len(nodes) > 0 && !ru.mutation.UsersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: role.UsersTable, + Columns: role.UsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ru.mutation.UsersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: role.UsersTable, + Columns: role.UsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, ru.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{role.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + ru.mutation.done = true + return n, nil +} + +// RoleUpdateOne is the builder for updating a single Role entity. +type RoleUpdateOne struct { + config + fields []string + hooks []Hook + mutation *RoleMutation +} + +// SetName sets the "name" field. +func (ruo *RoleUpdateOne) SetName(s string) *RoleUpdateOne { + ruo.mutation.SetName(s) + return ruo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (ruo *RoleUpdateOne) SetNillableName(s *string) *RoleUpdateOne { + if s != nil { + ruo.SetName(*s) + } + return ruo +} + +// SetDescription sets the "description" field. +func (ruo *RoleUpdateOne) SetDescription(s string) *RoleUpdateOne { + ruo.mutation.SetDescription(s) + return ruo +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (ruo *RoleUpdateOne) SetNillableDescription(s *string) *RoleUpdateOne { + if s != nil { + ruo.SetDescription(*s) + } + return ruo +} + +// ClearDescription clears the value of the "description" field. +func (ruo *RoleUpdateOne) ClearDescription() *RoleUpdateOne { + ruo.mutation.ClearDescription() + return ruo +} + +// SetUpdatedAt sets the "updated_at" field. +func (ruo *RoleUpdateOne) SetUpdatedAt(t time.Time) *RoleUpdateOne { + ruo.mutation.SetUpdatedAt(t) + return ruo +} + +// AddPermissionIDs adds the "permissions" edge to the Permission entity by IDs. +func (ruo *RoleUpdateOne) AddPermissionIDs(ids ...int) *RoleUpdateOne { + ruo.mutation.AddPermissionIDs(ids...) + return ruo +} + +// AddPermissions adds the "permissions" edges to the Permission entity. +func (ruo *RoleUpdateOne) AddPermissions(p ...*Permission) *RoleUpdateOne { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return ruo.AddPermissionIDs(ids...) +} + +// AddUserIDs adds the "users" edge to the User entity by IDs. +func (ruo *RoleUpdateOne) AddUserIDs(ids ...int) *RoleUpdateOne { + ruo.mutation.AddUserIDs(ids...) + return ruo +} + +// AddUsers adds the "users" edges to the User entity. +func (ruo *RoleUpdateOne) AddUsers(u ...*User) *RoleUpdateOne { + ids := make([]int, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return ruo.AddUserIDs(ids...) 
+} + +// Mutation returns the RoleMutation object of the builder. +func (ruo *RoleUpdateOne) Mutation() *RoleMutation { + return ruo.mutation +} + +// ClearPermissions clears all "permissions" edges to the Permission entity. +func (ruo *RoleUpdateOne) ClearPermissions() *RoleUpdateOne { + ruo.mutation.ClearPermissions() + return ruo +} + +// RemovePermissionIDs removes the "permissions" edge to Permission entities by IDs. +func (ruo *RoleUpdateOne) RemovePermissionIDs(ids ...int) *RoleUpdateOne { + ruo.mutation.RemovePermissionIDs(ids...) + return ruo +} + +// RemovePermissions removes "permissions" edges to Permission entities. +func (ruo *RoleUpdateOne) RemovePermissions(p ...*Permission) *RoleUpdateOne { + ids := make([]int, len(p)) + for i := range p { + ids[i] = p[i].ID + } + return ruo.RemovePermissionIDs(ids...) +} + +// ClearUsers clears all "users" edges to the User entity. +func (ruo *RoleUpdateOne) ClearUsers() *RoleUpdateOne { + ruo.mutation.ClearUsers() + return ruo +} + +// RemoveUserIDs removes the "users" edge to User entities by IDs. +func (ruo *RoleUpdateOne) RemoveUserIDs(ids ...int) *RoleUpdateOne { + ruo.mutation.RemoveUserIDs(ids...) + return ruo +} + +// RemoveUsers removes "users" edges to User entities. +func (ruo *RoleUpdateOne) RemoveUsers(u ...*User) *RoleUpdateOne { + ids := make([]int, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return ruo.RemoveUserIDs(ids...) +} + +// Where appends a list predicates to the RoleUpdate builder. +func (ruo *RoleUpdateOne) Where(ps ...predicate.Role) *RoleUpdateOne { + ruo.mutation.Where(ps...) + return ruo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (ruo *RoleUpdateOne) Select(field string, fields ...string) *RoleUpdateOne { + ruo.fields = append([]string{field}, fields...) + return ruo +} + +// Save executes the query and returns the updated Role entity. +func (ruo *RoleUpdateOne) Save(ctx context.Context) (*Role, error) { + ruo.defaults() + return withHooks(ctx, ruo.sqlSave, ruo.mutation, ruo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (ruo *RoleUpdateOne) SaveX(ctx context.Context) *Role { + node, err := ruo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (ruo *RoleUpdateOne) Exec(ctx context.Context) error { + _, err := ruo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ruo *RoleUpdateOne) ExecX(ctx context.Context) { + if err := ruo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (ruo *RoleUpdateOne) defaults() { + if _, ok := ruo.mutation.UpdatedAt(); !ok { + v := role.UpdateDefaultUpdatedAt() + ruo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (ruo *RoleUpdateOne) check() error { + if v, ok := ruo.mutation.Name(); ok { + if err := role.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Role.name": %w`, err)} + } + } + return nil +} + +func (ruo *RoleUpdateOne) sqlSave(ctx context.Context) (_node *Role, err error) { + if err := ruo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(role.Table, role.Columns, sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt)) + id, ok := ruo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Role.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := ruo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, role.FieldID) + for _, f := range fields { + if !role.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != role.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := ruo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := ruo.mutation.Name(); ok { + _spec.SetField(role.FieldName, field.TypeString, value) + } + if value, ok := ruo.mutation.Description(); ok { + _spec.SetField(role.FieldDescription, field.TypeString, value) + } + if ruo.mutation.DescriptionCleared() { + _spec.ClearField(role.FieldDescription, field.TypeString) + } + if value, ok := ruo.mutation.UpdatedAt(); ok { + _spec.SetField(role.FieldUpdatedAt, field.TypeTime, value) + } + if ruo.mutation.PermissionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: role.PermissionsTable, + Columns: role.PermissionsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(permission.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ruo.mutation.RemovedPermissionsIDs(); len(nodes) > 0 && !ruo.mutation.PermissionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: role.PermissionsTable, + Columns: role.PermissionsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(permission.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ruo.mutation.PermissionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: role.PermissionsTable, + Columns: role.PermissionsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(permission.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if ruo.mutation.UsersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: role.UsersTable, + Columns: role.UsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ruo.mutation.RemovedUsersIDs(); len(nodes) > 0 && !ruo.mutation.UsersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: 
role.UsersTable, + Columns: role.UsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := ruo.mutation.UsersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: role.UsersTable, + Columns: role.UsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Role{config: ruo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, ruo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{role.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + ruo.mutation.done = true + return _node, nil +} diff --git a/backend/ent/runtime.go b/backend/ent/runtime.go new file mode 100644 index 0000000..7b523e1 --- /dev/null +++ b/backend/ent/runtime.go @@ -0,0 +1,282 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "time" + "tss-rocks-be/ent/category" + "tss-rocks-be/ent/categorycontent" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/contributorrole" + "tss-rocks-be/ent/contributorsociallink" + "tss-rocks-be/ent/daily" + "tss-rocks-be/ent/dailycategory" + "tss-rocks-be/ent/dailycategorycontent" + "tss-rocks-be/ent/dailycontent" + "tss-rocks-be/ent/media" + "tss-rocks-be/ent/permission" + "tss-rocks-be/ent/post" + "tss-rocks-be/ent/postcontent" + "tss-rocks-be/ent/postcontributor" + "tss-rocks-be/ent/role" + "tss-rocks-be/ent/schema" + "tss-rocks-be/ent/user" +) + +// The init function reads all schema descriptors with runtime code +// (default values, validators, hooks and policies) and stitches it +// to their package variables. +func init() { + categoryFields := schema.Category{}.Fields() + _ = categoryFields + // categoryDescCreatedAt is the schema descriptor for created_at field. + categoryDescCreatedAt := categoryFields[0].Descriptor() + // category.DefaultCreatedAt holds the default value on creation for the created_at field. + category.DefaultCreatedAt = categoryDescCreatedAt.Default.(func() time.Time) + // categoryDescUpdatedAt is the schema descriptor for updated_at field. + categoryDescUpdatedAt := categoryFields[1].Descriptor() + // category.DefaultUpdatedAt holds the default value on creation for the updated_at field. + category.DefaultUpdatedAt = categoryDescUpdatedAt.Default.(func() time.Time) + // category.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + category.UpdateDefaultUpdatedAt = categoryDescUpdatedAt.UpdateDefault.(func() time.Time) + categorycontentFields := schema.CategoryContent{}.Fields() + _ = categorycontentFields + // categorycontentDescName is the schema descriptor for name field. + categorycontentDescName := categorycontentFields[1].Descriptor() + // categorycontent.NameValidator is a validator for the "name" field. It is called by the builders before save. + categorycontent.NameValidator = categorycontentDescName.Validators[0].(func(string) error) + // categorycontentDescSlug is the schema descriptor for slug field. 
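+
+// Illustrative sketch (not generated code): once stitched, these package
+// variables are consulted by the builders, so schema rules apply without any
+// further wiring. Assuming an *ent.Client `client` and a context `ctx`:
+//
+//	c, err := client.Category.Create().Save(ctx) // created_at/updated_at filled via category.DefaultCreatedAt/DefaultUpdatedAt
+//	err = categorycontent.NameValidator("")      // non-nil: the schema declares name as NotEmpty()
+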
+ categorycontentDescSlug := categorycontentFields[3].Descriptor() + // categorycontent.SlugValidator is a validator for the "slug" field. It is called by the builders before save. + categorycontent.SlugValidator = categorycontentDescSlug.Validators[0].(func(string) error) + contributorFields := schema.Contributor{}.Fields() + _ = contributorFields + // contributorDescName is the schema descriptor for name field. + contributorDescName := contributorFields[0].Descriptor() + // contributor.NameValidator is a validator for the "name" field. It is called by the builders before save. + contributor.NameValidator = contributorDescName.Validators[0].(func(string) error) + // contributorDescCreatedAt is the schema descriptor for created_at field. + contributorDescCreatedAt := contributorFields[3].Descriptor() + // contributor.DefaultCreatedAt holds the default value on creation for the created_at field. + contributor.DefaultCreatedAt = contributorDescCreatedAt.Default.(func() time.Time) + // contributorDescUpdatedAt is the schema descriptor for updated_at field. + contributorDescUpdatedAt := contributorFields[4].Descriptor() + // contributor.DefaultUpdatedAt holds the default value on creation for the updated_at field. + contributor.DefaultUpdatedAt = contributorDescUpdatedAt.Default.(func() time.Time) + // contributor.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + contributor.UpdateDefaultUpdatedAt = contributorDescUpdatedAt.UpdateDefault.(func() time.Time) + contributorroleFields := schema.ContributorRole{}.Fields() + _ = contributorroleFields + // contributorroleDescName is the schema descriptor for name field. + contributorroleDescName := contributorroleFields[0].Descriptor() + // contributorrole.NameValidator is a validator for the "name" field. It is called by the builders before save. + contributorrole.NameValidator = func() func(string) error { + validators := contributorroleDescName.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() + contributorsociallinkFields := schema.ContributorSocialLink{}.Fields() + _ = contributorsociallinkFields + // contributorsociallinkDescValue is the schema descriptor for value field. + contributorsociallinkDescValue := contributorsociallinkFields[2].Descriptor() + // contributorsociallink.ValueValidator is a validator for the "value" field. It is called by the builders before save. + contributorsociallink.ValueValidator = contributorsociallinkDescValue.Validators[0].(func(string) error) + dailyFields := schema.Daily{}.Fields() + _ = dailyFields + // dailyDescImageURL is the schema descriptor for image_url field. + dailyDescImageURL := dailyFields[1].Descriptor() + // daily.ImageURLValidator is a validator for the "image_url" field. It is called by the builders before save. + daily.ImageURLValidator = dailyDescImageURL.Validators[0].(func(string) error) + // dailyDescCreatedAt is the schema descriptor for created_at field. + dailyDescCreatedAt := dailyFields[2].Descriptor() + // daily.DefaultCreatedAt holds the default value on creation for the created_at field. + daily.DefaultCreatedAt = dailyDescCreatedAt.Default.(func() time.Time) + // dailyDescUpdatedAt is the schema descriptor for updated_at field. 
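+
+// Illustrative sketch (not generated code): the composed NameValidator above
+// runs each schema validator in order and returns the first failure:
+//
+//	_ = contributorrole.NameValidator("author") // nil: non-empty and on the role whitelist
+//	_ = contributorrole.NameValidator("owner")  // error: rejected by the schema's custom Validate hook
+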
+ dailyDescUpdatedAt := dailyFields[3].Descriptor() + // daily.DefaultUpdatedAt holds the default value on creation for the updated_at field. + daily.DefaultUpdatedAt = dailyDescUpdatedAt.Default.(func() time.Time) + // daily.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + daily.UpdateDefaultUpdatedAt = dailyDescUpdatedAt.UpdateDefault.(func() time.Time) + // dailyDescID is the schema descriptor for id field. + dailyDescID := dailyFields[0].Descriptor() + // daily.IDValidator is a validator for the "id" field. It is called by the builders before save. + daily.IDValidator = func() func(string) error { + validators := dailyDescID.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + validators[2].(func(string) error), + validators[3].(func(string) error), + } + return func(id string) error { + for _, fn := range fns { + if err := fn(id); err != nil { + return err + } + } + return nil + } + }() + dailycategoryFields := schema.DailyCategory{}.Fields() + _ = dailycategoryFields + // dailycategoryDescCreatedAt is the schema descriptor for created_at field. + dailycategoryDescCreatedAt := dailycategoryFields[0].Descriptor() + // dailycategory.DefaultCreatedAt holds the default value on creation for the created_at field. + dailycategory.DefaultCreatedAt = dailycategoryDescCreatedAt.Default.(func() time.Time) + // dailycategoryDescUpdatedAt is the schema descriptor for updated_at field. + dailycategoryDescUpdatedAt := dailycategoryFields[1].Descriptor() + // dailycategory.DefaultUpdatedAt holds the default value on creation for the updated_at field. + dailycategory.DefaultUpdatedAt = dailycategoryDescUpdatedAt.Default.(func() time.Time) + // dailycategory.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + dailycategory.UpdateDefaultUpdatedAt = dailycategoryDescUpdatedAt.UpdateDefault.(func() time.Time) + dailycategorycontentFields := schema.DailyCategoryContent{}.Fields() + _ = dailycategorycontentFields + // dailycategorycontentDescName is the schema descriptor for name field. + dailycategorycontentDescName := dailycategorycontentFields[1].Descriptor() + // dailycategorycontent.NameValidator is a validator for the "name" field. It is called by the builders before save. + dailycategorycontent.NameValidator = dailycategorycontentDescName.Validators[0].(func(string) error) + dailycontentFields := schema.DailyContent{}.Fields() + _ = dailycontentFields + // dailycontentDescQuote is the schema descriptor for quote field. + dailycontentDescQuote := dailycontentFields[1].Descriptor() + // dailycontent.QuoteValidator is a validator for the "quote" field. It is called by the builders before save. + dailycontent.QuoteValidator = dailycontentDescQuote.Validators[0].(func(string) error) + mediaFields := schema.Media{}.Fields() + _ = mediaFields + // mediaDescStorageID is the schema descriptor for storage_id field. + mediaDescStorageID := mediaFields[0].Descriptor() + // media.StorageIDValidator is a validator for the "storage_id" field. It is called by the builders before save. + media.StorageIDValidator = mediaDescStorageID.Validators[0].(func(string) error) + // mediaDescOriginalName is the schema descriptor for original_name field. + mediaDescOriginalName := mediaFields[1].Descriptor() + // media.OriginalNameValidator is a validator for the "original_name" field. It is called by the builders before save. 
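+
+// Illustrative sketch (not generated code): daily.IDValidator chains the four
+// rules from the Daily schema (NotEmpty, MaxLen(6), MinLen(6), Match):
+//
+//	_ = daily.IDValidator("240131") // nil: exactly six digits
+//	_ = daily.IDValidator("2401")   // error: shorter than MinLen(6)
+//	_ = daily.IDValidator("24x131") // error: fails the ^[0-9]{6}$ pattern
+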
+ media.OriginalNameValidator = mediaDescOriginalName.Validators[0].(func(string) error) + // mediaDescMimeType is the schema descriptor for mime_type field. + mediaDescMimeType := mediaFields[2].Descriptor() + // media.MimeTypeValidator is a validator for the "mime_type" field. It is called by the builders before save. + media.MimeTypeValidator = mediaDescMimeType.Validators[0].(func(string) error) + // mediaDescSize is the schema descriptor for size field. + mediaDescSize := mediaFields[3].Descriptor() + // media.SizeValidator is a validator for the "size" field. It is called by the builders before save. + media.SizeValidator = mediaDescSize.Validators[0].(func(int64) error) + // mediaDescURL is the schema descriptor for url field. + mediaDescURL := mediaFields[4].Descriptor() + // media.URLValidator is a validator for the "url" field. It is called by the builders before save. + media.URLValidator = mediaDescURL.Validators[0].(func(string) error) + // mediaDescCreatedAt is the schema descriptor for created_at field. + mediaDescCreatedAt := mediaFields[5].Descriptor() + // media.DefaultCreatedAt holds the default value on creation for the created_at field. + media.DefaultCreatedAt = mediaDescCreatedAt.Default.(func() time.Time) + // mediaDescUpdatedAt is the schema descriptor for updated_at field. + mediaDescUpdatedAt := mediaFields[6].Descriptor() + // media.DefaultUpdatedAt holds the default value on creation for the updated_at field. + media.DefaultUpdatedAt = mediaDescUpdatedAt.Default.(func() time.Time) + // media.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + media.UpdateDefaultUpdatedAt = mediaDescUpdatedAt.UpdateDefault.(func() time.Time) + permissionFields := schema.Permission{}.Fields() + _ = permissionFields + // permissionDescResource is the schema descriptor for resource field. + permissionDescResource := permissionFields[0].Descriptor() + // permission.ResourceValidator is a validator for the "resource" field. It is called by the builders before save. + permission.ResourceValidator = permissionDescResource.Validators[0].(func(string) error) + // permissionDescAction is the schema descriptor for action field. + permissionDescAction := permissionFields[1].Descriptor() + // permission.ActionValidator is a validator for the "action" field. It is called by the builders before save. + permission.ActionValidator = permissionDescAction.Validators[0].(func(string) error) + // permissionDescCreatedAt is the schema descriptor for created_at field. + permissionDescCreatedAt := permissionFields[3].Descriptor() + // permission.DefaultCreatedAt holds the default value on creation for the created_at field. + permission.DefaultCreatedAt = permissionDescCreatedAt.Default.(func() time.Time) + // permissionDescUpdatedAt is the schema descriptor for updated_at field. + permissionDescUpdatedAt := permissionFields[4].Descriptor() + // permission.DefaultUpdatedAt holds the default value on creation for the updated_at field. + permission.DefaultUpdatedAt = permissionDescUpdatedAt.Default.(func() time.Time) + // permission.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + permission.UpdateDefaultUpdatedAt = permissionDescUpdatedAt.UpdateDefault.(func() time.Time) + postFields := schema.Post{}.Fields() + _ = postFields + // postDescSlug is the schema descriptor for slug field. + postDescSlug := postFields[1].Descriptor() + // post.SlugValidator is a validator for the "slug" field. It is called by the builders before save. 
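+
+// Illustrative sketch (not generated code): each validator is typed to its
+// field, e.g. media.SizeValidator takes the int64 declared in the schema:
+//
+//	_ = media.SizeValidator(1024) // nil: Positive() accepts values > 0
+//	_ = media.SizeValidator(0)    // error: zero is rejected by Positive()
+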
+ post.SlugValidator = postDescSlug.Validators[0].(func(string) error) + // postDescCreatedAt is the schema descriptor for created_at field. + postDescCreatedAt := postFields[2].Descriptor() + // post.DefaultCreatedAt holds the default value on creation for the created_at field. + post.DefaultCreatedAt = postDescCreatedAt.Default.(func() time.Time) + // postDescUpdatedAt is the schema descriptor for updated_at field. + postDescUpdatedAt := postFields[3].Descriptor() + // post.DefaultUpdatedAt holds the default value on creation for the updated_at field. + post.DefaultUpdatedAt = postDescUpdatedAt.Default.(func() time.Time) + // post.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + post.UpdateDefaultUpdatedAt = postDescUpdatedAt.UpdateDefault.(func() time.Time) + postcontentFields := schema.PostContent{}.Fields() + _ = postcontentFields + // postcontentDescTitle is the schema descriptor for title field. + postcontentDescTitle := postcontentFields[1].Descriptor() + // postcontent.TitleValidator is a validator for the "title" field. It is called by the builders before save. + postcontent.TitleValidator = postcontentDescTitle.Validators[0].(func(string) error) + // postcontentDescContentMarkdown is the schema descriptor for content_markdown field. + postcontentDescContentMarkdown := postcontentFields[2].Descriptor() + // postcontent.ContentMarkdownValidator is a validator for the "content_markdown" field. It is called by the builders before save. + postcontent.ContentMarkdownValidator = postcontentDescContentMarkdown.Validators[0].(func(string) error) + // postcontentDescSummary is the schema descriptor for summary field. + postcontentDescSummary := postcontentFields[3].Descriptor() + // postcontent.SummaryValidator is a validator for the "summary" field. It is called by the builders before save. + postcontent.SummaryValidator = postcontentDescSummary.Validators[0].(func(string) error) + // postcontentDescSlug is the schema descriptor for slug field. + postcontentDescSlug := postcontentFields[6].Descriptor() + // postcontent.SlugValidator is a validator for the "slug" field. It is called by the builders before save. + postcontent.SlugValidator = postcontentDescSlug.Validators[0].(func(string) error) + postcontributorFields := schema.PostContributor{}.Fields() + _ = postcontributorFields + // postcontributorDescCreatedAt is the schema descriptor for created_at field. + postcontributorDescCreatedAt := postcontributorFields[1].Descriptor() + // postcontributor.DefaultCreatedAt holds the default value on creation for the created_at field. + postcontributor.DefaultCreatedAt = postcontributorDescCreatedAt.Default.(func() time.Time) + roleFields := schema.Role{}.Fields() + _ = roleFields + // roleDescName is the schema descriptor for name field. + roleDescName := roleFields[0].Descriptor() + // role.NameValidator is a validator for the "name" field. It is called by the builders before save. + role.NameValidator = roleDescName.Validators[0].(func(string) error) + // roleDescCreatedAt is the schema descriptor for created_at field. + roleDescCreatedAt := roleFields[2].Descriptor() + // role.DefaultCreatedAt holds the default value on creation for the created_at field. + role.DefaultCreatedAt = roleDescCreatedAt.Default.(func() time.Time) + // roleDescUpdatedAt is the schema descriptor for updated_at field. + roleDescUpdatedAt := roleFields[3].Descriptor() + // role.DefaultUpdatedAt holds the default value on creation for the updated_at field. 
+ role.DefaultUpdatedAt = roleDescUpdatedAt.Default.(func() time.Time) + // role.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + role.UpdateDefaultUpdatedAt = roleDescUpdatedAt.UpdateDefault.(func() time.Time) + userFields := schema.User{}.Fields() + _ = userFields + // userDescEmail is the schema descriptor for email field. + userDescEmail := userFields[0].Descriptor() + // user.EmailValidator is a validator for the "email" field. It is called by the builders before save. + user.EmailValidator = userDescEmail.Validators[0].(func(string) error) + // userDescPasswordHash is the schema descriptor for password_hash field. + userDescPasswordHash := userFields[1].Descriptor() + // user.PasswordHashValidator is a validator for the "password_hash" field. It is called by the builders before save. + user.PasswordHashValidator = userDescPasswordHash.Validators[0].(func(string) error) + // userDescCreatedAt is the schema descriptor for created_at field. + userDescCreatedAt := userFields[3].Descriptor() + // user.DefaultCreatedAt holds the default value on creation for the created_at field. + user.DefaultCreatedAt = userDescCreatedAt.Default.(func() time.Time) + // userDescUpdatedAt is the schema descriptor for updated_at field. + userDescUpdatedAt := userFields[4].Descriptor() + // user.DefaultUpdatedAt holds the default value on creation for the updated_at field. + user.DefaultUpdatedAt = userDescUpdatedAt.Default.(func() time.Time) + // user.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + user.UpdateDefaultUpdatedAt = userDescUpdatedAt.UpdateDefault.(func() time.Time) +} diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go new file mode 100644 index 0000000..3dc28ef --- /dev/null +++ b/backend/ent/runtime/runtime.go @@ -0,0 +1,10 @@ +// Code generated by ent, DO NOT EDIT. + +package runtime + +// The schema-stitching logic is generated in tss-rocks-be/ent/runtime.go + +const ( + Version = "v0.14.1" // Version of ent codegen. + Sum = "h1:fUERL506Pqr92EPHJqr8EYxbPioflJo6PudkrEA8a/s=" // Sum of ent codegen. +) diff --git a/backend/ent/schema/category.go b/backend/ent/schema/category.go new file mode 100644 index 0000000..756445f --- /dev/null +++ b/backend/ent/schema/category.go @@ -0,0 +1,33 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/edge" + "time" +) + +// Category holds the schema definition for the Category entity. +type Category struct { + ent.Schema +} + +// Fields of the Category. +func (Category) Fields() []ent.Field { + return []ent.Field{ + field.Time("created_at"). + Default(time.Now), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now), + } +} + +// Edges of the Category. +func (Category) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("contents", CategoryContent.Type), + edge.To("posts", Post.Type), + edge.To("daily_items", Daily.Type), + } +} diff --git a/backend/ent/schema/categorycontent.go b/backend/ent/schema/categorycontent.go new file mode 100644 index 0000000..5cd5047 --- /dev/null +++ b/backend/ent/schema/categorycontent.go @@ -0,0 +1,51 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/index" +) + +// CategoryContent holds the schema definition for the CategoryContent entity. +type CategoryContent struct { + ent.Schema +} + +// Fields of the CategoryContent. 
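+
+// Illustrative sketch (not generated code): the edges declared on Category
+// above become typed traversals on the generated client. `client`, `ctx` and
+// the ID are placeholders:
+//
+//	posts, err := client.Category.Query().
+//	    Where(category.ID(1)).
+//	    QueryPosts().
+//	    All(ctx)
+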
+func (CategoryContent) Fields() []ent.Field { + return []ent.Field{ + field.Enum("language_code"). + NamedValues( + "EN", "en", + "ZH_HANS", "zh-Hans", + "ZH_HANT", "zh-Hant", + ), + field.String("name"). + NotEmpty(), + field.String("description"). + Optional(), + field.String("slug"). + NotEmpty(), + } +} + +// Edges of the CategoryContent. +func (CategoryContent) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("category", Category.Type). + Ref("contents"). + Unique(), + } +} + +// Indexes of the CategoryContent. +func (CategoryContent) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("language_code", "slug"). + Unique(), + index.Fields("language_code"). + Edges("category"). + Unique(), + } +} diff --git a/backend/ent/schema/contributor.go b/backend/ent/schema/contributor.go new file mode 100644 index 0000000..8bb2261 --- /dev/null +++ b/backend/ent/schema/contributor.go @@ -0,0 +1,41 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/edge" + "time" +) + +// Contributor holds the schema definition for the Contributor entity. +type Contributor struct { + ent.Schema +} + +// Fields of the Contributor. +func (Contributor) Fields() []ent.Field { + return []ent.Field{ + field.String("name"). + NotEmpty(), + field.String("avatar_url"). + Optional(), + field.Text("bio"). + Optional(), + field.Time("created_at"). + Default(time.Now), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now), + } +} + +// Edges of the Contributor. +func (Contributor) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("user", User.Type). + Ref("contributors"). + Unique(), + edge.To("social_links", ContributorSocialLink.Type), + edge.To("posts", PostContributor.Type), + } +} diff --git a/backend/ent/schema/contributorrole.go b/backend/ent/schema/contributorrole.go new file mode 100644 index 0000000..aa308a8 --- /dev/null +++ b/backend/ent/schema/contributorrole.go @@ -0,0 +1,43 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/edge" + "fmt" +) + +// ContributorRole holds the schema definition for the ContributorRole entity. +type ContributorRole struct { + ent.Schema +} + +// Fields of the ContributorRole. +func (ContributorRole) Fields() []ent.Field { + return []ent.Field{ + field.String("name"). + Unique(). + NotEmpty(). + Validate(func(s string) error { + // Check that the name is one of the default roles + validRoles := map[string]bool{ + "submitter": true, + "author": true, + "reviewer": true, + "translator": true, + "translation_reviewer": true, + } + if !validRoles[s] { + return fmt.Errorf("invalid role name: %s", s) + } + return nil + }), + } +} + +// Edges of the ContributorRole. +func (ContributorRole) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("post_contributors", PostContributor.Type), + } +} diff --git a/backend/ent/schema/contributorsociallink.go b/backend/ent/schema/contributorsociallink.go new file mode 100644 index 0000000..8dc4438 --- /dev/null +++ b/backend/ent/schema/contributorsociallink.go @@ -0,0 +1,40 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" +) + +// ContributorSocialLink holds the schema definition for the ContributorSocialLink entity. +type ContributorSocialLink struct { + ent.Schema +} + +// Fields of the ContributorSocialLink. +func (ContributorSocialLink) Fields() []ent.Field { + return []ent.Field{ + field.Enum("type").
+ NamedValues( + "TWITTER", "twitter", + "FACEBOOK", "facebook", + "INSTAGRAM", "instagram", + "LINKEDIN", "linkedin", + "GITHUB", "github", + "WEBSITE", "website", + ), + field.String("name"). + Optional(), + field.String("value"). + NotEmpty(), + } +} + +// Edges of the ContributorSocialLink. +func (ContributorSocialLink) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("contributor", Contributor.Type). + Ref("social_links"). + Unique(), + } +} diff --git a/backend/ent/schema/daily.go b/backend/ent/schema/daily.go new file mode 100644 index 0000000..918960c --- /dev/null +++ b/backend/ent/schema/daily.go @@ -0,0 +1,45 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/edge" + "time" + "regexp" +) + +// Daily holds the schema definition for the Daily entity. +type Daily struct { + ent.Schema +} + +// Fields of the Daily. +func (Daily) Fields() []ent.Field { + return []ent.Field{ + field.String("id"). + Match(regexp.MustCompile(`^[0-9]{6}$`)). + Unique(). + Immutable(). + NotEmpty(). + MaxLen(6). + MinLen(6), + field.String("image_url"). + NotEmpty(), + field.Time("created_at"). + Default(time.Now), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now), + } +} + +// Edges of the Daily. +func (Daily) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("category", Category.Type). + Ref("daily_items"). + Unique(). + Required(), + edge.To("contents", DailyContent.Type), + } +} diff --git a/backend/ent/schema/dailycategory.go b/backend/ent/schema/dailycategory.go new file mode 100644 index 0000000..8646d69 --- /dev/null +++ b/backend/ent/schema/dailycategory.go @@ -0,0 +1,32 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/edge" + "time" +) + +// DailyCategory holds the schema definition for the DailyCategory entity. +type DailyCategory struct { + ent.Schema +} + +// Fields of the DailyCategory. +func (DailyCategory) Fields() []ent.Field { + return []ent.Field{ + field.Time("created_at"). + Default(time.Now), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now), + } +} + +// Edges of the DailyCategory. +func (DailyCategory) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("contents", DailyCategoryContent.Type), + edge.To("daily_items", Daily.Type), + } +} diff --git a/backend/ent/schema/dailycategorycontent.go b/backend/ent/schema/dailycategorycontent.go new file mode 100644 index 0000000..432041d --- /dev/null +++ b/backend/ent/schema/dailycategorycontent.go @@ -0,0 +1,41 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/index" +) + +// DailyCategoryContent holds the schema definition for the DailyCategoryContent entity. +type DailyCategoryContent struct { + ent.Schema +} + +// Fields of the DailyCategoryContent. +func (DailyCategoryContent) Fields() []ent.Field { + return []ent.Field{ + field.Enum("language_code"). + Values("en", "zh-Hans", "zh-Hant"), + field.String("name"). + NotEmpty(), + } +} + +// Edges of the DailyCategoryContent. +func (DailyCategoryContent) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("category", DailyCategory.Type). + Ref("contents"). + Unique(), + } +} + +// Indexes of the DailyCategoryContent. +func (DailyCategoryContent) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("language_code"). + Edges("category"). 
+ Unique(), + } +} diff --git a/backend/ent/schema/dailycontent.go b/backend/ent/schema/dailycontent.go new file mode 100644 index 0000000..1771ea9 --- /dev/null +++ b/backend/ent/schema/dailycontent.go @@ -0,0 +1,45 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/index" +) + +// DailyContent holds the schema definition for the DailyContent entity. +type DailyContent struct { + ent.Schema +} + +// Fields of the DailyContent. +func (DailyContent) Fields() []ent.Field { + return []ent.Field{ + field.Enum("language_code"). + NamedValues( + "EN", "en", + "ZH_HANS", "zh-Hans", + "ZH_HANT", "zh-Hant", + ), + field.Text("quote"). + NotEmpty(), + } +} + +// Edges of the DailyContent. +func (DailyContent) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("daily", Daily.Type). + Ref("contents"). + Unique(), + } +} + +// Indexes of the DailyContent. +func (DailyContent) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("language_code"). + Edges("daily"). + Unique(), + } +} diff --git a/backend/ent/schema/media.go b/backend/ent/schema/media.go new file mode 100644 index 0000000..5c41d70 --- /dev/null +++ b/backend/ent/schema/media.go @@ -0,0 +1,47 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/edge" + "time" +) + +// Media holds the schema definition for the Media entity. +type Media struct { + ent.Schema +} + +// Fields of the Media. +func (Media) Fields() []ent.Field { + return []ent.Field{ + field.String("storage_id"). + NotEmpty(). + Unique(), + field.String("original_name"). + NotEmpty(), + field.String("mime_type"). + NotEmpty(), + field.Int64("size"). + Positive(), + field.String("url"). + NotEmpty(), + field.Time("created_at"). + Default(time.Now). + Immutable(), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now), + field.String("created_by"). + Optional(), + } +} + +// Edges of the Media. +func (Media) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("owner", User.Type). + Ref("media"). + Unique(), + } +} diff --git a/backend/ent/schema/permission.go b/backend/ent/schema/permission.go new file mode 100644 index 0000000..f2e2f09 --- /dev/null +++ b/backend/ent/schema/permission.go @@ -0,0 +1,52 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" + "time" +) + +// Permission holds the schema definition for the Permission entity. +type Permission struct { + ent.Schema +} + +// Fields of the Permission. +func (Permission) Fields() []ent.Field { + return []ent.Field{ + field.String("resource"). + NotEmpty(). + Comment("Resource name, e.g., 'media', 'post'"), + field.String("action"). + NotEmpty(). + Comment("Action name, e.g., 'create', 'read', 'update', 'delete'"), + field.String("description"). + Optional(). + Comment("Human readable description of the permission"), + field.Time("created_at"). + Default(time.Now). + Immutable(), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now), + } +} + +// Edges of the Permission. +func (Permission) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("roles", Role.Type). + Ref("permissions"), + } +} + +// Indexes of the Permission. +func (Permission) Indexes() []ent.Index { + return []ent.Index{ + // Create a unique index on resource and action + index.Fields("resource", "action"). 
+ Unique(), + } +} diff --git a/backend/ent/schema/post.go b/backend/ent/schema/post.go new file mode 100644 index 0000000..05362e4 --- /dev/null +++ b/backend/ent/schema/post.go @@ -0,0 +1,41 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/edge" + "time" +) + +// Post holds the schema definition for the Post entity. +type Post struct { + ent.Schema +} + +// Fields of the Post. +func (Post) Fields() []ent.Field { + return []ent.Field{ + field.Enum("status"). + Values("draft", "published", "archived"). + Default("draft"), + field.String("slug"). + NotEmpty(). + Unique(), + field.Time("created_at"). + Default(time.Now), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now), + } +} + +// Edges of the Post. +func (Post) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("contents", PostContent.Type), + edge.To("contributors", PostContributor.Type), + edge.From("category", Category.Type). + Ref("posts"). + Unique(), + } +} diff --git a/backend/ent/schema/postcontent.go b/backend/ent/schema/postcontent.go new file mode 100644 index 0000000..cdc0253 --- /dev/null +++ b/backend/ent/schema/postcontent.go @@ -0,0 +1,55 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/index" +) + +// PostContent holds the schema definition for the PostContent entity. +type PostContent struct { + ent.Schema +} + +// Fields of the PostContent. +func (PostContent) Fields() []ent.Field { + return []ent.Field{ + field.Enum("language_code"). + NamedValues( + "EN", "en", + "ZH_HANS", "zh-Hans", + "ZH_HANT", "zh-Hant", + ), + field.String("title"). + NotEmpty(), + field.Text("content_markdown"). + NotEmpty(), + field.String("summary"). + NotEmpty(), + field.String("meta_keywords"). + Optional(), + field.String("meta_description"). + Optional(), + field.String("slug"). + NotEmpty(), + } +} + +// Edges of the PostContent. +func (PostContent) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("post", Post.Type). + Ref("contents"). + Unique(), + } +} + +// Indexes of the PostContent. +func (PostContent) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("language_code"). + Edges("post"). + Unique(), + } +} diff --git a/backend/ent/schema/postcontributor.go b/backend/ent/schema/postcontributor.go new file mode 100644 index 0000000..d366a8a --- /dev/null +++ b/backend/ent/schema/postcontributor.go @@ -0,0 +1,50 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/index" + "time" +) + +// PostContributor holds the schema definition for the PostContributor entity. +type PostContributor struct { + ent.Schema +} + +// Fields of the PostContributor. +func (PostContributor) Fields() []ent.Field { + return []ent.Field{ + field.Enum("language_code"). + Values("en", "zh-Hans", "zh-Hant"). + Optional(). + Nillable(), + field.Time("created_at"). + Default(time.Now), + } +} + +// Edges of the PostContributor. +func (PostContributor) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("post", Post.Type). + Ref("contributors"). + Unique(), + edge.From("contributor", Contributor.Type). + Ref("posts"). + Unique(), + edge.From("role", ContributorRole.Type). + Ref("post_contributors"). + Unique(), + } +} + +// Indexes of the PostContributor. +func (PostContributor) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("language_code"). + Edges("post", "contributor", "role"). 
+ Unique(), + } +} diff --git a/backend/ent/schema/role.go b/backend/ent/schema/role.go new file mode 100644 index 0000000..0188682 --- /dev/null +++ b/backend/ent/schema/role.go @@ -0,0 +1,41 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "time" +) + +// Role holds the schema definition for the Role entity. +type Role struct { + ent.Schema +} + +// Fields of the Role. +func (Role) Fields() []ent.Field { + return []ent.Field{ + field.String("name"). + Unique(). + NotEmpty(). + Comment("Role name, e.g., 'admin', 'editor'"), + field.String("description"). + Optional(). + Comment("Human readable description of the role"), + field.Time("created_at"). + Default(time.Now). + Immutable(), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now), + } +} + +// Edges of the Role. +func (Role) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("permissions", Permission.Type), + edge.From("users", User.Type). + Ref("roles"), + } +} diff --git a/backend/ent/schema/user.go b/backend/ent/schema/user.go new file mode 100644 index 0000000..75384be --- /dev/null +++ b/backend/ent/schema/user.go @@ -0,0 +1,42 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "time" +) + +// User holds the schema definition for the User entity. +type User struct { + ent.Schema +} + +// Fields of the User. +func (User) Fields() []ent.Field { + return []ent.Field{ + field.String("email"). + Unique(). + NotEmpty(), + field.String("password_hash"). + Sensitive(). + NotEmpty(), + field.Enum("status"). + Values("active", "inactive", "banned"). + Default("active"), + field.Time("created_at"). + Default(time.Now), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now), + } +} + +// Edges of the User. +func (User) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("roles", Role.Type), + edge.To("contributors", Contributor.Type), + edge.To("media", Media.Type), + } +} diff --git a/backend/ent/tx.go b/backend/ent/tx.go new file mode 100644 index 0000000..301d572 --- /dev/null +++ b/backend/ent/tx.go @@ -0,0 +1,255 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "sync" + + "entgo.io/ent/dialect" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // Category is the client for interacting with the Category builders. + Category *CategoryClient + // CategoryContent is the client for interacting with the CategoryContent builders. + CategoryContent *CategoryContentClient + // Contributor is the client for interacting with the Contributor builders. + Contributor *ContributorClient + // ContributorRole is the client for interacting with the ContributorRole builders. + ContributorRole *ContributorRoleClient + // ContributorSocialLink is the client for interacting with the ContributorSocialLink builders. + ContributorSocialLink *ContributorSocialLinkClient + // Daily is the client for interacting with the Daily builders. + Daily *DailyClient + // DailyCategory is the client for interacting with the DailyCategory builders. + DailyCategory *DailyCategoryClient + // DailyCategoryContent is the client for interacting with the DailyCategoryContent builders. + DailyCategoryContent *DailyCategoryContentClient + // DailyContent is the client for interacting with the DailyContent builders. + DailyContent *DailyContentClient + // Media is the client for interacting with the Media builders. 
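+
+// Illustrative sketch (not generated code): the M2M edge between User and
+// Role declared above surfaces as Add/Remove helpers on the builders.
+// `client`, `ctx`, `hash` and the role ID are placeholders:
+//
+//	u, err := client.User.Create().
+//	    SetEmail("admin@example.com").
+//	    SetPasswordHash(hash).
+//	    AddRoleIDs(1).
+//	    Save(ctx)
+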
+ Media *MediaClient + // Permission is the client for interacting with the Permission builders. + Permission *PermissionClient + // Post is the client for interacting with the Post builders. + Post *PostClient + // PostContent is the client for interacting with the PostContent builders. + PostContent *PostContentClient + // PostContributor is the client for interacting with the PostContributor builders. + PostContributor *PostContributorClient + // Role is the client for interacting with the Role builders. + Role *RoleClient + // User is the client for interacting with the User builders. + User *UserClient + + // lazily loaded. + client *Client + clientOnce sync.Once + // ctx lives for the life of the transaction. It is + // the same context used by the underlying connection. + ctx context.Context +} + +type ( + // Committer is the interface that wraps the Commit method. + Committer interface { + Commit(context.Context, *Tx) error + } + + // The CommitFunc type is an adapter to allow the use of ordinary + // function as a Committer. If f is a function with the appropriate + // signature, CommitFunc(f) is a Committer that calls f. + CommitFunc func(context.Context, *Tx) error + + // CommitHook defines the "commit middleware". A function that gets a Committer + // and returns a Committer. For example: + // + // hook := func(next ent.Committer) ent.Committer { + // return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Commit(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + CommitHook func(Committer) Committer +) + +// Commit calls f(ctx, m). +func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Commit commits the transaction. +func (tx *Tx) Commit() error { + txDriver := tx.config.driver.(*txDriver) + var fn Committer = CommitFunc(func(context.Context, *Tx) error { + return txDriver.tx.Commit() + }) + txDriver.mu.Lock() + hooks := append([]CommitHook(nil), txDriver.onCommit...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Commit(tx.ctx, tx) +} + +// OnCommit adds a hook to call on commit. +func (tx *Tx) OnCommit(f CommitHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onCommit = append(txDriver.onCommit, f) + txDriver.mu.Unlock() +} + +type ( + // Rollbacker is the interface that wraps the Rollback method. + Rollbacker interface { + Rollback(context.Context, *Tx) error + } + + // The RollbackFunc type is an adapter to allow the use of ordinary + // function as a Rollbacker. If f is a function with the appropriate + // signature, RollbackFunc(f) is a Rollbacker that calls f. + RollbackFunc func(context.Context, *Tx) error + + // RollbackHook defines the "rollback middleware". A function that gets a Rollbacker + // and returns a Rollbacker. For example: + // + // hook := func(next ent.Rollbacker) ent.Rollbacker { + // return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Rollback(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + RollbackHook func(Rollbacker) Rollbacker +) + +// Rollback calls f(ctx, m). +func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Rollback rollbacks the transaction. 
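+
+// Illustrative sketch (not generated code): OnCommit registers middleware of
+// the CommitHook shape documented above, e.g. logging around each commit:
+//
+//	tx.OnCommit(func(next ent.Committer) ent.Committer {
+//	    return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error {
+//	        log.Println("about to commit")
+//	        return next.Commit(ctx, tx)
+//	    })
+//	})
+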
+func (tx *Tx) Rollback() error { + txDriver := tx.config.driver.(*txDriver) + var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { + return txDriver.tx.Rollback() + }) + txDriver.mu.Lock() + hooks := append([]RollbackHook(nil), txDriver.onRollback...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Rollback(tx.ctx, tx) +} + +// OnRollback adds a hook to call on rollback. +func (tx *Tx) OnRollback(f RollbackHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onRollback = append(txDriver.onRollback, f) + txDriver.mu.Unlock() +} + +// Client returns a Client that binds to current transaction. +func (tx *Tx) Client() *Client { + tx.clientOnce.Do(func() { + tx.client = &Client{config: tx.config} + tx.client.init() + }) + return tx.client +} + +func (tx *Tx) init() { + tx.Category = NewCategoryClient(tx.config) + tx.CategoryContent = NewCategoryContentClient(tx.config) + tx.Contributor = NewContributorClient(tx.config) + tx.ContributorRole = NewContributorRoleClient(tx.config) + tx.ContributorSocialLink = NewContributorSocialLinkClient(tx.config) + tx.Daily = NewDailyClient(tx.config) + tx.DailyCategory = NewDailyCategoryClient(tx.config) + tx.DailyCategoryContent = NewDailyCategoryContentClient(tx.config) + tx.DailyContent = NewDailyContentClient(tx.config) + tx.Media = NewMediaClient(tx.config) + tx.Permission = NewPermissionClient(tx.config) + tx.Post = NewPostClient(tx.config) + tx.PostContent = NewPostContentClient(tx.config) + tx.PostContributor = NewPostContributorClient(tx.config) + tx.Role = NewRoleClient(tx.config) + tx.User = NewUserClient(tx.config) +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: Category.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that txDriver is not goroutine safe. +type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // tx is the underlying transaction. + tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook +} + +// newTx creates a new transactional driver. +func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. +func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. 
+// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error { + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. +func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error { + return tx.tx.Query(ctx, query, args, v) +} + +var _ dialect.Driver = (*txDriver)(nil) diff --git a/backend/ent/user.go b/backend/ent/user.go new file mode 100644 index 0000000..f67a040 --- /dev/null +++ b/backend/ent/user.go @@ -0,0 +1,207 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + "tss-rocks-be/ent/user" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// User is the model entity for the User schema. +type User struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Email holds the value of the "email" field. + Email string `json:"email,omitempty"` + // PasswordHash holds the value of the "password_hash" field. + PasswordHash string `json:"-"` + // Status holds the value of the "status" field. + Status user.Status `json:"status,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UserQuery when eager-loading is set. + Edges UserEdges `json:"edges"` + selectValues sql.SelectValues +} + +// UserEdges holds the relations/edges for other nodes in the graph. +type UserEdges struct { + // Roles holds the value of the roles edge. + Roles []*Role `json:"roles,omitempty"` + // Contributors holds the value of the contributors edge. + Contributors []*Contributor `json:"contributors,omitempty"` + // Media holds the value of the media edge. + Media []*Media `json:"media,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [3]bool +} + +// RolesOrErr returns the Roles value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) RolesOrErr() ([]*Role, error) { + if e.loadedTypes[0] { + return e.Roles, nil + } + return nil, &NotLoadedError{edge: "roles"} +} + +// ContributorsOrErr returns the Contributors value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) ContributorsOrErr() ([]*Contributor, error) { + if e.loadedTypes[1] { + return e.Contributors, nil + } + return nil, &NotLoadedError{edge: "contributors"} +} + +// MediaOrErr returns the Media value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) MediaOrErr() ([]*Media, error) { + if e.loadedTypes[2] { + return e.Media, nil + } + return nil, &NotLoadedError{edge: "media"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
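+
+// Illustrative sketch (not generated code): the txDriver above is what lets a
+// transactional client reuse the regular builders; a minimal wrapper in the
+// style of the ent documentation:
+//
+//	func WithTx(ctx context.Context, client *ent.Client, fn func(tx *ent.Tx) error) error {
+//	    tx, err := client.Tx(ctx)
+//	    if err != nil {
+//	        return err
+//	    }
+//	    if err := fn(tx); err != nil {
+//	        if rerr := tx.Rollback(); rerr != nil {
+//	            err = fmt.Errorf("%w: rolling back transaction: %v", err, rerr)
+//	        }
+//	        return err
+//	    }
+//	    return tx.Commit()
+//	}
+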
+func (*User) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case user.FieldID: + values[i] = new(sql.NullInt64) + case user.FieldEmail, user.FieldPasswordHash, user.FieldStatus: + values[i] = new(sql.NullString) + case user.FieldCreatedAt, user.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the User fields. +func (u *User) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case user.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + u.ID = int(value.Int64) + case user.FieldEmail: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field email", values[i]) + } else if value.Valid { + u.Email = value.String + } + case user.FieldPasswordHash: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field password_hash", values[i]) + } else if value.Valid { + u.PasswordHash = value.String + } + case user.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + u.Status = user.Status(value.String) + } + case user.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + u.CreatedAt = value.Time + } + case user.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + u.UpdatedAt = value.Time + } + default: + u.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the User. +// This includes values selected through modifiers, order, etc. +func (u *User) Value(name string) (ent.Value, error) { + return u.selectValues.Get(name) +} + +// QueryRoles queries the "roles" edge of the User entity. +func (u *User) QueryRoles() *RoleQuery { + return NewUserClient(u.config).QueryRoles(u) +} + +// QueryContributors queries the "contributors" edge of the User entity. +func (u *User) QueryContributors() *ContributorQuery { + return NewUserClient(u.config).QueryContributors(u) +} + +// QueryMedia queries the "media" edge of the User entity. +func (u *User) QueryMedia() *MediaQuery { + return NewUserClient(u.config).QueryMedia(u) +} + +// Update returns a builder for updating this User. +// Note that you need to call User.Unwrap() before calling this method if this User +// was returned from a transaction, and the transaction was committed or rolled back. +func (u *User) Update() *UserUpdateOne { + return NewUserClient(u.config).UpdateOne(u) +} + +// Unwrap unwraps the User entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. 
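+
+// Illustrative sketch (not generated code): edges are populated only when
+// eager-loaded, which is what the OrErr accessors above guard against:
+//
+//	u, err := client.User.Query().WithRoles().Only(ctx)
+//	// ...
+//	roles, err := u.Edges.RolesOrErr() // errors only if WithRoles was omitted
+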
+func (u *User) Unwrap() *User { + _tx, ok := u.config.driver.(*txDriver) + if !ok { + panic("ent: User is not a transactional entity") + } + u.config.driver = _tx.drv + return u +} + +// String implements the fmt.Stringer. +func (u *User) String() string { + var builder strings.Builder + builder.WriteString("User(") + builder.WriteString(fmt.Sprintf("id=%v, ", u.ID)) + builder.WriteString("email=") + builder.WriteString(u.Email) + builder.WriteString(", ") + builder.WriteString("password_hash=") + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(fmt.Sprintf("%v", u.Status)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(u.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(u.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// Users is a parsable slice of User. +type Users []*User diff --git a/backend/ent/user/user.go b/backend/ent/user/user.go new file mode 100644 index 0000000..9fe4c09 --- /dev/null +++ b/backend/ent/user/user.go @@ -0,0 +1,217 @@ +// Code generated by ent, DO NOT EDIT. + +package user + +import ( + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the user type in the database. + Label = "user" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldEmail holds the string denoting the email field in the database. + FieldEmail = "email" + // FieldPasswordHash holds the string denoting the password_hash field in the database. + FieldPasswordHash = "password_hash" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // EdgeRoles holds the string denoting the roles edge name in mutations. + EdgeRoles = "roles" + // EdgeContributors holds the string denoting the contributors edge name in mutations. + EdgeContributors = "contributors" + // EdgeMedia holds the string denoting the media edge name in mutations. + EdgeMedia = "media" + // Table holds the table name of the user in the database. + Table = "users" + // RolesTable is the table that holds the roles relation/edge. The primary key declared below. + RolesTable = "user_roles" + // RolesInverseTable is the table name for the Role entity. + // It exists in this package in order to avoid circular dependency with the "role" package. + RolesInverseTable = "roles" + // ContributorsTable is the table that holds the contributors relation/edge. + ContributorsTable = "contributors" + // ContributorsInverseTable is the table name for the Contributor entity. + // It exists in this package in order to avoid circular dependency with the "contributor" package. + ContributorsInverseTable = "contributors" + // ContributorsColumn is the table column denoting the contributors relation/edge. + ContributorsColumn = "user_contributors" + // MediaTable is the table that holds the media relation/edge. + MediaTable = "media" + // MediaInverseTable is the table name for the Media entity. + // It exists in this package in order to avoid circular dependency with the "media" package. 
+ MediaInverseTable = "media" + // MediaColumn is the table column denoting the media relation/edge. + MediaColumn = "user_media" +) + +// Columns holds all SQL columns for user fields. +var Columns = []string{ + FieldID, + FieldEmail, + FieldPasswordHash, + FieldStatus, + FieldCreatedAt, + FieldUpdatedAt, +} + +var ( + // RolesPrimaryKey and RolesColumn2 are the table columns denoting the + // primary key for the roles relation (M2M). + RolesPrimaryKey = []string{"user_id", "role_id"} +) + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // EmailValidator is a validator for the "email" field. It is called by the builders before save. + EmailValidator func(string) error + // PasswordHashValidator is a validator for the "password_hash" field. It is called by the builders before save. + PasswordHashValidator func(string) error + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// Status defines the type for the "status" enum field. +type Status string + +// StatusActive is the default value of the Status enum. +const DefaultStatus = StatusActive + +// Status values. +const ( + StatusActive Status = "active" + StatusInactive Status = "inactive" + StatusBanned Status = "banned" +) + +func (s Status) String() string { + return string(s) +} + +// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save. +func StatusValidator(s Status) error { + switch s { + case StatusActive, StatusInactive, StatusBanned: + return nil + default: + return fmt.Errorf("user: invalid enum value for status field: %q", s) + } +} + +// OrderOption defines the ordering options for the User queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByEmail orders the results by the email field. +func ByEmail(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEmail, opts...).ToFunc() +} + +// ByPasswordHash orders the results by the password_hash field. +func ByPasswordHash(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPasswordHash, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByRolesCount orders the results by roles count. +func ByRolesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newRolesStep(), opts...) + } +} + +// ByRoles orders the results by roles terms. 
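+// For example, sorting users by how many roles they hold uses the
+// ByRolesCount helper above (a sketch; "client" and "ctx" are assumed):
+//
+//	client.User.Query().
+//		Order(user.ByRolesCount(sql.OrderDesc())).
+//		All(ctx)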
+func ByRoles(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newRolesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByContributorsCount orders the results by contributors count. +func ByContributorsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newContributorsStep(), opts...) + } +} + +// ByContributors orders the results by contributors terms. +func ByContributors(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newContributorsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByMediaCount orders the results by media count. +func ByMediaCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newMediaStep(), opts...) + } +} + +// ByMedia orders the results by media terms. +func ByMedia(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newMediaStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newRolesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(RolesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, RolesTable, RolesPrimaryKey...), + ) +} +func newContributorsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ContributorsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ContributorsTable, ContributorsColumn), + ) +} +func newMediaStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(MediaInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, MediaTable, MediaColumn), + ) +} diff --git a/backend/ent/user/where.go b/backend/ent/user/where.go new file mode 100644 index 0000000..040d8a9 --- /dev/null +++ b/backend/ent/user/where.go @@ -0,0 +1,390 @@ +// Code generated by ent, DO NOT EDIT. + +package user + +import ( + "time" + "tss-rocks-be/ent/predicate" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.User { + return predicate.User(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.User { + return predicate.User(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.User { + return predicate.User(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.User { + return predicate.User(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.User { + return predicate.User(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.User { + return predicate.User(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.User { + return predicate.User(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.User { + return predicate.User(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. 
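+// Field predicates compose inside Where; e.g. a bounded ID scan (sketch;
+// "client" and "ctx" are assumed):
+//
+//	client.User.Query().
+//		Where(user.IDGTE(100), user.IDLTE(200)).
+//		All(ctx)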
+func IDLTE(id int) predicate.User { + return predicate.User(sql.FieldLTE(FieldID, id)) +} + +// Email applies equality check predicate on the "email" field. It's identical to EmailEQ. +func Email(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldEmail, v)) +} + +// PasswordHash applies equality check predicate on the "password_hash" field. It's identical to PasswordHashEQ. +func PasswordHash(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldPasswordHash, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// EmailEQ applies the EQ predicate on the "email" field. +func EmailEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldEmail, v)) +} + +// EmailNEQ applies the NEQ predicate on the "email" field. +func EmailNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldEmail, v)) +} + +// EmailIn applies the In predicate on the "email" field. +func EmailIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldEmail, vs...)) +} + +// EmailNotIn applies the NotIn predicate on the "email" field. +func EmailNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldEmail, vs...)) +} + +// EmailGT applies the GT predicate on the "email" field. +func EmailGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldEmail, v)) +} + +// EmailGTE applies the GTE predicate on the "email" field. +func EmailGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldEmail, v)) +} + +// EmailLT applies the LT predicate on the "email" field. +func EmailLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldEmail, v)) +} + +// EmailLTE applies the LTE predicate on the "email" field. +func EmailLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldEmail, v)) +} + +// EmailContains applies the Contains predicate on the "email" field. +func EmailContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldEmail, v)) +} + +// EmailHasPrefix applies the HasPrefix predicate on the "email" field. +func EmailHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldEmail, v)) +} + +// EmailHasSuffix applies the HasSuffix predicate on the "email" field. +func EmailHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldEmail, v)) +} + +// EmailEqualFold applies the EqualFold predicate on the "email" field. +func EmailEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldEmail, v)) +} + +// EmailContainsFold applies the ContainsFold predicate on the "email" field. +func EmailContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldEmail, v)) +} + +// PasswordHashEQ applies the EQ predicate on the "password_hash" field. +func PasswordHashEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldPasswordHash, v)) +} + +// PasswordHashNEQ applies the NEQ predicate on the "password_hash" field. 
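+// String fields also get case-folding variants; a case-insensitive e-mail
+// lookup would read (sketch; "client" and "ctx" are assumed):
+//
+//	client.User.Query().Where(user.EmailEqualFold("Alice@Example.com")).Only(ctx)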
+func PasswordHashNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldPasswordHash, v)) +} + +// PasswordHashIn applies the In predicate on the "password_hash" field. +func PasswordHashIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldPasswordHash, vs...)) +} + +// PasswordHashNotIn applies the NotIn predicate on the "password_hash" field. +func PasswordHashNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldPasswordHash, vs...)) +} + +// PasswordHashGT applies the GT predicate on the "password_hash" field. +func PasswordHashGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldPasswordHash, v)) +} + +// PasswordHashGTE applies the GTE predicate on the "password_hash" field. +func PasswordHashGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldPasswordHash, v)) +} + +// PasswordHashLT applies the LT predicate on the "password_hash" field. +func PasswordHashLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldPasswordHash, v)) +} + +// PasswordHashLTE applies the LTE predicate on the "password_hash" field. +func PasswordHashLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldPasswordHash, v)) +} + +// PasswordHashContains applies the Contains predicate on the "password_hash" field. +func PasswordHashContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldPasswordHash, v)) +} + +// PasswordHashHasPrefix applies the HasPrefix predicate on the "password_hash" field. +func PasswordHashHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldPasswordHash, v)) +} + +// PasswordHashHasSuffix applies the HasSuffix predicate on the "password_hash" field. +func PasswordHashHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldPasswordHash, v)) +} + +// PasswordHashEqualFold applies the EqualFold predicate on the "password_hash" field. +func PasswordHashEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldPasswordHash, v)) +} + +// PasswordHashContainsFold applies the ContainsFold predicate on the "password_hash" field. +func PasswordHashContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldPasswordHash, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v Status) predicate.User { + return predicate.User(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v Status) predicate.User { + return predicate.User(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...Status) predicate.User { + return predicate.User(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...Status) predicate.User { + return predicate.User(sql.FieldNotIn(FieldStatus, vs...)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. 
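+// Time predicates take time.Time values; e.g. users created within the last
+// day (sketch; "client" and "ctx" are assumed):
+//
+//	since := time.Now().Add(-24 * time.Hour)
+//	client.User.Query().Where(user.CreatedAtGTE(since)).All(ctx)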
+func CreatedAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// HasRoles applies the HasEdge predicate on the "roles" edge. +func HasRoles() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, RolesTable, RolesPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasRolesWith applies the HasEdge predicate on the "roles" edge with a given conditions (other predicates). +func HasRolesWith(preds ...predicate.Role) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newRolesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasContributors applies the HasEdge predicate on the "contributors" edge. 
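+// Edge predicates can also carry conditions on the neighbor; e.g. users that
+// own a contributor profile with a given name (sketch; contributor.NameEQ is
+// assumed to exist in the generated contributor package):
+//
+//	client.User.Query().
+//		Where(user.HasContributorsWith(contributor.NameEQ("alice"))). // NameEQ: assumed
+//		All(ctx)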
+func HasContributors() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ContributorsTable, ContributorsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasContributorsWith applies the HasEdge predicate on the "contributors" edge with a given conditions (other predicates). +func HasContributorsWith(preds ...predicate.Contributor) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newContributorsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasMedia applies the HasEdge predicate on the "media" edge. +func HasMedia() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, MediaTable, MediaColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasMediaWith applies the HasEdge predicate on the "media" edge with a given conditions (other predicates). +func HasMediaWith(preds ...predicate.Media) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newMediaStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.User) predicate.User { + return predicate.User(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.User) predicate.User { + return predicate.User(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.User) predicate.User { + return predicate.User(sql.NotPredicates(p)) +} diff --git a/backend/ent/user_create.go b/backend/ent/user_create.go new file mode 100644 index 0000000..49463c8 --- /dev/null +++ b/backend/ent/user_create.go @@ -0,0 +1,389 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/media" + "tss-rocks-be/ent/role" + "tss-rocks-be/ent/user" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// UserCreate is the builder for creating a User entity. +type UserCreate struct { + config + mutation *UserMutation + hooks []Hook +} + +// SetEmail sets the "email" field. +func (uc *UserCreate) SetEmail(s string) *UserCreate { + uc.mutation.SetEmail(s) + return uc +} + +// SetPasswordHash sets the "password_hash" field. +func (uc *UserCreate) SetPasswordHash(s string) *UserCreate { + uc.mutation.SetPasswordHash(s) + return uc +} + +// SetStatus sets the "status" field. +func (uc *UserCreate) SetStatus(u user.Status) *UserCreate { + uc.mutation.SetStatus(u) + return uc +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (uc *UserCreate) SetNillableStatus(u *user.Status) *UserCreate { + if u != nil { + uc.SetStatus(*u) + } + return uc +} + +// SetCreatedAt sets the "created_at" field. +func (uc *UserCreate) SetCreatedAt(t time.Time) *UserCreate { + uc.mutation.SetCreatedAt(t) + return uc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (uc *UserCreate) SetNillableCreatedAt(t *time.Time) *UserCreate { + if t != nil { + uc.SetCreatedAt(*t) + } + return uc +} + +// SetUpdatedAt sets the "updated_at" field. 
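+// More generally, a create chains these setters and ends with Save (sketch;
+// "hash" is an assumed, pre-computed password hash):
+//
+//	u, err := client.User.Create().
+//		SetEmail("alice@example.com").
+//		SetPasswordHash(hash). // hash: assumed
+//		Save(ctx)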
+func (uc *UserCreate) SetUpdatedAt(t time.Time) *UserCreate { + uc.mutation.SetUpdatedAt(t) + return uc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (uc *UserCreate) SetNillableUpdatedAt(t *time.Time) *UserCreate { + if t != nil { + uc.SetUpdatedAt(*t) + } + return uc +} + +// AddRoleIDs adds the "roles" edge to the Role entity by IDs. +func (uc *UserCreate) AddRoleIDs(ids ...int) *UserCreate { + uc.mutation.AddRoleIDs(ids...) + return uc +} + +// AddRoles adds the "roles" edges to the Role entity. +func (uc *UserCreate) AddRoles(r ...*Role) *UserCreate { + ids := make([]int, len(r)) + for i := range r { + ids[i] = r[i].ID + } + return uc.AddRoleIDs(ids...) +} + +// AddContributorIDs adds the "contributors" edge to the Contributor entity by IDs. +func (uc *UserCreate) AddContributorIDs(ids ...int) *UserCreate { + uc.mutation.AddContributorIDs(ids...) + return uc +} + +// AddContributors adds the "contributors" edges to the Contributor entity. +func (uc *UserCreate) AddContributors(c ...*Contributor) *UserCreate { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return uc.AddContributorIDs(ids...) +} + +// AddMediumIDs adds the "media" edge to the Media entity by IDs. +func (uc *UserCreate) AddMediumIDs(ids ...int) *UserCreate { + uc.mutation.AddMediumIDs(ids...) + return uc +} + +// AddMedia adds the "media" edges to the Media entity. +func (uc *UserCreate) AddMedia(m ...*Media) *UserCreate { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return uc.AddMediumIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. +func (uc *UserCreate) Mutation() *UserMutation { + return uc.mutation +} + +// Save creates the User in the database. +func (uc *UserCreate) Save(ctx context.Context) (*User, error) { + uc.defaults() + return withHooks(ctx, uc.sqlSave, uc.mutation, uc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (uc *UserCreate) SaveX(ctx context.Context) *User { + v, err := uc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (uc *UserCreate) Exec(ctx context.Context) error { + _, err := uc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (uc *UserCreate) ExecX(ctx context.Context) { + if err := uc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (uc *UserCreate) defaults() { + if _, ok := uc.mutation.Status(); !ok { + v := user.DefaultStatus + uc.mutation.SetStatus(v) + } + if _, ok := uc.mutation.CreatedAt(); !ok { + v := user.DefaultCreatedAt() + uc.mutation.SetCreatedAt(v) + } + if _, ok := uc.mutation.UpdatedAt(); !ok { + v := user.DefaultUpdatedAt() + uc.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
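+// Note that defaults runs before check, so an omitted status or timestamp is
+// filled from user.DefaultStatus, user.DefaultCreatedAt and
+// user.DefaultUpdatedAt, and is then validated like any caller-supplied value.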
+func (uc *UserCreate) check() error { + if _, ok := uc.mutation.Email(); !ok { + return &ValidationError{Name: "email", err: errors.New(`ent: missing required field "User.email"`)} + } + if v, ok := uc.mutation.Email(); ok { + if err := user.EmailValidator(v); err != nil { + return &ValidationError{Name: "email", err: fmt.Errorf(`ent: validator failed for field "User.email": %w`, err)} + } + } + if _, ok := uc.mutation.PasswordHash(); !ok { + return &ValidationError{Name: "password_hash", err: errors.New(`ent: missing required field "User.password_hash"`)} + } + if v, ok := uc.mutation.PasswordHash(); ok { + if err := user.PasswordHashValidator(v); err != nil { + return &ValidationError{Name: "password_hash", err: fmt.Errorf(`ent: validator failed for field "User.password_hash": %w`, err)} + } + } + if _, ok := uc.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "User.status"`)} + } + if v, ok := uc.mutation.Status(); ok { + if err := user.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "User.status": %w`, err)} + } + } + if _, ok := uc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "User.created_at"`)} + } + if _, ok := uc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "User.updated_at"`)} + } + return nil +} + +func (uc *UserCreate) sqlSave(ctx context.Context) (*User, error) { + if err := uc.check(); err != nil { + return nil, err + } + _node, _spec := uc.createSpec() + if err := sqlgraph.CreateNode(ctx, uc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + uc.mutation.id = &_node.ID + uc.mutation.done = true + return _node, nil +} + +func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { + var ( + _node = &User{config: uc.config} + _spec = sqlgraph.NewCreateSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt)) + ) + if value, ok := uc.mutation.Email(); ok { + _spec.SetField(user.FieldEmail, field.TypeString, value) + _node.Email = value + } + if value, ok := uc.mutation.PasswordHash(); ok { + _spec.SetField(user.FieldPasswordHash, field.TypeString, value) + _node.PasswordHash = value + } + if value, ok := uc.mutation.Status(); ok { + _spec.SetField(user.FieldStatus, field.TypeEnum, value) + _node.Status = value + } + if value, ok := uc.mutation.CreatedAt(); ok { + _spec.SetField(user.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := uc.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if nodes := uc.mutation.RolesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.RolesTable, + Columns: user.RolesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := uc.mutation.ContributorsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.ContributorsTable, + Columns: []string{user.ContributorsColumn}, + Bidi: false, + 
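+			// O2M edge: the user_contributors foreign key lives on the contributor rows.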
Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := uc.mutation.MediaIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.MediaTable, + Columns: []string{user.MediaColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(media.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// UserCreateBulk is the builder for creating many User entities in bulk. +type UserCreateBulk struct { + config + err error + builders []*UserCreate +} + +// Save creates the User entities in the database. +func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { + if ucb.err != nil { + return nil, ucb.err + } + specs := make([]*sqlgraph.CreateSpec, len(ucb.builders)) + nodes := make([]*User, len(ucb.builders)) + mutators := make([]Mutator, len(ucb.builders)) + for i := range ucb.builders { + func(i int, root context.Context) { + builder := ucb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*UserMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, ucb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, ucb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, ucb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (ucb *UserCreateBulk) SaveX(ctx context.Context) []*User { + v, err := ucb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ucb *UserCreateBulk) Exec(ctx context.Context) error { + _, err := ucb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ucb *UserCreateBulk) ExecX(ctx context.Context) { + if err := ucb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/user_delete.go b/backend/ent/user_delete.go new file mode 100644 index 0000000..4fa11a9 --- /dev/null +++ b/backend/ent/user_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "tss-rocks-be/ent/predicate" + "tss-rocks-be/ent/user" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// UserDelete is the builder for deleting a User entity. 
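+// Deletion follows the same builder pattern; e.g. removing all banned users
+// (sketch; "client" and "ctx" are assumed):
+//
+//	n, err := client.User.Delete().
+//		Where(user.StatusEQ(user.StatusBanned)).
+//		Exec(ctx)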
+type UserDelete struct { + config + hooks []Hook + mutation *UserMutation +} + +// Where appends a list predicates to the UserDelete builder. +func (ud *UserDelete) Where(ps ...predicate.User) *UserDelete { + ud.mutation.Where(ps...) + return ud +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (ud *UserDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, ud.sqlExec, ud.mutation, ud.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (ud *UserDelete) ExecX(ctx context.Context) int { + n, err := ud.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ud *UserDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt)) + if ps := ud.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ud.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ud.mutation.done = true + return affected, err +} + +// UserDeleteOne is the builder for deleting a single User entity. +type UserDeleteOne struct { + ud *UserDelete +} + +// Where appends a list predicates to the UserDelete builder. +func (udo *UserDeleteOne) Where(ps ...predicate.User) *UserDeleteOne { + udo.ud.mutation.Where(ps...) + return udo +} + +// Exec executes the deletion query. +func (udo *UserDeleteOne) Exec(ctx context.Context) error { + n, err := udo.ud.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{user.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (udo *UserDeleteOne) ExecX(ctx context.Context) { + if err := udo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/user_query.go b/backend/ent/user_query.go new file mode 100644 index 0000000..1cd778d --- /dev/null +++ b/backend/ent/user_query.go @@ -0,0 +1,787 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/media" + "tss-rocks-be/ent/predicate" + "tss-rocks-be/ent/role" + "tss-rocks-be/ent/user" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// UserQuery is the builder for querying User entities. +type UserQuery struct { + config + ctx *QueryContext + order []user.OrderOption + inters []Interceptor + predicates []predicate.User + withRoles *RoleQuery + withContributors *ContributorQuery + withMedia *MediaQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the UserQuery builder. +func (uq *UserQuery) Where(ps ...predicate.User) *UserQuery { + uq.predicates = append(uq.predicates, ps...) + return uq +} + +// Limit the number of records to be returned by this query. +func (uq *UserQuery) Limit(limit int) *UserQuery { + uq.ctx.Limit = &limit + return uq +} + +// Offset to start from. +func (uq *UserQuery) Offset(offset int) *UserQuery { + uq.ctx.Offset = &offset + return uq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. 
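+// De-duplication can be switched off when join rows may legitimately repeat
+// (sketch; "client" and "ctx" are assumed):
+//
+//	client.User.Query().Unique(false).All(ctx)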
+func (uq *UserQuery) Unique(unique bool) *UserQuery { + uq.ctx.Unique = &unique + return uq +} + +// Order specifies how the records should be ordered. +func (uq *UserQuery) Order(o ...user.OrderOption) *UserQuery { + uq.order = append(uq.order, o...) + return uq +} + +// QueryRoles chains the current query on the "roles" edge. +func (uq *UserQuery) QueryRoles() *RoleQuery { + query := (&RoleClient{config: uq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := uq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := uq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(role.Table, role.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, user.RolesTable, user.RolesPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryContributors chains the current query on the "contributors" edge. +func (uq *UserQuery) QueryContributors() *ContributorQuery { + query := (&ContributorClient{config: uq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := uq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := uq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(contributor.Table, contributor.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.ContributorsTable, user.ContributorsColumn), + ) + fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryMedia chains the current query on the "media" edge. +func (uq *UserQuery) QueryMedia() *MediaQuery { + query := (&MediaClient{config: uq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := uq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := uq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(media.Table, media.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.MediaTable, user.MediaColumn), + ) + fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first User entity from the query. +// Returns a *NotFoundError when no User was found. +func (uq *UserQuery) First(ctx context.Context) (*User, error) { + nodes, err := uq.Limit(1).All(setContextOp(ctx, uq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{user.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (uq *UserQuery) FirstX(ctx context.Context) *User { + node, err := uq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first User ID from the query. +// Returns a *NotFoundError when no User ID was found. +func (uq *UserQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = uq.Limit(1).IDs(setContextOp(ctx, uq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{user.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
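+// The X-suffixed variants trade the error return for a panic, so they are
+// best confined to tests and one-off tooling (sketch):
+//
+//	id := client.User.Query().FirstIDX(ctx) // panics instead of returning err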
+func (uq *UserQuery) FirstIDX(ctx context.Context) int { + id, err := uq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single User entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one User entity is found. +// Returns a *NotFoundError when no User entities are found. +func (uq *UserQuery) Only(ctx context.Context) (*User, error) { + nodes, err := uq.Limit(2).All(setContextOp(ctx, uq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{user.Label} + default: + return nil, &NotSingularError{user.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (uq *UserQuery) OnlyX(ctx context.Context) *User { + node, err := uq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only User ID in the query. +// Returns a *NotSingularError when more than one User ID is found. +// Returns a *NotFoundError when no entities are found. +func (uq *UserQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = uq.Limit(2).IDs(setContextOp(ctx, uq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{user.Label} + default: + err = &NotSingularError{user.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (uq *UserQuery) OnlyIDX(ctx context.Context) int { + id, err := uq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Users. +func (uq *UserQuery) All(ctx context.Context) ([]*User, error) { + ctx = setContextOp(ctx, uq.ctx, ent.OpQueryAll) + if err := uq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*User, *UserQuery]() + return withInterceptors[[]*User](ctx, uq, qr, uq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (uq *UserQuery) AllX(ctx context.Context) []*User { + nodes, err := uq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of User IDs. +func (uq *UserQuery) IDs(ctx context.Context) (ids []int, err error) { + if uq.ctx.Unique == nil && uq.path != nil { + uq.Unique(true) + } + ctx = setContextOp(ctx, uq.ctx, ent.OpQueryIDs) + if err = uq.Select(user.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (uq *UserQuery) IDsX(ctx context.Context) []int { + ids, err := uq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (uq *UserQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, uq.ctx, ent.OpQueryCount) + if err := uq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, uq, querierCount[*UserQuery](), uq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (uq *UserQuery) CountX(ctx context.Context) int { + count, err := uq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
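+// e.g. an existence check that avoids loading full entities (sketch; "email"
+// is an assumed variable):
+//
+//	ok, err := client.User.Query().Where(user.EmailEQ(email)).Exist(ctx)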
+func (uq *UserQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, uq.ctx, ent.OpQueryExist) + switch _, err := uq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (uq *UserQuery) ExistX(ctx context.Context) bool { + exist, err := uq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UserQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (uq *UserQuery) Clone() *UserQuery { + if uq == nil { + return nil + } + return &UserQuery{ + config: uq.config, + ctx: uq.ctx.Clone(), + order: append([]user.OrderOption{}, uq.order...), + inters: append([]Interceptor{}, uq.inters...), + predicates: append([]predicate.User{}, uq.predicates...), + withRoles: uq.withRoles.Clone(), + withContributors: uq.withContributors.Clone(), + withMedia: uq.withMedia.Clone(), + // clone intermediate query. + sql: uq.sql.Clone(), + path: uq.path, + } +} + +// WithRoles tells the query-builder to eager-load the nodes that are connected to +// the "roles" edge. The optional arguments are used to configure the query builder of the edge. +func (uq *UserQuery) WithRoles(opts ...func(*RoleQuery)) *UserQuery { + query := (&RoleClient{config: uq.config}).Query() + for _, opt := range opts { + opt(query) + } + uq.withRoles = query + return uq +} + +// WithContributors tells the query-builder to eager-load the nodes that are connected to +// the "contributors" edge. The optional arguments are used to configure the query builder of the edge. +func (uq *UserQuery) WithContributors(opts ...func(*ContributorQuery)) *UserQuery { + query := (&ContributorClient{config: uq.config}).Query() + for _, opt := range opts { + opt(query) + } + uq.withContributors = query + return uq +} + +// WithMedia tells the query-builder to eager-load the nodes that are connected to +// the "media" edge. The optional arguments are used to configure the query builder of the edge. +func (uq *UserQuery) WithMedia(opts ...func(*MediaQuery)) *UserQuery { + query := (&MediaClient{config: uq.config}).Query() + for _, opt := range opts { + opt(query) + } + uq.withMedia = query + return uq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Email string `json:"email,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.User.Query(). +// GroupBy(user.FieldEmail). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy { + uq.ctx.Fields = append([]string{field}, fields...) + grbuild := &UserGroupBy{build: uq} + grbuild.flds = &uq.ctx.Fields + grbuild.label = user.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Email string `json:"email,omitempty"` +// } +// +// client.User.Query(). +// Select(user.FieldEmail). +// Scan(ctx, &v) +func (uq *UserQuery) Select(fields ...string) *UserSelect { + uq.ctx.Fields = append(uq.ctx.Fields, fields...) 
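+	// Selected fields are validated against user.Columns in prepareQuery, so
+	// an unknown column fails before the query reaches the database.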
+ sbuild := &UserSelect{UserQuery: uq} + sbuild.label = user.Label + sbuild.flds, sbuild.scan = &uq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserSelect configured with the given aggregations. +func (uq *UserQuery) Aggregate(fns ...AggregateFunc) *UserSelect { + return uq.Select().Aggregate(fns...) +} + +func (uq *UserQuery) prepareQuery(ctx context.Context) error { + for _, inter := range uq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, uq); err != nil { + return err + } + } + } + for _, f := range uq.ctx.Fields { + if !user.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if uq.path != nil { + prev, err := uq.path(ctx) + if err != nil { + return err + } + uq.sql = prev + } + return nil +} + +func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, error) { + var ( + nodes = []*User{} + _spec = uq.querySpec() + loadedTypes = [3]bool{ + uq.withRoles != nil, + uq.withContributors != nil, + uq.withMedia != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*User).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &User{config: uq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, uq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := uq.withRoles; query != nil { + if err := uq.loadRoles(ctx, query, nodes, + func(n *User) { n.Edges.Roles = []*Role{} }, + func(n *User, e *Role) { n.Edges.Roles = append(n.Edges.Roles, e) }); err != nil { + return nil, err + } + } + if query := uq.withContributors; query != nil { + if err := uq.loadContributors(ctx, query, nodes, + func(n *User) { n.Edges.Contributors = []*Contributor{} }, + func(n *User, e *Contributor) { n.Edges.Contributors = append(n.Edges.Contributors, e) }); err != nil { + return nil, err + } + } + if query := uq.withMedia; query != nil { + if err := uq.loadMedia(ctx, query, nodes, + func(n *User) { n.Edges.Media = []*Media{} }, + func(n *User, e *Media) { n.Edges.Media = append(n.Edges.Media, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (uq *UserQuery) loadRoles(ctx context.Context, query *RoleQuery, nodes []*User, init func(*User), assign func(*User, *Role)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int]*User) + nids := make(map[int]map[*User]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(user.RolesTable) + s.Join(joinT).On(s.C(role.FieldID), joinT.C(user.RolesPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(user.RolesPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(user.RolesPrimaryKey[0])) + s.AppendSelect(columns...) 
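+		// Distinct would collapse duplicate join rows that represent separate
+		// user/role edges, so it is disabled for this M2M load.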
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := int(values[0].(*sql.NullInt64).Int64) + inValue := int(values[1].(*sql.NullInt64).Int64) + if nids[inValue] == nil { + nids[inValue] = map[*User]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*Role](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "roles" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} +func (uq *UserQuery) loadContributors(ctx context.Context, query *ContributorQuery, nodes []*User, init func(*User), assign func(*User, *Contributor)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Contributor(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.ContributorsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.user_contributors + if fk == nil { + return fmt.Errorf(`foreign-key "user_contributors" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_contributors" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (uq *UserQuery) loadMedia(ctx context.Context, query *MediaQuery, nodes []*User, init func(*User), assign func(*User, *Media)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Media(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.MediaColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.user_media + if fk == nil { + return fmt.Errorf(`foreign-key "user_media" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_media" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (uq *UserQuery) sqlCount(ctx context.Context) (int, error) { + _spec := uq.querySpec() + _spec.Node.Columns = uq.ctx.Fields + if len(uq.ctx.Fields) > 0 { + _spec.Unique = uq.ctx.Unique != nil && *uq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, uq.driver, _spec) +} + +func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt)) + _spec.From = uq.sql + if unique := uq.ctx.Unique; unique != nil { + _spec.Unique = 
*unique + } else if uq.path != nil { + _spec.Unique = true + } + if fields := uq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) + for i := range fields { + if fields[i] != user.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := uq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := uq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := uq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := uq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(uq.driver.Dialect()) + t1 := builder.Table(user.Table) + columns := uq.ctx.Fields + if len(columns) == 0 { + columns = user.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if uq.sql != nil { + selector = uq.sql + selector.Select(selector.Columns(columns...)...) + } + if uq.ctx.Unique != nil && *uq.ctx.Unique { + selector.Distinct() + } + for _, p := range uq.predicates { + p(selector) + } + for _, p := range uq.order { + p(selector) + } + if offset := uq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := uq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// UserGroupBy is the group-by builder for User entities. +type UserGroupBy struct { + selector + build *UserQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (ugb *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy { + ugb.fns = append(ugb.fns, fns...) + return ugb +} + +// Scan applies the selector query and scans the result into the given value. +func (ugb *UserGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ugb.build.ctx, ent.OpQueryGroupBy) + if err := ugb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserQuery, *UserGroupBy](ctx, ugb.build, ugb, ugb.build.inters, v) +} + +func (ugb *UserGroupBy) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(ugb.fns)) + for _, fn := range ugb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*ugb.flds)+len(ugb.fns)) + for _, f := range *ugb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*ugb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ugb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserSelect is the builder for selecting fields of User entities. +type UserSelect struct { + *UserQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (us *UserSelect) Aggregate(fns ...AggregateFunc) *UserSelect { + us.fns = append(us.fns, fns...) 
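+	// The aggregations are applied in sqlScan: with no explicitly selected
+	// fields they replace the column list, otherwise they are appended to it.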
+ return us +} + +// Scan applies the selector query and scans the result into the given value. +func (us *UserSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, us.ctx, ent.OpQuerySelect) + if err := us.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserQuery, *UserSelect](ctx, us.UserQuery, us, us.inters, v) +} + +func (us *UserSelect) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(us.fns)) + for _, fn := range us.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*us.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := us.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/user_update.go b/backend/ent/user_update.go new file mode 100644 index 0000000..2c9c371 --- /dev/null +++ b/backend/ent/user_update.go @@ -0,0 +1,883 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + "tss-rocks-be/ent/contributor" + "tss-rocks-be/ent/media" + "tss-rocks-be/ent/predicate" + "tss-rocks-be/ent/role" + "tss-rocks-be/ent/user" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// UserUpdate is the builder for updating User entities. +type UserUpdate struct { + config + hooks []Hook + mutation *UserMutation +} + +// Where appends a list predicates to the UserUpdate builder. +func (uu *UserUpdate) Where(ps ...predicate.User) *UserUpdate { + uu.mutation.Where(ps...) + return uu +} + +// SetEmail sets the "email" field. +func (uu *UserUpdate) SetEmail(s string) *UserUpdate { + uu.mutation.SetEmail(s) + return uu +} + +// SetNillableEmail sets the "email" field if the given value is not nil. +func (uu *UserUpdate) SetNillableEmail(s *string) *UserUpdate { + if s != nil { + uu.SetEmail(*s) + } + return uu +} + +// SetPasswordHash sets the "password_hash" field. +func (uu *UserUpdate) SetPasswordHash(s string) *UserUpdate { + uu.mutation.SetPasswordHash(s) + return uu +} + +// SetNillablePasswordHash sets the "password_hash" field if the given value is not nil. +func (uu *UserUpdate) SetNillablePasswordHash(s *string) *UserUpdate { + if s != nil { + uu.SetPasswordHash(*s) + } + return uu +} + +// SetStatus sets the "status" field. +func (uu *UserUpdate) SetStatus(u user.Status) *UserUpdate { + uu.mutation.SetStatus(u) + return uu +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (uu *UserUpdate) SetNillableStatus(u *user.Status) *UserUpdate { + if u != nil { + uu.SetStatus(*u) + } + return uu +} + +// SetCreatedAt sets the "created_at" field. +func (uu *UserUpdate) SetCreatedAt(t time.Time) *UserUpdate { + uu.mutation.SetCreatedAt(t) + return uu +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (uu *UserUpdate) SetNillableCreatedAt(t *time.Time) *UserUpdate { + if t != nil { + uu.SetCreatedAt(*t) + } + return uu +} + +// SetUpdatedAt sets the "updated_at" field. +func (uu *UserUpdate) SetUpdatedAt(t time.Time) *UserUpdate { + uu.mutation.SetUpdatedAt(t) + return uu +} + +// AddRoleIDs adds the "roles" edge to the Role entity by IDs. 
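+// e.g. granting a role inside an update (sketch; "uid" and "roleID" are
+// assumed variables):
+//
+//	client.User.UpdateOneID(uid).AddRoleIDs(roleID).Save(ctx) // uid, roleID: assumed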
+func (uu *UserUpdate) AddRoleIDs(ids ...int) *UserUpdate { + uu.mutation.AddRoleIDs(ids...) + return uu +} + +// AddRoles adds the "roles" edges to the Role entity. +func (uu *UserUpdate) AddRoles(r ...*Role) *UserUpdate { + ids := make([]int, len(r)) + for i := range r { + ids[i] = r[i].ID + } + return uu.AddRoleIDs(ids...) +} + +// AddContributorIDs adds the "contributors" edge to the Contributor entity by IDs. +func (uu *UserUpdate) AddContributorIDs(ids ...int) *UserUpdate { + uu.mutation.AddContributorIDs(ids...) + return uu +} + +// AddContributors adds the "contributors" edges to the Contributor entity. +func (uu *UserUpdate) AddContributors(c ...*Contributor) *UserUpdate { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return uu.AddContributorIDs(ids...) +} + +// AddMediumIDs adds the "media" edge to the Media entity by IDs. +func (uu *UserUpdate) AddMediumIDs(ids ...int) *UserUpdate { + uu.mutation.AddMediumIDs(ids...) + return uu +} + +// AddMedia adds the "media" edges to the Media entity. +func (uu *UserUpdate) AddMedia(m ...*Media) *UserUpdate { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return uu.AddMediumIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. +func (uu *UserUpdate) Mutation() *UserMutation { + return uu.mutation +} + +// ClearRoles clears all "roles" edges to the Role entity. +func (uu *UserUpdate) ClearRoles() *UserUpdate { + uu.mutation.ClearRoles() + return uu +} + +// RemoveRoleIDs removes the "roles" edge to Role entities by IDs. +func (uu *UserUpdate) RemoveRoleIDs(ids ...int) *UserUpdate { + uu.mutation.RemoveRoleIDs(ids...) + return uu +} + +// RemoveRoles removes "roles" edges to Role entities. +func (uu *UserUpdate) RemoveRoles(r ...*Role) *UserUpdate { + ids := make([]int, len(r)) + for i := range r { + ids[i] = r[i].ID + } + return uu.RemoveRoleIDs(ids...) +} + +// ClearContributors clears all "contributors" edges to the Contributor entity. +func (uu *UserUpdate) ClearContributors() *UserUpdate { + uu.mutation.ClearContributors() + return uu +} + +// RemoveContributorIDs removes the "contributors" edge to Contributor entities by IDs. +func (uu *UserUpdate) RemoveContributorIDs(ids ...int) *UserUpdate { + uu.mutation.RemoveContributorIDs(ids...) + return uu +} + +// RemoveContributors removes "contributors" edges to Contributor entities. +func (uu *UserUpdate) RemoveContributors(c ...*Contributor) *UserUpdate { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return uu.RemoveContributorIDs(ids...) +} + +// ClearMedia clears all "media" edges to the Media entity. +func (uu *UserUpdate) ClearMedia() *UserUpdate { + uu.mutation.ClearMedia() + return uu +} + +// RemoveMediumIDs removes the "media" edge to Media entities by IDs. +func (uu *UserUpdate) RemoveMediumIDs(ids ...int) *UserUpdate { + uu.mutation.RemoveMediumIDs(ids...) + return uu +} + +// RemoveMedia removes "media" edges to Media entities. +func (uu *UserUpdate) RemoveMedia(m ...*Media) *UserUpdate { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return uu.RemoveMediumIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (uu *UserUpdate) Save(ctx context.Context) (int, error) { + uu.defaults() + return withHooks(ctx, uu.sqlSave, uu.mutation, uu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
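+//
+// In server code prefer Save, which surfaces the error; SaveX suits tests
+// and one-off scripts. A minimal sketch of the builder in use (assuming an
+// *ent.Client named client is already constructed):
+//
+//	n, err := client.User.Update().
+//		Where(user.EmailEQ("old@example.com")).
+//		SetEmail("new@example.com").
+//		Save(ctx)
+//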
+func (uu *UserUpdate) SaveX(ctx context.Context) int { + affected, err := uu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (uu *UserUpdate) Exec(ctx context.Context) error { + _, err := uu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (uu *UserUpdate) ExecX(ctx context.Context) { + if err := uu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (uu *UserUpdate) defaults() { + if _, ok := uu.mutation.UpdatedAt(); !ok { + v := user.UpdateDefaultUpdatedAt() + uu.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (uu *UserUpdate) check() error { + if v, ok := uu.mutation.Email(); ok { + if err := user.EmailValidator(v); err != nil { + return &ValidationError{Name: "email", err: fmt.Errorf(`ent: validator failed for field "User.email": %w`, err)} + } + } + if v, ok := uu.mutation.PasswordHash(); ok { + if err := user.PasswordHashValidator(v); err != nil { + return &ValidationError{Name: "password_hash", err: fmt.Errorf(`ent: validator failed for field "User.password_hash": %w`, err)} + } + } + if v, ok := uu.mutation.Status(); ok { + if err := user.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "User.status": %w`, err)} + } + } + return nil +} + +func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := uu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt)) + if ps := uu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := uu.mutation.Email(); ok { + _spec.SetField(user.FieldEmail, field.TypeString, value) + } + if value, ok := uu.mutation.PasswordHash(); ok { + _spec.SetField(user.FieldPasswordHash, field.TypeString, value) + } + if value, ok := uu.mutation.Status(); ok { + _spec.SetField(user.FieldStatus, field.TypeEnum, value) + } + if value, ok := uu.mutation.CreatedAt(); ok { + _spec.SetField(user.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := uu.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + } + if uu.mutation.RolesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.RolesTable, + Columns: user.RolesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.RemovedRolesIDs(); len(nodes) > 0 && !uu.mutation.RolesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.RolesTable, + Columns: user.RolesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.RolesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.RolesTable, + Columns: user.RolesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + for _, k := 
range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if uu.mutation.ContributorsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.ContributorsTable, + Columns: []string{user.ContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.RemovedContributorsIDs(); len(nodes) > 0 && !uu.mutation.ContributorsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.ContributorsTable, + Columns: []string{user.ContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.ContributorsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.ContributorsTable, + Columns: []string{user.ContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if uu.mutation.MediaCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.MediaTable, + Columns: []string{user.MediaColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(media.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.RemovedMediaIDs(); len(nodes) > 0 && !uu.mutation.MediaCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.MediaTable, + Columns: []string{user.MediaColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(media.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.MediaIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.MediaTable, + Columns: []string{user.MediaColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(media.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, uu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{user.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + uu.mutation.done = true + return n, nil +} + +// UserUpdateOne is the builder for updating a single User entity. +type UserUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserMutation +} + +// SetEmail sets the "email" field. +func (uuo *UserUpdateOne) SetEmail(s string) *UserUpdateOne { + uuo.mutation.SetEmail(s) + return uuo +} + +// SetNillableEmail sets the "email" field if the given value is not nil. 
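+//
+// The SetNillable* helpers enable PATCH-style partial updates: decode
+// optional JSON fields into pointers and pass them through unchanged, since
+// a nil pointer leaves the column as it is. Sketch (the request shape is
+// hypothetical):
+//
+//	var req struct {
+//		Email *string `json:"email"`
+//	}
+//	// after binding: uuo.SetNillableEmail(req.Email)
+//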
+func (uuo *UserUpdateOne) SetNillableEmail(s *string) *UserUpdateOne { + if s != nil { + uuo.SetEmail(*s) + } + return uuo +} + +// SetPasswordHash sets the "password_hash" field. +func (uuo *UserUpdateOne) SetPasswordHash(s string) *UserUpdateOne { + uuo.mutation.SetPasswordHash(s) + return uuo +} + +// SetNillablePasswordHash sets the "password_hash" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillablePasswordHash(s *string) *UserUpdateOne { + if s != nil { + uuo.SetPasswordHash(*s) + } + return uuo +} + +// SetStatus sets the "status" field. +func (uuo *UserUpdateOne) SetStatus(u user.Status) *UserUpdateOne { + uuo.mutation.SetStatus(u) + return uuo +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableStatus(u *user.Status) *UserUpdateOne { + if u != nil { + uuo.SetStatus(*u) + } + return uuo +} + +// SetCreatedAt sets the "created_at" field. +func (uuo *UserUpdateOne) SetCreatedAt(t time.Time) *UserUpdateOne { + uuo.mutation.SetCreatedAt(t) + return uuo +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableCreatedAt(t *time.Time) *UserUpdateOne { + if t != nil { + uuo.SetCreatedAt(*t) + } + return uuo +} + +// SetUpdatedAt sets the "updated_at" field. +func (uuo *UserUpdateOne) SetUpdatedAt(t time.Time) *UserUpdateOne { + uuo.mutation.SetUpdatedAt(t) + return uuo +} + +// AddRoleIDs adds the "roles" edge to the Role entity by IDs. +func (uuo *UserUpdateOne) AddRoleIDs(ids ...int) *UserUpdateOne { + uuo.mutation.AddRoleIDs(ids...) + return uuo +} + +// AddRoles adds the "roles" edges to the Role entity. +func (uuo *UserUpdateOne) AddRoles(r ...*Role) *UserUpdateOne { + ids := make([]int, len(r)) + for i := range r { + ids[i] = r[i].ID + } + return uuo.AddRoleIDs(ids...) +} + +// AddContributorIDs adds the "contributors" edge to the Contributor entity by IDs. +func (uuo *UserUpdateOne) AddContributorIDs(ids ...int) *UserUpdateOne { + uuo.mutation.AddContributorIDs(ids...) + return uuo +} + +// AddContributors adds the "contributors" edges to the Contributor entity. +func (uuo *UserUpdateOne) AddContributors(c ...*Contributor) *UserUpdateOne { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return uuo.AddContributorIDs(ids...) +} + +// AddMediumIDs adds the "media" edge to the Media entity by IDs. +func (uuo *UserUpdateOne) AddMediumIDs(ids ...int) *UserUpdateOne { + uuo.mutation.AddMediumIDs(ids...) + return uuo +} + +// AddMedia adds the "media" edges to the Media entity. +func (uuo *UserUpdateOne) AddMedia(m ...*Media) *UserUpdateOne { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return uuo.AddMediumIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. +func (uuo *UserUpdateOne) Mutation() *UserMutation { + return uuo.mutation +} + +// ClearRoles clears all "roles" edges to the Role entity. +func (uuo *UserUpdateOne) ClearRoles() *UserUpdateOne { + uuo.mutation.ClearRoles() + return uuo +} + +// RemoveRoleIDs removes the "roles" edge to Role entities by IDs. +func (uuo *UserUpdateOne) RemoveRoleIDs(ids ...int) *UserUpdateOne { + uuo.mutation.RemoveRoleIDs(ids...) + return uuo +} + +// RemoveRoles removes "roles" edges to Role entities. +func (uuo *UserUpdateOne) RemoveRoles(r ...*Role) *UserUpdateOne { + ids := make([]int, len(r)) + for i := range r { + ids[i] = r[i].ID + } + return uuo.RemoveRoleIDs(ids...) 
+} + +// ClearContributors clears all "contributors" edges to the Contributor entity. +func (uuo *UserUpdateOne) ClearContributors() *UserUpdateOne { + uuo.mutation.ClearContributors() + return uuo +} + +// RemoveContributorIDs removes the "contributors" edge to Contributor entities by IDs. +func (uuo *UserUpdateOne) RemoveContributorIDs(ids ...int) *UserUpdateOne { + uuo.mutation.RemoveContributorIDs(ids...) + return uuo +} + +// RemoveContributors removes "contributors" edges to Contributor entities. +func (uuo *UserUpdateOne) RemoveContributors(c ...*Contributor) *UserUpdateOne { + ids := make([]int, len(c)) + for i := range c { + ids[i] = c[i].ID + } + return uuo.RemoveContributorIDs(ids...) +} + +// ClearMedia clears all "media" edges to the Media entity. +func (uuo *UserUpdateOne) ClearMedia() *UserUpdateOne { + uuo.mutation.ClearMedia() + return uuo +} + +// RemoveMediumIDs removes the "media" edge to Media entities by IDs. +func (uuo *UserUpdateOne) RemoveMediumIDs(ids ...int) *UserUpdateOne { + uuo.mutation.RemoveMediumIDs(ids...) + return uuo +} + +// RemoveMedia removes "media" edges to Media entities. +func (uuo *UserUpdateOne) RemoveMedia(m ...*Media) *UserUpdateOne { + ids := make([]int, len(m)) + for i := range m { + ids[i] = m[i].ID + } + return uuo.RemoveMediumIDs(ids...) +} + +// Where appends a list predicates to the UserUpdate builder. +func (uuo *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne { + uuo.mutation.Where(ps...) + return uuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (uuo *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne { + uuo.fields = append([]string{field}, fields...) + return uuo +} + +// Save executes the query and returns the updated User entity. +func (uuo *UserUpdateOne) Save(ctx context.Context) (*User, error) { + uuo.defaults() + return withHooks(ctx, uuo.sqlSave, uuo.mutation, uuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (uuo *UserUpdateOne) SaveX(ctx context.Context) *User { + node, err := uuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (uuo *UserUpdateOne) Exec(ctx context.Context) error { + _, err := uuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (uuo *UserUpdateOne) ExecX(ctx context.Context) { + if err := uuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (uuo *UserUpdateOne) defaults() { + if _, ok := uuo.mutation.UpdatedAt(); !ok { + v := user.UpdateDefaultUpdatedAt() + uuo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
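+//
+// The Email, PasswordHash and Status validators called below are generated
+// from the rules declared on the schema fields, along the lines of this
+// illustrative (not exact) definition:
+//
+//	field.String("email").NotEmpty().Unique()
+//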
+func (uuo *UserUpdateOne) check() error { + if v, ok := uuo.mutation.Email(); ok { + if err := user.EmailValidator(v); err != nil { + return &ValidationError{Name: "email", err: fmt.Errorf(`ent: validator failed for field "User.email": %w`, err)} + } + } + if v, ok := uuo.mutation.PasswordHash(); ok { + if err := user.PasswordHashValidator(v); err != nil { + return &ValidationError{Name: "password_hash", err: fmt.Errorf(`ent: validator failed for field "User.password_hash": %w`, err)} + } + } + if v, ok := uuo.mutation.Status(); ok { + if err := user.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "User.status": %w`, err)} + } + } + return nil +} + +func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { + if err := uuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt)) + id, ok := uuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "User.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := uuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) + for _, f := range fields { + if !user.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != user.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := uuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := uuo.mutation.Email(); ok { + _spec.SetField(user.FieldEmail, field.TypeString, value) + } + if value, ok := uuo.mutation.PasswordHash(); ok { + _spec.SetField(user.FieldPasswordHash, field.TypeString, value) + } + if value, ok := uuo.mutation.Status(); ok { + _spec.SetField(user.FieldStatus, field.TypeEnum, value) + } + if value, ok := uuo.mutation.CreatedAt(); ok { + _spec.SetField(user.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := uuo.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + } + if uuo.mutation.RolesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.RolesTable, + Columns: user.RolesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.RemovedRolesIDs(); len(nodes) > 0 && !uuo.mutation.RolesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.RolesTable, + Columns: user.RolesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.RolesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.RolesTable, + Columns: user.RolesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(role.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if 
uuo.mutation.ContributorsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.ContributorsTable, + Columns: []string{user.ContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.RemovedContributorsIDs(); len(nodes) > 0 && !uuo.mutation.ContributorsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.ContributorsTable, + Columns: []string{user.ContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.ContributorsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.ContributorsTable, + Columns: []string{user.ContributorsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(contributor.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if uuo.mutation.MediaCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.MediaTable, + Columns: []string{user.MediaColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(media.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.RemovedMediaIDs(); len(nodes) > 0 && !uuo.mutation.MediaCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.MediaTable, + Columns: []string{user.MediaColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(media.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.MediaIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.MediaTable, + Columns: []string{user.MediaColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(media.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &User{config: uuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, uuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{user.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + uuo.mutation.done = true + return _node, nil +} diff --git a/backend/go.mod b/backend/go.mod new file mode 100644 index 0000000..5d2440a --- /dev/null +++ b/backend/go.mod @@ -0,0 +1,81 @@ +module tss-rocks-be + +go 1.23.6 + +require ( + bou.ke/monkey v1.0.2 + entgo.io/ent v0.14.1 + github.com/aws/aws-sdk-go-v2 v1.36.1 + github.com/aws/aws-sdk-go-v2/config v1.29.6 + github.com/aws/aws-sdk-go-v2/credentials v1.17.59 + github.com/aws/aws-sdk-go-v2/service/s3 v1.76.1 + github.com/chai2010/webp v1.1.1 + 
github.com/gin-gonic/gin v1.10.0 + github.com/golang-jwt/jwt/v5 v5.2.1 + github.com/google/uuid v1.6.0 + github.com/mattn/go-sqlite3 v1.14.24 + github.com/rs/zerolog v1.33.0 + github.com/stretchr/testify v1.10.0 + go.uber.org/mock v0.5.0 + golang.org/x/crypto v0.33.0 + golang.org/x/time v0.10.0 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + ariga.io/atlas v0.31.0 // indirect + github.com/agext/levenshtein v1.2.3 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 // indirect + github.com/aws/smithy-go v1.22.2 // indirect + github.com/bmatcuk/doublestar v1.3.4 // indirect + github.com/bytedance/sonic v1.12.8 // indirect + github.com/bytedance/sonic/loader v0.2.3 // indirect + github.com/cloudwego/base64x v0.1.5 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/gabriel-vasile/mimetype v1.4.8 // indirect + github.com/gin-contrib/sse v1.0.0 // indirect + github.com/go-openapi/inflect v0.21.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.24.0 // indirect + github.com/goccy/go-json v0.10.5 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/hashicorp/hcl/v2 v2.23.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect + github.com/zclconf/go-cty v1.16.2 // indirect + github.com/zclconf/go-cty-yaml v1.1.0 // indirect + golang.org/x/arch v0.14.0 // indirect + golang.org/x/mod v0.23.0 // indirect + golang.org/x/net v0.35.0 // indirect + golang.org/x/sync v0.11.0 // indirect + golang.org/x/sys v0.30.0 // indirect + golang.org/x/text v0.22.0 // indirect + golang.org/x/tools v0.30.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect +) diff --git a/backend/go.sum b/backend/go.sum new file mode 100644 index 0000000..2c155ee --- /dev/null +++ b/backend/go.sum @@ -0,0 +1,192 @@ +ariga.io/atlas v0.31.0 
h1:Nw6/Jdc7OpZfiy6oh/dJAYPp5XxGYvMTWLOUutwWjeY= +ariga.io/atlas v0.31.0/go.mod h1:J3chwsQAgjDF6Ostz7JmJJRTCbtqIupUbVR/gqZrMiA= +bou.ke/monkey v1.0.2 h1:kWcnsrCNUatbxncxR/ThdYqbytgOIArtYWqcQLQzKLI= +bou.ke/monkey v1.0.2/go.mod h1:OqickVX3tNx6t33n1xvtTtu85YN5s6cKwVug+oHMaIA= +entgo.io/ent v0.14.1 h1:fUERL506Pqr92EPHJqr8EYxbPioflJo6PudkrEA8a/s= +entgo.io/ent v0.14.1/go.mod h1:MH6XLG0KXpkcDQhKiHfANZSzR55TJyPL5IGNpI8wpco= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/aws/aws-sdk-go-v2 v1.36.1 h1:iTDl5U6oAhkNPba0e1t1hrwAo02ZMqbrGq4k5JBWM5E= +github.com/aws/aws-sdk-go-v2 v1.36.1/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg= +github.com/aws/aws-sdk-go-v2/config v1.29.6 h1:fqgqEKK5HaZVWLQoLiC9Q+xDlSp+1LYidp6ybGE2OGg= +github.com/aws/aws-sdk-go-v2/config v1.29.6/go.mod h1:Ft+WLODzDQmCTHDvqAH1JfC2xxbZ0MxpZAcJqmE1LTQ= +github.com/aws/aws-sdk-go-v2/credentials v1.17.59 h1:9btwmrt//Q6JcSdgJOLI98sdr5p7tssS9yAsGe8aKP4= +github.com/aws/aws-sdk-go-v2/credentials v1.17.59/go.mod h1:NM8fM6ovI3zak23UISdWidyZuI1ghNe2xjzUZAyT+08= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 h1:KwsodFKVQTlI5EyhRSugALzsV6mG/SGrdjlMXSZSdso= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28/go.mod h1:EY3APf9MzygVhKuPXAc5H+MkGb8k/DOSQjWS0LgkKqI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 h1:BjUcr3X3K0wZPGFg2bxOWW3VPN8rkE3/61zhP+IHviA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32/go.mod h1:80+OGC/bgzzFFTUmcuwD0lb4YutwQeKLFpmt6hoWapU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 h1:m1GeXHVMJsRsUAqG6HjZWx9dj7F5TR+cF1bjyfYyBd4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32/go.mod h1:IitoQxGfaKdVLNg0hD8/DXmAqNy0H4K2H2Sf91ti8sI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 h1:OIHj/nAhVzIXGzbAE+4XmZ8FPvro3THr6NlqErJc3wY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32/go.mod h1:LiBEsDo34OJXqdDlRGsilhlIiXR7DL+6Cx2f4p1EgzI= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0 h1:kT2WeWcFySdYpPgyqJMSUE7781Qucjtn6wBvrgm9P+M= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0/go.mod h1:WYH1ABybY7JK9TITPnk6ZlP7gQB8psI4c9qDmMsnLSA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 h1:SYVGSFQHlchIcy6e7x12bsrxClCXSP5et8cqVhL8cuw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13/go.mod 
h1:kizuDaLX37bG5WZaoxGPQR/LNFXpxp0vsUnqfkWXfNE= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 h1:OBsrtam3rk8NfBEq7OLOMm5HtQ9Yyw32X4UQMya/wjw= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13/go.mod h1:3U4gFA5pmoCOja7aq4nSaIAGbaOHv2Yl2ug018cmC+Q= +github.com/aws/aws-sdk-go-v2/service/s3 v1.76.1 h1:d4ZG8mELlLeUWFBMCqPtRfEP3J6aQgg/KTC9jLSlkMs= +github.com/aws/aws-sdk-go-v2/service/s3 v1.76.1/go.mod h1:uZoEIR6PzGOZEjgAZE4hfYfsqK2zOHhq68JLKEvvXj4= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 h1:/eE3DogBjYlvlbhd2ssWyeuovWunHLxfgw3s/OJa4GQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.15/go.mod h1:2PCJYpi7EKeA5SkStAmZlF6fi0uUABuhtF8ILHjGc3Y= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 h1:M/zwXiL2iXUrHputuXgmO94TVNmcenPHxgLXLutodKE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14/go.mod h1:RVwIw3y/IqxC2YEXSIkAzRDdEU1iRabDPaYjpGCbCGQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 h1:TzeR06UCMUq+KA3bDkujxK1GVGy+G8qQN/QVYzGLkQE= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.14/go.mod h1:dspXf/oYWGWo6DEvj98wpaTeqt5+DMidZD0A9BYTizc= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= +github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/bytedance/sonic v1.12.8 h1:4xYRVRlXIgvSZ4e8iVTlMF5szgpXd4AfvuWgA8I8lgs= +github.com/bytedance/sonic v1.12.8/go.mod h1:uVvFidNmlt9+wa31S1urfwwthTWteBgG0hWuoKAXTx8= +github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= +github.com/bytedance/sonic/loader v0.2.3 h1:yctD0Q3v2NOGfSWPLPvG2ggA2kV6TS6s4wioyEqssH0= +github.com/bytedance/sonic/loader v0.2.3/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= +github.com/chai2010/webp v1.1.1 h1:jTRmEccAJ4MGrhFOrPMpNGIJ/eybIgwKpcACsrTEapk= +github.com/chai2010/webp v1.1.1/go.mod h1:0XVwvZWdjjdxpUEIf7b9g9VkHFnInUSYujwqTLEuldU= +github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= +github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= +github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= +github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E= +github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0= +github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= +github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= +github.com/go-openapi/inflect v0.21.0 h1:FoBjBTQEcbg2cJUWX6uwL9OyIW8eqc9k4KhN4lfbeYk= +github.com/go-openapi/inflect v0.21.0/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 
v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.24.0 h1:KHQckvo8G6hlWnrPX4NJJ+aBfWNAE/HH+qdL2cBpCmg= +github.com/go-playground/validator/v10 v10.24.0/go.mod h1:GGzBIJMuE98Ic/kJsBXbz1x/7cByt++cQ+YOuDM5wus= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= +github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod 
h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= +github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= +github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.2.12 
h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70= +github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= +github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0= +github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= +golang.org/x/arch v0.14.0 h1:z9JUEZWr8x4rR0OU6c4/4t6E6jOZ8/QBS2bBYBm4tx4= +golang.org/x/arch v0.14.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= +golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= +golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= diff --git a/backend/internal/auth/auth.go 
b/backend/internal/auth/auth.go new file mode 100644 index 0000000..a408cbc --- /dev/null +++ b/backend/internal/auth/auth.go @@ -0,0 +1,6 @@ +package auth + +// Constants for auth-related context keys +const ( + UserIDKey = "user_id" +) diff --git a/backend/internal/auth/auth_test.go b/backend/internal/auth/auth_test.go new file mode 100644 index 0000000..109a7fe --- /dev/null +++ b/backend/internal/auth/auth_test.go @@ -0,0 +1,27 @@ +package auth + +import ( + "context" + "testing" +) + +func TestUserIDKey(t *testing.T) { + // Test that the UserIDKey constant is defined correctly + if UserIDKey != "user_id" { + t.Errorf("UserIDKey = %v, want %v", UserIDKey, "user_id") + } + + // Test context with user ID + ctx := context.WithValue(context.Background(), UserIDKey, "test-user-123") + value := ctx.Value(UserIDKey) + if value != "test-user-123" { + t.Errorf("Context value = %v, want %v", value, "test-user-123") + } + + // Test context without user ID + emptyCtx := context.Background() + emptyValue := emptyCtx.Value(UserIDKey) + if emptyValue != nil { + t.Errorf("Empty context value = %v, want nil", emptyValue) + } +} diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go new file mode 100644 index 0000000..3b4864c --- /dev/null +++ b/backend/internal/config/config.go @@ -0,0 +1,74 @@ +package config + +import ( + "os" + + "gopkg.in/yaml.v3" + "tss-rocks-be/internal/types" +) + +type Config struct { + Database DatabaseConfig `yaml:"database"` + Server ServerConfig `yaml:"server"` + JWT JWTConfig `yaml:"jwt"` + Storage StorageConfig `yaml:"storage"` + Logging LoggingConfig `yaml:"logging"` + RateLimit types.RateLimitConfig `yaml:"rate_limit"` + AccessLog types.AccessLogConfig `yaml:"access_log"` +} + +type DatabaseConfig struct { + Driver string `yaml:"driver"` + DSN string `yaml:"dsn"` +} + +type ServerConfig struct { + Port int `yaml:"port"` + Host string `yaml:"host"` +} + +type JWTConfig struct { + Secret string `yaml:"secret"` + Expiration string `yaml:"expiration"` +} + +type LoggingConfig struct { + Level string `yaml:"level"` + Format string `yaml:"format"` +} + +type StorageConfig struct { + Type string `yaml:"type"` + Local LocalStorage `yaml:"local"` + S3 S3Storage `yaml:"s3"` + Upload types.UploadConfig `yaml:"upload"` +} + +type LocalStorage struct { + RootDir string `yaml:"root_dir"` +} + +type S3Storage struct { + Region string `yaml:"region"` + Bucket string `yaml:"bucket"` + AccessKeyID string `yaml:"access_key_id"` + SecretAccessKey string `yaml:"secret_access_key"` + Endpoint string `yaml:"endpoint"` + CustomURL string `yaml:"custom_url"` + ProxyS3 bool `yaml:"proxy_s3"` +} + +// Load loads configuration from a YAML file +func Load(path string) (*Config, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var cfg Config + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, err + } + + return &cfg, nil +} diff --git a/backend/internal/config/config_test.go b/backend/internal/config/config_test.go new file mode 100644 index 0000000..800f5b0 --- /dev/null +++ b/backend/internal/config/config_test.go @@ -0,0 +1,85 @@ +package config + +import ( + "os" + "path/filepath" + "testing" +) + +func TestLoad(t *testing.T) { + // Create a temporary test config file + content := []byte(` +database: + driver: postgres + dsn: postgres://user:pass@localhost:5432/dbname +server: + port: 8080 + host: localhost +jwt: + secret: test-secret + expiration: 24h +storage: + type: local + local: + root_dir: /tmp/storage + 
upload: + max_size: 10485760 +logging: + level: info + format: json +`) + + tmpDir := t.TempDir() + configPath := filepath.Join(tmpDir, "config.yaml") + if err := os.WriteFile(configPath, content, 0644); err != nil { + t.Fatalf("Failed to write test config: %v", err) + } + + // Test loading config + cfg, err := Load(configPath) + if err != nil { + t.Fatalf("Load() error = %v", err) + } + + // Verify loaded values + tests := []struct { + name string + got interface{} + want interface{} + errorMsg string + }{ + {"Database Driver", cfg.Database.Driver, "postgres", "incorrect database driver"}, + {"Server Port", cfg.Server.Port, 8080, "incorrect server port"}, + {"JWT Secret", cfg.JWT.Secret, "test-secret", "incorrect JWT secret"}, + {"Storage Type", cfg.Storage.Type, "local", "incorrect storage type"}, + {"Logging Level", cfg.Logging.Level, "info", "incorrect logging level"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.got != tt.want { + t.Errorf("%s = %v, want %v", tt.name, tt.got, tt.want) + } + }) + } +} + +func TestLoadError(t *testing.T) { + // Test loading non-existent file + _, err := Load("non-existent-file.yaml") + if err == nil { + t.Error("Load() error = nil, want error for non-existent file") + } + + // Test loading invalid YAML + tmpDir := t.TempDir() + invalidPath := filepath.Join(tmpDir, "invalid.yaml") + if err := os.WriteFile(invalidPath, []byte("invalid: }{yaml"), 0644); err != nil { + t.Fatalf("Failed to write invalid config: %v", err) + } + + _, err = Load(invalidPath) + if err == nil { + t.Error("Load() error = nil, want error for invalid YAML") + } +} diff --git a/backend/internal/handler/auth.go b/backend/internal/handler/auth.go new file mode 100644 index 0000000..d3fa471 --- /dev/null +++ b/backend/internal/handler/auth.go @@ -0,0 +1,119 @@ +package handler + +import ( + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/golang-jwt/jwt/v5" + "github.com/rs/zerolog/log" +) + +type RegisterRequest struct { + Email string `json:"email" binding:"required,email"` + Password string `json:"password" binding:"required,min=8"` + Role string `json:"role" binding:"required,oneof=admin editor contributor"` +} + +type LoginRequest struct { + Email string `json:"email" binding:"required,email"` + Password string `json:"password" binding:"required"` +} + +type AuthResponse struct { + Token string `json:"token"` +} + +func (h *Handler) Register(c *gin.Context) { + var req RegisterRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + user, err := h.service.CreateUser(c.Request.Context(), req.Email, req.Password, req.Role) + if err != nil { + log.Error().Err(err).Msg("Failed to create user") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create user"}) + return + } + + // Get user roles + roles, err := h.service.GetUserRoles(c.Request.Context(), user.ID) + if err != nil { + log.Error().Err(err).Msg("Failed to get user roles") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get user roles"}) + return + } + + // Extract role names for JWT + roleNames := make([]string, len(roles)) + for i, r := range roles { + roleNames[i] = r.Name + } + + // Generate JWT token + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "sub": user.ID, + "roles": roleNames, + "exp": time.Now().Add(24 * time.Hour).Unix(), + }) + + tokenString, err := token.SignedString([]byte(h.cfg.JWT.Secret)) + if err != nil { + 
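// With HS256 and a []byte secret, SignedString can realistically fail
+		// only if the key type is wrong, so this branch is close to
+		// unreachable; the error is still logged and mapped to a 500 rather
+		// than silently dropped.
+		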
log.Error().Err(err).Msg("Failed to generate token")
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to generate token"})
+		return
+	}
+
+	c.JSON(http.StatusCreated, AuthResponse{Token: tokenString})
+}
+
+func (h *Handler) Login(c *gin.Context) {
+	var req LoginRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+		return
+	}
+
+	user, err := h.service.GetUserByEmail(c.Request.Context(), req.Email)
+	if err != nil {
+		c.JSON(http.StatusUnauthorized, gin.H{"error": "Invalid credentials"})
+		return
+	}
+
+	if !h.service.ValidatePassword(c.Request.Context(), user, req.Password) {
+		c.JSON(http.StatusUnauthorized, gin.H{"error": "Invalid credentials"})
+		return
+	}
+
+	// Get user roles
+	roles, err := h.service.GetUserRoles(c.Request.Context(), user.ID)
+	if err != nil {
+		log.Error().Err(err).Msg("Failed to get user roles")
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get user roles"})
+		return
+	}
+
+	// Extract role names for JWT
+	roleNames := make([]string, len(roles))
+	for i, r := range roles {
+		roleNames[i] = r.Name
+	}
+
+	// Generate JWT token
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
+		"sub":   user.ID,
+		"roles": roleNames,
+		"exp":   time.Now().Add(24 * time.Hour).Unix(),
+	})
+
+	tokenString, err := token.SignedString([]byte(h.cfg.JWT.Secret))
+	if err != nil {
+		log.Error().Err(err).Msg("Failed to generate token")
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to generate token"})
+		return
+	}
+
+	c.JSON(http.StatusOK, AuthResponse{Token: tokenString})
+}
diff --git a/backend/internal/handler/auth_handler_test.go b/backend/internal/handler/auth_handler_test.go
new file mode 100644
index 0000000..0c366f2
--- /dev/null
+++ b/backend/internal/handler/auth_handler_test.go
@@ -0,0 +1,276 @@
+package handler
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"tss-rocks-be/ent"
+	"tss-rocks-be/internal/config"
+	"tss-rocks-be/internal/service/mock"
+
+	"github.com/gin-gonic/gin"
+	"github.com/stretchr/testify/suite"
+	"go.uber.org/mock/gomock"
+)
+
+type AuthHandlerTestSuite struct {
+	suite.Suite
+	ctrl    *gomock.Controller
+	service *mock.MockService
+	handler *Handler
+	router  *gin.Engine
+}
+
+func (s *AuthHandlerTestSuite) SetupTest() {
+	s.ctrl = gomock.NewController(s.T())
+	s.service = mock.NewMockService(s.ctrl)
+	s.handler = NewHandler(&config.Config{
+		JWT: config.JWTConfig{
+			Secret: "test-secret",
+		},
+	}, s.service)
+	s.router = gin.New()
+}
+
+func (s *AuthHandlerTestSuite) TearDownTest() {
+	s.ctrl.Finish()
+}
+
+func TestAuthHandlerSuite(t *testing.T) {
+	suite.Run(t, new(AuthHandlerTestSuite))
+}
+
+func (s *AuthHandlerTestSuite) TestRegister() {
+	testCases := []struct {
+		name           string
+		request        RegisterRequest
+		setupMock      func()
+		expectedStatus int
+		expectedError  string
+	}{
+		{
+			name: "successful registration",
+			request: RegisterRequest{
+				Email:    "test@example.com",
+				Password: "password123",
+				Role:     "contributor",
+			},
+			setupMock: func() {
+				user := &ent.User{
+					ID:    1,
+					Email: "test@example.com",
+				}
+				s.service.EXPECT().
+					CreateUser(gomock.Any(), "test@example.com", "password123", "contributor").
+					Return(user, nil)
+				s.service.EXPECT().
+					GetUserRoles(gomock.Any(), user.ID).
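+					// The role stubbed below is what Register embeds in the
+					// JWT "roles" claim, so a non-empty token implies the
+					// whole happy path ran.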
+					Return([]*ent.Role{{ID: 1, Name: "contributor"}}, nil)
+			},
+			expectedStatus: http.StatusCreated,
+		},
+		{
+			name: "invalid email format",
+			request: RegisterRequest{
+				Email:    "invalid-email",
+				Password: "password123",
+				Role:     "contributor",
+			},
+			setupMock:      func() {},
+			expectedStatus: http.StatusBadRequest,
+			expectedError:  "Key: 'RegisterRequest.Email' Error:Field validation for 'Email' failed on the 'email' tag",
+		},
+		{
+			name: "password too short",
+			request: RegisterRequest{
+				Email:    "test@example.com",
+				Password: "short",
+				Role:     "contributor",
+			},
+			setupMock:      func() {},
+			expectedStatus: http.StatusBadRequest,
+			expectedError:  "Key: 'RegisterRequest.Password' Error:Field validation for 'Password' failed on the 'min' tag",
+		},
+		{
+			name: "invalid role",
+			request: RegisterRequest{
+				Email:    "test@example.com",
+				Password: "password123",
+				Role:     "invalid-role",
+			},
+			setupMock:      func() {},
+			expectedStatus: http.StatusBadRequest,
+			expectedError:  "Key: 'RegisterRequest.Role' Error:Field validation for 'Role' failed on the 'oneof' tag",
+		},
+	}
+
+	for _, tc := range testCases {
+		s.Run(tc.name, func() {
+			// Set up the mock
+			tc.setupMock()
+
+			// Build the request
+			reqBody, _ := json.Marshal(tc.request)
+			req, _ := http.NewRequest(http.MethodPost, "/register", bytes.NewBuffer(reqBody))
+			req.Header.Set("Content-Type", "application/json")
+			w := httptest.NewRecorder()
+			c, _ := gin.CreateTestContext(w)
+			c.Request = req
+
+			// Execute the request
+			s.handler.Register(c)
+
+			// Verify the response
+			s.Equal(tc.expectedStatus, w.Code)
+			if tc.expectedError != "" {
+				var response map[string]string
+				err := json.Unmarshal(w.Body.Bytes(), &response)
+				s.NoError(err)
+				s.Contains(response["error"], tc.expectedError)
+			} else {
+				var response AuthResponse
+				err := json.Unmarshal(w.Body.Bytes(), &response)
+				s.NoError(err)
+				s.NotEmpty(response.Token)
+			}
+		})
+	}
+}
+
+func (s *AuthHandlerTestSuite) TestLogin() {
+	testCases := []struct {
+		name           string
+		request        LoginRequest
+		setupMock      func()
+		expectedStatus int
+		expectedError  string
+	}{
+		{
+			name: "successful login",
+			request: LoginRequest{
+				Email:    "test@example.com",
+				Password: "password123",
+			},
+			setupMock: func() {
+				user := &ent.User{
+					ID:    1,
+					Email: "test@example.com",
+				}
+				s.service.EXPECT().
+					GetUserByEmail(gomock.Any(), "test@example.com").
+					Return(user, nil)
+				s.service.EXPECT().
+					ValidatePassword(gomock.Any(), user, "password123").
+					Return(true)
+				s.service.EXPECT().
+					GetUserRoles(gomock.Any(), user.ID).
+					Return([]*ent.Role{{ID: 1, Name: "contributor"}}, nil)
+			},
+			expectedStatus: http.StatusOK,
+		},
+		{
+			name: "invalid email format",
+			request: LoginRequest{
+				Email:    "invalid-email",
+				Password: "password123",
+			},
+			setupMock:      func() {},
+			expectedStatus: http.StatusBadRequest,
+			expectedError:  "Key: 'LoginRequest.Email' Error:Field validation for 'Email' failed on the 'email' tag",
+		},
+		{
+			name: "user not found",
+			request: LoginRequest{
+				Email:    "nonexistent@example.com",
+				Password: "password123",
+			},
+			setupMock: func() {
+				s.service.EXPECT().
+					GetUserByEmail(gomock.Any(), "nonexistent@example.com").
+					Return(nil, errors.New("user not found"))
+			},
+			expectedStatus: http.StatusUnauthorized,
+			expectedError:  "Invalid credentials",
+		},
+		{
+			name: "wrong password",
+			request: LoginRequest{
+				Email:    "test@example.com",
+				Password: "wrong-password",
+			},
+			setupMock: func() {
+				user := &ent.User{
+					ID:    1,
+					Email: "test@example.com",
+				}
+				s.service.EXPECT().
+					GetUserByEmail(gomock.Any(), "test@example.com").
+					Return(user, nil)
+				s.service.EXPECT().
+					ValidatePassword(gomock.Any(), user, "wrong-password").
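+					// Stubbing false here must yield the same generic
+					// "Invalid credentials" body as the unknown-user case,
+					// so the endpoint cannot be used to enumerate accounts.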
+					Return(false)
+			},
+			expectedStatus: http.StatusUnauthorized,
+			expectedError:  "Invalid credentials",
+		},
+		{
+			name: "failed to get user roles",
+			request: LoginRequest{
+				Email:    "test@example.com",
+				Password: "password123",
+			},
+			setupMock: func() {
+				user := &ent.User{
+					ID:    1,
+					Email: "test@example.com",
+				}
+				s.service.EXPECT().
+					GetUserByEmail(gomock.Any(), "test@example.com").
+					Return(user, nil)
+				s.service.EXPECT().
+					ValidatePassword(gomock.Any(), user, "password123").
+					Return(true)
+				s.service.EXPECT().
+					GetUserRoles(gomock.Any(), user.ID).
+					Return(nil, errors.New("failed to get roles"))
+			},
+			expectedStatus: http.StatusInternalServerError,
+			expectedError:  "Failed to get user roles",
+		},
+	}
+
+	for _, tc := range testCases {
+		s.Run(tc.name, func() {
+			// Set up mock
+			tc.setupMock()
+
+			// Create request
+			reqBody, _ := json.Marshal(tc.request)
+			req, _ := http.NewRequest(http.MethodPost, "/login", bytes.NewBuffer(reqBody))
+			req.Header.Set("Content-Type", "application/json")
+			w := httptest.NewRecorder()
+			c, _ := gin.CreateTestContext(w)
+			c.Request = req
+
+			// Execute request
+			s.handler.Login(c)
+
+			// Verify response
+			s.Equal(tc.expectedStatus, w.Code)
+			if tc.expectedError != "" {
+				var response map[string]string
+				err := json.Unmarshal(w.Body.Bytes(), &response)
+				s.NoError(err)
+				s.Contains(response["error"], tc.expectedError)
+			} else {
+				var response AuthResponse
+				err := json.Unmarshal(w.Body.Bytes(), &response)
+				s.NoError(err)
+				s.NotEmpty(response.Token)
+			}
+		})
+	}
+}
diff --git a/backend/internal/handler/category_handler_test.go b/backend/internal/handler/category_handler_test.go
new file mode 100644
index 0000000..a5da5f0
--- /dev/null
+++ b/backend/internal/handler/category_handler_test.go
@@ -0,0 +1,468 @@
+package handler
+
+import (
+	"bytes"
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"tss-rocks-be/ent"
+	"tss-rocks-be/ent/categorycontent"
+	"tss-rocks-be/internal/config"
+	"tss-rocks-be/internal/service/mock"
+	"tss-rocks-be/internal/types"
+
+	"github.com/gin-gonic/gin"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/suite"
+	"go.uber.org/mock/gomock"
+
+	"errors"
+)
+
+// Custom assertion function for comparing categories
+func assertCategoryEqual(t assert.TestingT, expected, actual *ent.Category) bool {
+	if expected == nil && actual == nil {
+		return true
+	}
+	if expected == nil || actual == nil {
+		return assert.Fail(t, "One category is nil while the other is not")
+	}
+
+	// Compare only relevant fields, ignoring time fields
+	return assert.Equal(t, expected.ID, actual.ID) &&
+		assert.Equal(t, expected.Edges.Contents, actual.Edges.Contents)
+}
+
+// Custom assertion function for comparing category slices
+func assertCategorySliceEqual(t assert.TestingT, expected, actual []*ent.Category) bool {
+	if len(expected) != len(actual) {
+		return assert.Fail(t, "Category slice lengths do not match")
+	}
+
+	for i := range expected {
+		if !assertCategoryEqual(t, expected[i], actual[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+type CategoryHandlerTestSuite struct {
+	suite.Suite
+	ctrl    *gomock.Controller
+	service *mock.MockService
+	handler *Handler
+	router  *gin.Engine
+}
+
+func (s *CategoryHandlerTestSuite) SetupTest() {
+	s.ctrl = gomock.NewController(s.T())
+	s.service = mock.NewMockService(s.ctrl)
+	cfg := &config.Config{}
+	s.handler = NewHandler(cfg, s.service)
+
+	// Setup Gin router
+	gin.SetMode(gin.TestMode)
+	s.router = gin.New()
+	s.handler.RegisterRoutes(s.router)
+}
+
+func (s *CategoryHandlerTestSuite) TearDownTest() {
+
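+	// Finish verifies that every EXPECT() registered during the
+	// test case was actually satisfied.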
s.ctrl.Finish() +} + +func TestCategoryHandlerSuite(t *testing.T) { + suite.Run(t, new(CategoryHandlerTestSuite)) +} + +// Test cases for ListCategories +func (s *CategoryHandlerTestSuite) TestListCategories() { + testCases := []struct { + name string + langCode string + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success with default language", + langCode: "", + setupMock: func() { + s.service.EXPECT(). + ListCategories(gomock.Any(), gomock.Eq("en")). + Return([]*ent.Category{ + { + ID: 1, + Edges: ent.CategoryEdges{ + Contents: []*ent.CategoryContent{ + { + LanguageCode: categorycontent.LanguageCode("en"), + Name: "Test Category", + Description: "Test Description", + Slug: "test-category", + }, + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: []*ent.Category{ + { + ID: 1, + Edges: ent.CategoryEdges{ + Contents: []*ent.CategoryContent{ + { + LanguageCode: categorycontent.LanguageCode("en"), + Name: "Test Category", + Description: "Test Description", + Slug: "test-category", + }, + }, + }, + }, + }, + }, + { + name: "Success with specific language", + langCode: "zh", + setupMock: func() { + s.service.EXPECT(). + ListCategories(gomock.Any(), gomock.Eq("zh")). + Return([]*ent.Category{ + { + ID: 1, + Edges: ent.CategoryEdges{ + Contents: []*ent.CategoryContent{ + { + LanguageCode: categorycontent.LanguageCode("zh"), + Name: "测试分类", + Description: "测试描述", + Slug: "test-category", + }, + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: []*ent.Category{ + { + ID: 1, + Edges: ent.CategoryEdges{ + Contents: []*ent.CategoryContent{ + { + LanguageCode: categorycontent.LanguageCode("zh"), + Name: "测试分类", + Description: "测试描述", + Slug: "test-category", + }, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + // Setup mock + tc.setupMock() + + // Create request + url := "/api/v1/categories" + if tc.langCode != "" { + url += "?lang=" + tc.langCode + } + req := httptest.NewRequest(http.MethodGet, url, nil) + w := httptest.NewRecorder() + + // Perform request + s.router.ServeHTTP(w, req) + + // Assert response + assert.Equal(s.T(), tc.expectedStatus, w.Code) + if tc.expectedBody != nil { + var response []*ent.Category + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(s.T(), err) + assertCategorySliceEqual(s.T(), tc.expectedBody.([]*ent.Category), response) + } + }) + } +} + +// Test cases for GetCategory +func (s *CategoryHandlerTestSuite) TestGetCategory() { + testCases := []struct { + name string + langCode string + slug string + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success", + langCode: "en", + slug: "test-category", + setupMock: func() { + s.service.EXPECT(). + GetCategoryBySlug(gomock.Any(), gomock.Eq("en"), gomock.Eq("test-category")). 
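+					// gomock.Eq pins the exact language code and slug,
+					// proving the handler forwards the query and path
+					// parameters to the service unchanged.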
+ Return(&ent.Category{ + ID: 1, + Edges: ent.CategoryEdges{ + Contents: []*ent.CategoryContent{ + { + LanguageCode: categorycontent.LanguageCode("en"), + Name: "Test Category", + Description: "Test Description", + Slug: "test-category", + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: &ent.Category{ + ID: 1, + Edges: ent.CategoryEdges{ + Contents: []*ent.CategoryContent{ + { + LanguageCode: categorycontent.LanguageCode("en"), + Name: "Test Category", + Description: "Test Description", + Slug: "test-category", + }, + }, + }, + }, + }, + { + name: "Not Found", + langCode: "en", + slug: "non-existent", + setupMock: func() { + s.service.EXPECT(). + GetCategoryBySlug(gomock.Any(), gomock.Eq("en"), gomock.Eq("non-existent")). + Return(nil, types.ErrNotFound) + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + // Setup mock + tc.setupMock() + + // Create request + url := "/api/v1/categories/" + tc.slug + if tc.langCode != "" { + url += "?lang=" + tc.langCode + } + req := httptest.NewRequest(http.MethodGet, url, nil) + w := httptest.NewRecorder() + + // Perform request + s.router.ServeHTTP(w, req) + + // Assert response + assert.Equal(s.T(), tc.expectedStatus, w.Code) + if tc.expectedBody != nil { + var response ent.Category + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(s.T(), err) + assertCategoryEqual(s.T(), tc.expectedBody.(*ent.Category), &response) + } + }) + } +} + +// Test cases for AddCategoryContent +func (s *CategoryHandlerTestSuite) TestAddCategoryContent() { + var description = "Test Description" + testCases := []struct { + name string + categoryID string + requestBody interface{} + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success", + categoryID: "1", + requestBody: AddCategoryContentRequest{ + LanguageCode: "en", + Name: "Test Category", + Description: &description, + Slug: "test-category", + }, + setupMock: func() { + s.service.EXPECT(). + AddCategoryContent( + gomock.Any(), + 1, + "en", + "Test Category", + description, + "test-category", + ). + Return(&ent.CategoryContent{ + LanguageCode: categorycontent.LanguageCode("en"), + Name: "Test Category", + Description: description, + Slug: "test-category", + }, nil) + }, + expectedStatus: http.StatusCreated, + expectedBody: &ent.CategoryContent{ + LanguageCode: categorycontent.LanguageCode("en"), + Name: "Test Category", + Description: description, + Slug: "test-category", + }, + }, + { + name: "Invalid JSON", + categoryID: "1", + requestBody: "invalid json", + setupMock: func() {}, + expectedStatus: http.StatusBadRequest, + }, + { + name: "Invalid Category ID", + categoryID: "invalid", + requestBody: AddCategoryContentRequest{ + LanguageCode: "en", + Name: "Test Category", + Description: &description, + Slug: "test-category", + }, + setupMock: func() {}, + expectedStatus: http.StatusBadRequest, + }, + { + name: "Service Error", + categoryID: "1", + requestBody: AddCategoryContentRequest{ + LanguageCode: "en", + Name: "Test Category", + Description: &description, + Slug: "test-category", + }, + setupMock: func() { + s.service.EXPECT(). + AddCategoryContent( + gomock.Any(), + 1, + "en", + "Test Category", + description, + "test-category", + ). 
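+					// An opaque service failure must surface as a 500 with
+					// a sanitized message, never the raw error string.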
+ Return(nil, errors.New("service error")) + }, + expectedStatus: http.StatusInternalServerError, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + // Setup mock + tc.setupMock() + + // Create request + var body []byte + var err error + if str, ok := tc.requestBody.(string); ok { + body = []byte(str) + } else { + body, err = json.Marshal(tc.requestBody) + s.NoError(err) + } + + req := httptest.NewRequest(http.MethodPost, "/api/v1/categories/"+tc.categoryID+"/contents", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + + // Perform request + s.router.ServeHTTP(w, req) + + // Assert response + s.Equal(tc.expectedStatus, w.Code) + if tc.expectedBody != nil { + var response ent.CategoryContent + err := json.Unmarshal(w.Body.Bytes(), &response) + s.NoError(err) + s.Equal(tc.expectedBody, &response) + } + }) + } +} + +// Test cases for CreateCategory +func (s *CategoryHandlerTestSuite) TestCreateCategory() { + testCases := []struct { + name string + setupMock func() + expectedStatus int + expectedError string + }{ + { + name: "成功创建分类", + setupMock: func() { + category := &ent.Category{ + ID: 1, + } + s.service.EXPECT(). + CreateCategory(gomock.Any()). + Return(category, nil) + }, + expectedStatus: http.StatusCreated, + }, + { + name: "创建分类失败", + setupMock: func() { + s.service.EXPECT(). + CreateCategory(gomock.Any()). + Return(nil, errors.New("failed to create category")) + }, + expectedStatus: http.StatusInternalServerError, + expectedError: "Failed to create category", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + // 设置 mock + tc.setupMock() + + // 创建请求 + req, _ := http.NewRequest(http.MethodPost, "/categories", nil) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = req + + // 执行请求 + s.handler.CreateCategory(c) + + // 验证响应 + s.Equal(tc.expectedStatus, w.Code) + if tc.expectedError != "" { + var response map[string]string + err := json.Unmarshal(w.Body.Bytes(), &response) + s.NoError(err) + s.Equal(tc.expectedError, response["error"]) + } else { + var response *ent.Category + err := json.Unmarshal(w.Body.Bytes(), &response) + s.NoError(err) + s.NotNil(response) + s.Equal(1, response.ID) + } + }) + } +} diff --git a/backend/internal/handler/contributor_handler_test.go b/backend/internal/handler/contributor_handler_test.go new file mode 100644 index 0000000..142cf1a --- /dev/null +++ b/backend/internal/handler/contributor_handler_test.go @@ -0,0 +1,443 @@ +package handler + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + "tss-rocks-be/ent" + "tss-rocks-be/internal/config" + "tss-rocks-be/internal/service/mock" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/suite" + "go.uber.org/mock/gomock" + + "errors" +) + +type ContributorHandlerTestSuite struct { + suite.Suite + ctrl *gomock.Controller + service *mock.MockService + handler *Handler + router *gin.Engine +} + +func (s *ContributorHandlerTestSuite) SetupTest() { + s.ctrl = gomock.NewController(s.T()) + s.service = mock.NewMockService(s.ctrl) + cfg := &config.Config{} + s.handler = NewHandler(cfg, s.service) + + // Setup Gin router + gin.SetMode(gin.TestMode) + s.router = gin.New() + s.handler.RegisterRoutes(s.router) +} + +func (s *ContributorHandlerTestSuite) TearDownTest() { + s.ctrl.Finish() +} + +func TestContributorHandlerSuite(t *testing.T) { + suite.Run(t, new(ContributorHandlerTestSuite)) +} + +func (s *ContributorHandlerTestSuite) 
TestListContributors() { + testCases := []struct { + name string + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success", + setupMock: func() { + s.service.EXPECT(). + ListContributors(gomock.Any()). + Return([]*ent.Contributor{ + { + ID: 1, + Name: "John Doe", + Edges: ent.ContributorEdges{ + SocialLinks: []*ent.ContributorSocialLink{ + { + Type: "github", + Value: "https://github.com/johndoe", + Edges: ent.ContributorSocialLinkEdges{}, + }, + }, + }, + CreatedAt: time.Time{}, + UpdatedAt: time.Time{}, + }, + { + ID: 2, + Name: "Jane Smith", + Edges: ent.ContributorEdges{ + SocialLinks: []*ent.ContributorSocialLink{}, // Ensure empty SocialLinks array is present + }, + CreatedAt: time.Time{}, + UpdatedAt: time.Time{}, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: []gin.H{ + { + "id": 1, + "name": "John Doe", + "created_at": time.Time{}, + "updated_at": time.Time{}, + "edges": gin.H{ + "social_links": []gin.H{ + { + "type": "github", + "value": "https://github.com/johndoe", + "edges": gin.H{}, + }, + }, + }, + }, + { + "id": 2, + "name": "Jane Smith", + "created_at": time.Time{}, + "updated_at": time.Time{}, + "edges": gin.H{ + "social_links": []gin.H{}, // Ensure empty SocialLinks array is present + }, + }, + }, + }, + { + name: "Service error", + setupMock: func() { + s.service.EXPECT(). + ListContributors(gomock.Any()). + Return(nil, errors.New("service error")) + }, + expectedStatus: http.StatusInternalServerError, + expectedBody: gin.H{"error": "Failed to list contributors"}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + tc.setupMock() + + req := httptest.NewRequest(http.MethodGet, "/api/v1/contributors", nil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + s.Equal(tc.expectedStatus, w.Code, "HTTP status code mismatch") + + if tc.expectedBody != nil { + expectedJSON, err := json.Marshal(tc.expectedBody) + s.NoError(err, "Failed to marshal expected body") + s.JSONEq(string(expectedJSON), w.Body.String(), "Response body mismatch") + } + }) + } +} + +func (s *ContributorHandlerTestSuite) TestGetContributor() { + testCases := []struct { + name string + id string + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success", + id: "1", + setupMock: func() { + s.service.EXPECT(). + GetContributorByID(gomock.Any(), 1). + Return(&ent.Contributor{ + ID: 1, + Name: "John Doe", + Edges: ent.ContributorEdges{ + SocialLinks: []*ent.ContributorSocialLink{ + { + Type: "github", + Value: "https://github.com/johndoe", + Edges: ent.ContributorSocialLinkEdges{}, + }, + }, + }, + CreatedAt: time.Time{}, + UpdatedAt: time.Time{}, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: gin.H{ + "id": 1, + "name": "John Doe", + "created_at": time.Time{}, + "updated_at": time.Time{}, + "edges": gin.H{ + "social_links": []gin.H{ + { + "type": "github", + "value": "https://github.com/johndoe", + "edges": gin.H{}, + }, + }, + }, + }, + }, + { + name: "Invalid ID", + id: "invalid", + setupMock: func() {}, + expectedStatus: http.StatusBadRequest, + expectedBody: gin.H{"error": "Invalid contributor ID"}, + }, + { + name: "Service error", + id: "1", + setupMock: func() { + s.service.EXPECT(). + GetContributorByID(gomock.Any(), 1). 
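+					// Any service-layer error here maps to a plain 500;
+					// GetContributor has no distinct not-found branch.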
+ Return(nil, errors.New("service error")) + }, + expectedStatus: http.StatusInternalServerError, + expectedBody: gin.H{"error": "Failed to get contributor"}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + tc.setupMock() + + req := httptest.NewRequest(http.MethodGet, "/api/v1/contributors/"+tc.id, nil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + s.Equal(tc.expectedStatus, w.Code, "HTTP status code mismatch") + + if tc.expectedBody != nil { + expectedJSON, err := json.Marshal(tc.expectedBody) + s.NoError(err, "Failed to marshal expected body") + s.JSONEq(string(expectedJSON), w.Body.String(), "Response body mismatch") + } + }) + } +} + +func (s *ContributorHandlerTestSuite) TestCreateContributor() { + testCases := []struct { + name string + body interface{} + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success", + body: CreateContributorRequest{ + Name: "John Doe", + }, + setupMock: func() { + name := "John Doe" + s.service.EXPECT(). + CreateContributor( + gomock.Any(), + name, + nil, + nil, + ). + Return(&ent.Contributor{ + ID: 1, + Name: name, + CreatedAt: time.Time{}, + UpdatedAt: time.Time{}, + }, nil) + }, + expectedStatus: http.StatusCreated, + expectedBody: gin.H{ + "id": 1, + "name": "John Doe", + "created_at": time.Time{}, + "updated_at": time.Time{}, + "edges": gin.H{}, + }, + }, + { + name: "Invalid request body", + body: map[string]interface{}{ + "name": "", // Empty name is not allowed + }, + setupMock: func() {}, + expectedStatus: http.StatusBadRequest, + expectedBody: gin.H{"error": "Key: 'CreateContributorRequest.Name' Error:Field validation for 'Name' failed on the 'required' tag"}, + }, + { + name: "Service error", + body: CreateContributorRequest{ + Name: "John Doe", + }, + setupMock: func() { + name := "John Doe" + s.service.EXPECT(). + CreateContributor( + gomock.Any(), + name, + nil, + nil, + ). + Return(nil, errors.New("service error")) + }, + expectedStatus: http.StatusInternalServerError, + expectedBody: gin.H{"error": "Failed to create contributor"}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + tc.setupMock() + + body, err := json.Marshal(tc.body) + s.NoError(err, "Failed to marshal request body") + + req := httptest.NewRequest(http.MethodPost, "/api/v1/contributors", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + s.Equal(tc.expectedStatus, w.Code, "HTTP status code mismatch") + + if tc.expectedBody != nil { + expectedJSON, err := json.Marshal(tc.expectedBody) + s.NoError(err, "Failed to marshal expected body") + s.JSONEq(string(expectedJSON), w.Body.String(), "Response body mismatch") + } + }) + } +} + +func (s *ContributorHandlerTestSuite) TestAddContributorSocialLink() { + testCases := []struct { + name string + id string + body interface{} + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success", + id: "1", + body: func() AddContributorSocialLinkRequest { + name := "johndoe" + return AddContributorSocialLinkRequest{ + Type: "github", + Name: &name, + Value: "https://github.com/johndoe", + } + }(), + setupMock: func() { + name := "johndoe" + s.service.EXPECT(). + AddContributorSocialLink( + gomock.Any(), + 1, + "github", + name, + "https://github.com/johndoe", + ). 
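+					// The mock sees the dereferenced "johndoe" because the
+					// handler unwraps the optional *string Name before
+					// calling the service.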
+ Return(&ent.ContributorSocialLink{ + Type: "github", + Name: name, + Value: "https://github.com/johndoe", + Edges: ent.ContributorSocialLinkEdges{}, + }, nil) + }, + expectedStatus: http.StatusCreated, + expectedBody: gin.H{ + "type": "github", + "name": "johndoe", + "value": "https://github.com/johndoe", + "edges": gin.H{}, + }, + }, + { + name: "Invalid contributor ID", + id: "invalid", + body: func() AddContributorSocialLinkRequest { + name := "johndoe" + return AddContributorSocialLinkRequest{ + Type: "github", + Name: &name, + Value: "https://github.com/johndoe", + } + }(), + setupMock: func() {}, + expectedStatus: http.StatusBadRequest, + expectedBody: gin.H{"error": "Invalid contributor ID"}, + }, + { + name: "Invalid request body", + id: "1", + body: map[string]interface{}{ + "type": "", // Empty type is not allowed + }, + setupMock: func() {}, + expectedStatus: http.StatusBadRequest, + expectedBody: gin.H{"error": "Key: 'AddContributorSocialLinkRequest.Type' Error:Field validation for 'Type' failed on the 'required' tag\nKey: 'AddContributorSocialLinkRequest.Value' Error:Field validation for 'Value' failed on the 'required' tag"}, + }, + { + name: "Service error", + id: "1", + body: func() AddContributorSocialLinkRequest { + name := "johndoe" + return AddContributorSocialLinkRequest{ + Type: "github", + Name: &name, + Value: "https://github.com/johndoe", + } + }(), + setupMock: func() { + name := "johndoe" + s.service.EXPECT(). + AddContributorSocialLink( + gomock.Any(), + 1, + "github", + name, + "https://github.com/johndoe", + ). + Return(nil, errors.New("service error")) + }, + expectedStatus: http.StatusInternalServerError, + expectedBody: gin.H{"error": "Failed to add contributor social link"}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + tc.setupMock() + + body, err := json.Marshal(tc.body) + s.NoError(err, "Failed to marshal request body") + + req := httptest.NewRequest(http.MethodPost, "/api/v1/contributors/"+tc.id+"/social-links", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + s.Equal(tc.expectedStatus, w.Code, "HTTP status code mismatch") + + if tc.expectedBody != nil { + expectedJSON, err := json.Marshal(tc.expectedBody) + s.NoError(err, "Failed to marshal expected body") + s.JSONEq(string(expectedJSON), w.Body.String(), "Response body mismatch") + } + }) + } +} diff --git a/backend/internal/handler/daily_handler_test.go b/backend/internal/handler/daily_handler_test.go new file mode 100644 index 0000000..94e39ee --- /dev/null +++ b/backend/internal/handler/daily_handler_test.go @@ -0,0 +1,519 @@ +package handler + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "tss-rocks-be/ent" + "tss-rocks-be/internal/config" + "tss-rocks-be/internal/service/mock" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/suite" + "go.uber.org/mock/gomock" + + "errors" + "strings" +) + +type DailyHandlerTestSuite struct { + suite.Suite + ctrl *gomock.Controller + service *mock.MockService + handler *Handler + router *gin.Engine +} + +func (s *DailyHandlerTestSuite) SetupTest() { + s.ctrl = gomock.NewController(s.T()) + s.service = mock.NewMockService(s.ctrl) + cfg := &config.Config{} + s.handler = NewHandler(cfg, s.service) + + // Setup Gin router + gin.SetMode(gin.TestMode) + s.router = gin.New() + s.handler.RegisterRoutes(s.router) +} + +func (s *DailyHandlerTestSuite) TearDownTest() { + s.ctrl.Finish() +} + +func 
TestDailyHandlerSuite(t *testing.T) { + suite.Run(t, new(DailyHandlerTestSuite)) +} + +func (s *DailyHandlerTestSuite) TestListDailies() { + testCases := []struct { + name string + langCode string + categoryID string + limit string + offset string + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success with default language", + langCode: "", + setupMock: func() { + s.service.EXPECT(). + ListDailies(gomock.Any(), "en", nil, 10, 0). + Return([]*ent.Daily{ + { + ID: "daily1", + ImageURL: "https://example.com/image1.jpg", + Edges: ent.DailyEdges{ + Category: &ent.Category{ID: 1}, + Contents: []*ent.DailyContent{ + { + LanguageCode: "en", + Quote: "Test Quote 1", + }, + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: []*ent.Daily{ + { + ID: "daily1", + ImageURL: "https://example.com/image1.jpg", + Edges: ent.DailyEdges{ + Category: &ent.Category{ID: 1}, + Contents: []*ent.DailyContent{ + { + LanguageCode: "en", + Quote: "Test Quote 1", + }, + }, + }, + }, + }, + }, + { + name: "Success with specific language", + langCode: "zh", + setupMock: func() { + s.service.EXPECT(). + ListDailies(gomock.Any(), "zh", nil, 10, 0). + Return([]*ent.Daily{ + { + ID: "daily1", + ImageURL: "https://example.com/image1.jpg", + Edges: ent.DailyEdges{ + Category: &ent.Category{ID: 1}, + Contents: []*ent.DailyContent{ + { + LanguageCode: "zh", + Quote: "测试语录1", + }, + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: []*ent.Daily{ + { + ID: "daily1", + ImageURL: "https://example.com/image1.jpg", + Edges: ent.DailyEdges{ + Category: &ent.Category{ID: 1}, + Contents: []*ent.DailyContent{ + { + LanguageCode: "zh", + Quote: "测试语录1", + }, + }, + }, + }, + }, + }, + { + name: "Success with category filter", + categoryID: "1", + setupMock: func() { + categoryID := 1 + s.service.EXPECT(). + ListDailies(gomock.Any(), "en", &categoryID, 10, 0). + Return([]*ent.Daily{ + { + ID: "daily1", + ImageURL: "https://example.com/image1.jpg", + Edges: ent.DailyEdges{ + Category: &ent.Category{ID: 1}, + Contents: []*ent.DailyContent{ + { + LanguageCode: "en", + Quote: "Test Quote 1", + }, + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: []*ent.Daily{ + { + ID: "daily1", + ImageURL: "https://example.com/image1.jpg", + Edges: ent.DailyEdges{ + Category: &ent.Category{ID: 1}, + Contents: []*ent.DailyContent{ + { + LanguageCode: "en", + Quote: "Test Quote 1", + }, + }, + }, + }, + }, + }, + { + name: "Success with pagination", + limit: "2", + offset: "1", + setupMock: func() { + s.service.EXPECT(). + ListDailies(gomock.Any(), "en", nil, 2, 1). + Return([]*ent.Daily{ + { + ID: "daily2", + ImageURL: "https://example.com/image2.jpg", + Edges: ent.DailyEdges{ + Category: &ent.Category{ID: 1}, + Contents: []*ent.DailyContent{ + { + LanguageCode: "en", + Quote: "Test Quote 2", + }, + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: []*ent.Daily{ + { + ID: "daily2", + ImageURL: "https://example.com/image2.jpg", + Edges: ent.DailyEdges{ + Category: &ent.Category{ID: 1}, + Contents: []*ent.DailyContent{ + { + LanguageCode: "en", + Quote: "Test Quote 2", + }, + }, + }, + }, + }, + }, + { + name: "Service Error", + setupMock: func() { + s.service.EXPECT(). + ListDailies(gomock.Any(), "en", nil, 10, 0). 
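+					// A nil category plus the 10/0 pagination values mirror
+					// the handler's defaults when no query parameters are
+					// supplied.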
+ Return(nil, errors.New("service error")) + }, + expectedStatus: http.StatusInternalServerError, + expectedBody: gin.H{"error": "Failed to list dailies"}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + tc.setupMock() + + url := "/api/v1/dailies" + if tc.langCode != "" { + url += "?lang=" + tc.langCode + } + if tc.categoryID != "" { + if strings.Contains(url, "?") { + url += "&" + } else { + url += "?" + } + url += "category_id=" + tc.categoryID + } + if tc.limit != "" { + if strings.Contains(url, "?") { + url += "&" + } else { + url += "?" + } + url += "limit=" + tc.limit + } + if tc.offset != "" { + if strings.Contains(url, "?") { + url += "&" + } else { + url += "?" + } + url += "offset=" + tc.offset + } + + req := httptest.NewRequest(http.MethodGet, url, nil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + s.Equal(tc.expectedStatus, w.Code, "HTTP status code mismatch") + + if tc.expectedBody != nil { + expectedJSON, err := json.Marshal(tc.expectedBody) + s.NoError(err, "Failed to marshal expected body") + s.JSONEq(string(expectedJSON), w.Body.String(), "Response body mismatch") + } + }) + } +} + +func (s *DailyHandlerTestSuite) TestGetDaily() { + testCases := []struct { + name string + id string + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success", + id: "daily1", + setupMock: func() { + s.service.EXPECT(). + GetDailyByID(gomock.Any(), "daily1"). + Return(&ent.Daily{ + ID: "daily1", + ImageURL: "https://example.com/image1.jpg", + Edges: ent.DailyEdges{ + Category: &ent.Category{ID: 1}, + Contents: []*ent.DailyContent{ + { + LanguageCode: "en", + Quote: "Test Quote 1", + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: &ent.Daily{ + ID: "daily1", + ImageURL: "https://example.com/image1.jpg", + Edges: ent.DailyEdges{ + Category: &ent.Category{ID: 1}, + Contents: []*ent.DailyContent{ + { + LanguageCode: "en", + Quote: "Test Quote 1", + }, + }, + }, + }, + }, + { + name: "Service error", + id: "daily1", + setupMock: func() { + s.service.EXPECT(). + GetDailyByID(gomock.Any(), "daily1"). + Return(nil, errors.New("service error")) + }, + expectedStatus: http.StatusInternalServerError, + expectedBody: gin.H{"error": "Failed to get daily"}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + tc.setupMock() + + req := httptest.NewRequest(http.MethodGet, "/api/v1/dailies/"+tc.id, nil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + s.Equal(tc.expectedStatus, w.Code, "HTTP status code mismatch") + + if tc.expectedBody != nil { + expectedJSON, err := json.Marshal(tc.expectedBody) + s.NoError(err, "Failed to marshal expected body") + s.JSONEq(string(expectedJSON), w.Body.String(), "Response body mismatch") + } + }) + } +} + +func (s *DailyHandlerTestSuite) TestCreateDaily() { + testCases := []struct { + name string + body interface{} + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success", + body: CreateDailyRequest{ + ID: "daily1", + CategoryID: 1, + ImageURL: "https://example.com/image1.jpg", + }, + setupMock: func() { + s.service.EXPECT(). + CreateDaily(gomock.Any(), "daily1", 1, "https://example.com/image1.jpg"). 
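+					// The stub echoes the request fields back so the test
+					// can check the 201 body matches what was submitted.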
+ Return(&ent.Daily{ + ID: "daily1", + ImageURL: "https://example.com/image1.jpg", + Edges: ent.DailyEdges{ + Category: &ent.Category{ID: 1}, + Contents: []*ent.DailyContent{}, + }, + }, nil) + }, + expectedStatus: http.StatusCreated, + expectedBody: &ent.Daily{ + ID: "daily1", + ImageURL: "https://example.com/image1.jpg", + Edges: ent.DailyEdges{ + Category: &ent.Category{ID: 1}, + Contents: []*ent.DailyContent{}, + }, + }, + }, + { + name: "Invalid request body", + body: map[string]interface{}{ + "id": "daily1", + // Missing required fields + }, + setupMock: func() {}, + expectedStatus: http.StatusBadRequest, + expectedBody: gin.H{"error": "Key: 'CreateDailyRequest.CategoryID' Error:Field validation for 'CategoryID' failed on the 'required' tag\nKey: 'CreateDailyRequest.ImageURL' Error:Field validation for 'ImageURL' failed on the 'required' tag"}, + }, + { + name: "Service error", + body: CreateDailyRequest{ + ID: "daily1", + CategoryID: 1, + ImageURL: "https://example.com/image1.jpg", + }, + setupMock: func() { + s.service.EXPECT(). + CreateDaily(gomock.Any(), "daily1", 1, "https://example.com/image1.jpg"). + Return(nil, errors.New("service error")) + }, + expectedStatus: http.StatusInternalServerError, + expectedBody: gin.H{"error": "Failed to create daily"}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + tc.setupMock() + + body, err := json.Marshal(tc.body) + s.NoError(err, "Failed to marshal request body") + + req := httptest.NewRequest(http.MethodPost, "/api/v1/dailies", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + s.Equal(tc.expectedStatus, w.Code, "HTTP status code mismatch") + + if tc.expectedBody != nil { + expectedJSON, err := json.Marshal(tc.expectedBody) + s.NoError(err, "Failed to marshal expected body") + s.JSONEq(string(expectedJSON), w.Body.String(), "Response body mismatch") + } + }) + } +} + +func (s *DailyHandlerTestSuite) TestAddDailyContent() { + testCases := []struct { + name string + dailyID string + body interface{} + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success", + dailyID: "daily1", + body: AddDailyContentRequest{ + LanguageCode: "en", + Quote: "Test Quote 1", + }, + setupMock: func() { + s.service.EXPECT(). + AddDailyContent(gomock.Any(), "daily1", "en", "Test Quote 1"). + Return(&ent.DailyContent{ + LanguageCode: "en", + Quote: "Test Quote 1", + }, nil) + }, + expectedStatus: http.StatusCreated, + expectedBody: &ent.DailyContent{ + LanguageCode: "en", + Quote: "Test Quote 1", + }, + }, + { + name: "Invalid request body", + dailyID: "daily1", + body: map[string]interface{}{ + "language_code": "en", + // Missing required fields + }, + setupMock: func() {}, + expectedStatus: http.StatusBadRequest, + expectedBody: gin.H{"error": "Key: 'AddDailyContentRequest.Quote' Error:Field validation for 'Quote' failed on the 'required' tag"}, + }, + { + name: "Service error", + dailyID: "daily1", + body: AddDailyContentRequest{ + LanguageCode: "en", + Quote: "Test Quote 1", + }, + setupMock: func() { + s.service.EXPECT(). + AddDailyContent(gomock.Any(), "daily1", "en", "Test Quote 1"). 
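+					// As with the other handlers, the internal error is
+					// reduced to a generic "Failed to add daily content" 500.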
+ Return(nil, errors.New("service error")) + }, + expectedStatus: http.StatusInternalServerError, + expectedBody: gin.H{"error": "Failed to add daily content"}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + tc.setupMock() + + body, err := json.Marshal(tc.body) + s.NoError(err, "Failed to marshal request body") + + req := httptest.NewRequest(http.MethodPost, "/api/v1/dailies/"+tc.dailyID+"/contents", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + s.Equal(tc.expectedStatus, w.Code, "HTTP status code mismatch") + + if tc.expectedBody != nil { + expectedJSON, err := json.Marshal(tc.expectedBody) + s.NoError(err, "Failed to marshal expected body") + s.JSONEq(string(expectedJSON), w.Body.String(), "Response body mismatch") + } + }) + } +} diff --git a/backend/internal/handler/handler.go b/backend/internal/handler/handler.go new file mode 100644 index 0000000..baa7d8d --- /dev/null +++ b/backend/internal/handler/handler.go @@ -0,0 +1,513 @@ +package handler + +import ( + "net/http" + "strconv" + "tss-rocks-be/internal/config" + "tss-rocks-be/internal/service" + "tss-rocks-be/internal/types" + + "github.com/gin-gonic/gin" + "github.com/rs/zerolog/log" +) + +type Handler struct { + cfg *config.Config + service service.Service +} + +func NewHandler(cfg *config.Config, service service.Service) *Handler { + return &Handler{ + cfg: cfg, + service: service, + } +} + +// RegisterRoutes registers all the routes +func (h *Handler) RegisterRoutes(r *gin.Engine) { + api := r.Group("/api/v1") + { + // Auth routes + auth := api.Group("/auth") + { + auth.POST("/register", h.Register) + auth.POST("/login", h.Login) + } + + // Category routes + categories := api.Group("/categories") + { + categories.GET("", h.ListCategories) + categories.GET("/:slug", h.GetCategory) + categories.POST("", h.CreateCategory) + categories.POST("/:id/contents", h.AddCategoryContent) + } + + // Post routes + posts := api.Group("/posts") + { + posts.GET("", h.ListPosts) + posts.GET("/:slug", h.GetPost) + posts.POST("", h.CreatePost) + posts.POST("/:id/contents", h.AddPostContent) + } + + // Contributor routes + contributors := api.Group("/contributors") + { + contributors.GET("", h.ListContributors) + contributors.GET("/:id", h.GetContributor) + contributors.POST("", h.CreateContributor) + contributors.POST("/:id/social-links", h.AddContributorSocialLink) + } + + // Daily routes + dailies := api.Group("/dailies") + { + dailies.GET("", h.ListDailies) + dailies.GET("/:id", h.GetDaily) + dailies.POST("", h.CreateDaily) + dailies.POST("/:id/contents", h.AddDailyContent) + } + + // Media routes + media := api.Group("/media") + { + media.GET("", h.ListMedia) + media.POST("", h.UploadMedia) + media.GET("/:id", h.GetMedia) + media.DELETE("/:id", h.DeleteMedia) + } + } +} + +// Category handlers +func (h *Handler) ListCategories(c *gin.Context) { + langCode := c.Query("lang") + if langCode == "" { + langCode = "en" // Default to English + } + + categories, err := h.service.ListCategories(c.Request.Context(), langCode) + if err != nil { + log.Error().Err(err).Msg("Failed to list categories") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to list categories"}) + return + } + + c.JSON(http.StatusOK, categories) +} + +func (h *Handler) GetCategory(c *gin.Context) { + langCode := c.Query("lang") + if langCode == "" { + langCode = "en" // Default to English + } + + slug := c.Param("slug") + category, err := 
h.service.GetCategoryBySlug(c.Request.Context(), langCode, slug) + if err != nil { + if err == types.ErrNotFound { + c.JSON(http.StatusNotFound, gin.H{"error": "Category not found"}) + return + } + log.Error().Err(err).Msg("Failed to get category") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get category"}) + return + } + + c.JSON(http.StatusOK, category) +} + +func (h *Handler) CreateCategory(c *gin.Context) { + category, err := h.service.CreateCategory(c.Request.Context()) + if err != nil { + log.Error().Err(err).Msg("Failed to create category") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create category"}) + return + } + + c.JSON(http.StatusCreated, category) +} + +type AddCategoryContentRequest struct { + LanguageCode string `json:"language_code" binding:"required"` + Name string `json:"name" binding:"required"` + Description *string `json:"description"` + Slug string `json:"slug" binding:"required"` +} + +func (h *Handler) AddCategoryContent(c *gin.Context) { + var req AddCategoryContentRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + categoryID, err := strconv.Atoi(c.Param("id")) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid category ID"}) + return + } + + var description string + if req.Description != nil { + description = *req.Description + } + + content, err := h.service.AddCategoryContent(c.Request.Context(), categoryID, req.LanguageCode, req.Name, description, req.Slug) + if err != nil { + log.Error().Err(err).Msg("Failed to add category content") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to add category content"}) + return + } + + c.JSON(http.StatusCreated, content) +} + +// Post handlers +func (h *Handler) ListPosts(c *gin.Context) { + langCode := c.Query("lang") + if langCode == "" { + langCode = "en" // Default to English + } + + var categoryID *int + if catIDStr := c.Query("category_id"); catIDStr != "" { + if id, err := strconv.Atoi(catIDStr); err == nil { + categoryID = &id + } + } + + limit := 10 // Default limit + if limitStr := c.Query("limit"); limitStr != "" { + if l, err := strconv.Atoi(limitStr); err == nil && l > 0 { + limit = l + } + } + + offset := 0 // Default offset + if offsetStr := c.Query("offset"); offsetStr != "" { + if o, err := strconv.Atoi(offsetStr); err == nil && o >= 0 { + offset = o + } + } + + posts, err := h.service.ListPosts(c.Request.Context(), langCode, categoryID, limit, offset) + if err != nil { + log.Error().Err(err).Msg("Failed to list posts") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to list posts"}) + return + } + + c.JSON(http.StatusOK, posts) +} + +func (h *Handler) GetPost(c *gin.Context) { + langCode := c.Query("lang") + if langCode == "" { + langCode = "en" // Default to English + } + + slug := c.Param("slug") + post, err := h.service.GetPostBySlug(c.Request.Context(), langCode, slug) + if err != nil { + log.Error().Err(err).Msg("Failed to get post") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get post"}) + return + } + + // Convert to a map to control the fields + response := gin.H{ + "id": post.ID, + "status": post.Status, + "slug": post.Slug, + "edges": gin.H{ + "contents": []gin.H{}, + }, + } + + contents := make([]gin.H, 0, len(post.Edges.Contents)) + for _, content := range post.Edges.Contents { + contents = append(contents, gin.H{ + "language_code": content.LanguageCode, + "title": content.Title, + 
"content_markdown": content.ContentMarkdown, + "summary": content.Summary, + }) + } + response["edges"].(gin.H)["contents"] = contents + + c.JSON(http.StatusOK, response) +} + +func (h *Handler) CreatePost(c *gin.Context) { + post, err := h.service.CreatePost(c.Request.Context(), "draft") // Default to draft status + if err != nil { + log.Error().Err(err).Msg("Failed to create post") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create post"}) + return + } + + // Convert to a map to control the fields + response := gin.H{ + "id": post.ID, + "status": post.Status, + "edges": gin.H{ + "contents": []interface{}{}, + }, + } + + c.JSON(http.StatusCreated, response) +} + +type AddPostContentRequest struct { + LanguageCode string `json:"language_code" binding:"required"` + Title string `json:"title" binding:"required"` + ContentMarkdown string `json:"content_markdown" binding:"required"` + Summary string `json:"summary" binding:"required"` + MetaKeywords string `json:"meta_keywords"` + MetaDescription string `json:"meta_description"` +} + +func (h *Handler) AddPostContent(c *gin.Context) { + var req AddPostContentRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + postID, err := strconv.Atoi(c.Param("id")) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid post ID"}) + return + } + + content, err := h.service.AddPostContent(c.Request.Context(), postID, req.LanguageCode, req.Title, req.ContentMarkdown, req.Summary, req.MetaKeywords, req.MetaDescription) + if err != nil { + log.Error().Err(err).Msg("Failed to add post content") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to add post content"}) + return + } + + c.JSON(http.StatusCreated, gin.H{ + "title": content.Title, + "content_markdown": content.ContentMarkdown, + "language_code": content.LanguageCode, + "summary": content.Summary, + "meta_keywords": content.MetaKeywords, + "meta_description": content.MetaDescription, + "edges": gin.H{}, + }) +} + +// Contributor handlers +func (h *Handler) ListContributors(c *gin.Context) { + contributors, err := h.service.ListContributors(c.Request.Context()) + if err != nil { + log.Error().Err(err).Msg("Failed to list contributors") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to list contributors"}) + return + } + + response := make([]gin.H, len(contributors)) + for i, contributor := range contributors { + socialLinks := make([]gin.H, len(contributor.Edges.SocialLinks)) + for j, link := range contributor.Edges.SocialLinks { + socialLinks[j] = gin.H{ + "type": link.Type, + "value": link.Value, + "edges": gin.H{}, + } + } + + response[i] = gin.H{ + "id": contributor.ID, + "name": contributor.Name, + "created_at": contributor.CreatedAt, + "updated_at": contributor.UpdatedAt, + "edges": gin.H{ + "social_links": socialLinks, + }, + } + } + + c.JSON(http.StatusOK, response) +} + +func (h *Handler) GetContributor(c *gin.Context) { + id, err := strconv.Atoi(c.Param("id")) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid contributor ID"}) + return + } + + contributor, err := h.service.GetContributorByID(c.Request.Context(), id) + if err != nil { + log.Error().Err(err).Msg("Failed to get contributor") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get contributor"}) + return + } + + c.JSON(http.StatusOK, contributor) +} + +type CreateContributorRequest struct { + Name string `json:"name" binding:"required"` + 
AvatarURL *string `json:"avatar_url"` + Bio *string `json:"bio"` +} + +func (h *Handler) CreateContributor(c *gin.Context) { + var req CreateContributorRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + contributor, err := h.service.CreateContributor(c.Request.Context(), req.Name, req.AvatarURL, req.Bio) + if err != nil { + log.Error().Err(err).Msg("Failed to create contributor") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create contributor"}) + return + } + + c.JSON(http.StatusCreated, contributor) +} + +type AddContributorSocialLinkRequest struct { + Type string `json:"type" binding:"required"` + Name *string `json:"name"` + Value string `json:"value" binding:"required"` +} + +func (h *Handler) AddContributorSocialLink(c *gin.Context) { + var req AddContributorSocialLinkRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + contributorID, err := strconv.Atoi(c.Param("id")) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid contributor ID"}) + return + } + + name := "" + if req.Name != nil { + name = *req.Name + } + link, err := h.service.AddContributorSocialLink(c.Request.Context(), contributorID, req.Type, name, req.Value) + if err != nil { + log.Error().Err(err).Msg("Failed to add contributor social link") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to add contributor social link"}) + return + } + + c.JSON(http.StatusCreated, link) +} + +// Daily handlers +func (h *Handler) ListDailies(c *gin.Context) { + langCode := c.Query("lang") + if langCode == "" { + langCode = "en" // Default to English + } + + var categoryID *int + if catIDStr := c.Query("category_id"); catIDStr != "" { + if id, err := strconv.Atoi(catIDStr); err == nil { + categoryID = &id + } + } + + limit := 10 // Default limit + if limitStr := c.Query("limit"); limitStr != "" { + if l, err := strconv.Atoi(limitStr); err == nil && l > 0 { + limit = l + } + } + + offset := 0 // Default offset + if offsetStr := c.Query("offset"); offsetStr != "" { + if o, err := strconv.Atoi(offsetStr); err == nil && o >= 0 { + offset = o + } + } + + dailies, err := h.service.ListDailies(c.Request.Context(), langCode, categoryID, limit, offset) + if err != nil { + log.Error().Err(err).Msg("Failed to list dailies") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to list dailies"}) + return + } + + c.JSON(http.StatusOK, dailies) +} + +func (h *Handler) GetDaily(c *gin.Context) { + id := c.Param("id") + daily, err := h.service.GetDailyByID(c.Request.Context(), id) + if err != nil { + log.Error().Err(err).Msg("Failed to get daily") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get daily"}) + return + } + + c.JSON(http.StatusOK, daily) +} + +type CreateDailyRequest struct { + ID string `json:"id" binding:"required"` + CategoryID int `json:"category_id" binding:"required"` + ImageURL string `json:"image_url" binding:"required"` +} + +func (h *Handler) CreateDaily(c *gin.Context) { + var req CreateDailyRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + daily, err := h.service.CreateDaily(c.Request.Context(), req.ID, req.CategoryID, req.ImageURL) + if err != nil { + log.Error().Err(err).Msg("Failed to create daily") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create daily"}) + return 
+ } + + c.JSON(http.StatusCreated, daily) +} + +type AddDailyContentRequest struct { + LanguageCode string `json:"language_code" binding:"required"` + Quote string `json:"quote" binding:"required"` +} + +func (h *Handler) AddDailyContent(c *gin.Context) { + var req AddDailyContentRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + dailyID := c.Param("id") + content, err := h.service.AddDailyContent(c.Request.Context(), dailyID, req.LanguageCode, req.Quote) + if err != nil { + log.Error().Err(err).Msg("Failed to add daily content") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to add daily content"}) + return + } + + c.JSON(http.StatusCreated, content) +} + +// Helper functions +func stringPtr(s *string) string { + if s == nil { + return "" + } + return *s +} diff --git a/backend/internal/handler/handler_test.go b/backend/internal/handler/handler_test.go new file mode 100644 index 0000000..a9d3b9f --- /dev/null +++ b/backend/internal/handler/handler_test.go @@ -0,0 +1,43 @@ +package handler + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStringPtr(t *testing.T) { + testCases := []struct { + name string + input *string + expected string + }{ + { + name: "nil pointer", + input: nil, + expected: "", + }, + { + name: "empty string", + input: strPtr(""), + expected: "", + }, + { + name: "non-empty string", + input: strPtr("test"), + expected: "test", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := stringPtr(tc.input) + assert.Equal(t, tc.expected, result) + }) + } +} + +// Helper function to create string pointer +func strPtr(s string) *string { + return &s +} diff --git a/backend/internal/handler/media.go b/backend/internal/handler/media.go new file mode 100644 index 0000000..68d4a4b --- /dev/null +++ b/backend/internal/handler/media.go @@ -0,0 +1,173 @@ +package handler + +import ( + "fmt" + "io" + "net/http" + "strconv" + + "github.com/gin-gonic/gin" + "github.com/rs/zerolog/log" +) + +// Media handlers +func (h *Handler) ListMedia(c *gin.Context) { + limit := 10 // Default limit + if limitStr := c.Query("limit"); limitStr != "" { + if l, err := strconv.Atoi(limitStr); err == nil && l > 0 { + limit = l + } + } + + offset := 0 // Default offset + if offsetStr := c.Query("offset"); offsetStr != "" { + if o, err := strconv.Atoi(offsetStr); err == nil && o >= 0 { + offset = o + } + } + + media, err := h.service.ListMedia(c.Request.Context(), limit, offset) + if err != nil { + log.Error().Err(err).Msg("Failed to list media") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to list media"}) + return + } + + c.JSON(http.StatusOK, media) +} + +func (h *Handler) UploadMedia(c *gin.Context) { + // Get user ID from context (set by auth middleware) + userID, exists := c.Get("user_id") + if !exists { + c.JSON(http.StatusUnauthorized, gin.H{"error": "Unauthorized"}) + return + } + + // Get file from form + file, err := c.FormFile("file") + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "No file uploaded"}) + return + } + + // 文件大小限制 + if file.Size > 10*1024*1024 { // 10MB + c.JSON(http.StatusBadRequest, gin.H{"error": "File size exceeds the limit (10MB)"}) + return + } + + // 文件类型限制 + allowedTypes := map[string]bool{ + "image/jpeg": true, + "image/png": true, + "image/gif": true, + "video/mp4": true, + "video/webm": true, + "audio/mpeg": true, + "audio/ogg": true, + "application/pdf": true, + } + 
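+	// NOTE: this trusts the client-supplied Content-Type header. A
+	// stricter check could sniff the leading bytes with
+	// http.DetectContentType before accepting the upload.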
contentType := file.Header.Get("Content-Type") + if _, ok := allowedTypes[contentType]; !ok { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid file type"}) + return + } + + // Upload file + media, err := h.service.Upload(c.Request.Context(), file, userID.(int)) + if err != nil { + log.Error().Err(err).Msg("Failed to upload media") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to upload media"}) + return + } + + c.JSON(http.StatusCreated, media) +} + +func (h *Handler) GetMedia(c *gin.Context) { + id, err := strconv.Atoi(c.Param("id")) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid media ID"}) + return + } + + // Get media metadata + media, err := h.service.GetMedia(c.Request.Context(), id) + if err != nil { + log.Error().Err(err).Msg("Failed to get media") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get media"}) + return + } + + // Get file content + reader, info, err := h.service.GetFile(c.Request.Context(), id) + if err != nil { + log.Error().Err(err).Msg("Failed to get media file") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get media file"}) + return + } + defer reader.Close() + + // Set response headers + c.Header("Content-Type", media.MimeType) + c.Header("Content-Length", fmt.Sprintf("%d", info.Size)) + c.Header("Content-Disposition", fmt.Sprintf("inline; filename=%s", media.OriginalName)) + + // Stream the file + if _, err := io.Copy(c.Writer, reader); err != nil { + log.Error().Err(err).Msg("Failed to stream media file") + return + } +} + +func (h *Handler) GetMediaFile(c *gin.Context) { + id, err := strconv.Atoi(c.Param("id")) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid media ID"}) + return + } + + // Get file content + reader, info, err := h.service.GetFile(c.Request.Context(), id) + if err != nil { + log.Error().Err(err).Msg("Failed to get media file") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get media file"}) + return + } + defer reader.Close() + + // Set response headers + c.Header("Content-Type", info.ContentType) + c.Header("Content-Length", fmt.Sprintf("%d", info.Size)) + c.Header("Content-Disposition", fmt.Sprintf("inline; filename=%s", info.Name)) + + // Stream the file + if _, err := io.Copy(c.Writer, reader); err != nil { + log.Error().Err(err).Msg("Failed to stream media file") + return + } +} + +func (h *Handler) DeleteMedia(c *gin.Context) { + // Get user ID from context (set by auth middleware) + userID, exists := c.Get("user_id") + if !exists { + c.JSON(http.StatusUnauthorized, gin.H{"error": "Unauthorized"}) + return + } + + id, err := strconv.Atoi(c.Param("id")) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid media ID"}) + return + } + + if err := h.service.DeleteMedia(c.Request.Context(), id, userID.(int)); err != nil { + log.Error().Err(err).Msg("Failed to delete media") + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to delete media"}) + return + } + + c.JSON(http.StatusNoContent, nil) +} diff --git a/backend/internal/handler/media_handler_test.go b/backend/internal/handler/media_handler_test.go new file mode 100644 index 0000000..5452129 --- /dev/null +++ b/backend/internal/handler/media_handler_test.go @@ -0,0 +1,524 @@ +package handler + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/http/httptest" + "strings" + "testing" + "tss-rocks-be/ent" + "tss-rocks-be/internal/config" + 
"tss-rocks-be/internal/service/mock" + "tss-rocks-be/internal/storage" + + "net/textproto" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/suite" + "go.uber.org/mock/gomock" +) + +type MediaHandlerTestSuite struct { + suite.Suite + ctrl *gomock.Controller + service *mock.MockService + handler *Handler + router *gin.Engine +} + +func (s *MediaHandlerTestSuite) SetupTest() { + s.ctrl = gomock.NewController(s.T()) + s.service = mock.NewMockService(s.ctrl) + s.handler = NewHandler(&config.Config{}, s.service) + s.router = gin.New() +} + +func (s *MediaHandlerTestSuite) TearDownTest() { + s.ctrl.Finish() +} + +func TestMediaHandlerSuite(t *testing.T) { + suite.Run(t, new(MediaHandlerTestSuite)) +} + +func (s *MediaHandlerTestSuite) TestListMedia() { + testCases := []struct { + name string + query string + setupMock func() + expectedStatus int + expectedError string + }{ + { + name: "成功列出媒体", + query: "?limit=10&offset=0", + setupMock: func() { + s.service.EXPECT(). + ListMedia(gomock.Any(), 10, 0). + Return([]*ent.Media{{ID: 1}}, nil) + }, + expectedStatus: http.StatusOK, + }, + { + name: "使用默认限制和偏移", + query: "", + setupMock: func() { + s.service.EXPECT(). + ListMedia(gomock.Any(), 10, 0). + Return([]*ent.Media{{ID: 1}}, nil) + }, + expectedStatus: http.StatusOK, + }, + { + name: "列出媒体失败", + query: "", + setupMock: func() { + s.service.EXPECT(). + ListMedia(gomock.Any(), 10, 0). + Return(nil, errors.New("failed to list media")) + }, + expectedStatus: http.StatusInternalServerError, + expectedError: "Failed to list media", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + // 设置 mock + tc.setupMock() + + // 创建请求 + req, _ := http.NewRequest(http.MethodGet, "/media"+tc.query, nil) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = req + + // 执行请求 + s.handler.ListMedia(c) + + // 验证响应 + s.Equal(tc.expectedStatus, w.Code) + if tc.expectedError != "" { + var response map[string]string + err := json.Unmarshal(w.Body.Bytes(), &response) + s.NoError(err) + s.Equal(tc.expectedError, response["error"]) + } else { + var response []*ent.Media + err := json.Unmarshal(w.Body.Bytes(), &response) + s.NoError(err) + s.NotEmpty(response) + } + }) + } +} + +func (s *MediaHandlerTestSuite) TestUploadMedia() { + testCases := []struct { + name string + setupRequest func() (*http.Request, error) + setupMock func() + expectedStatus int + expectedError string + }{ + { + name: "成功上传媒体", + setupRequest: func() (*http.Request, error) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + // 创建文件部分 + fileHeader := make(textproto.MIMEHeader) + fileHeader.Set("Content-Type", "image/jpeg") + fileHeader.Set("Content-Disposition", `form-data; name="file"; filename="test.jpg"`) + part, err := writer.CreatePart(fileHeader) + if err != nil { + return nil, err + } + testContent := "test content" + _, err = io.Copy(part, strings.NewReader(testContent)) + if err != nil { + return nil, err + } + writer.Close() + + req := httptest.NewRequest(http.MethodPost, "/media", body) + req.Header.Set("Content-Type", writer.FormDataContentType()) + return req, nil + }, + setupMock: func() { + expectedFile := &multipart.FileHeader{ + Filename: "test.jpg", + Size: int64(len("test content")), + Header: textproto.MIMEHeader{ + "Content-Type": []string{"image/jpeg"}, + }, + } + s.service.EXPECT(). + Upload(gomock.Any(), gomock.Any(), 1). 
+ DoAndReturn(func(_ context.Context, f *multipart.FileHeader, uid int) (*ent.Media, error) { + s.Equal(expectedFile.Filename, f.Filename) + s.Equal(expectedFile.Size, f.Size) + s.Equal(expectedFile.Header.Get("Content-Type"), f.Header.Get("Content-Type")) + return &ent.Media{ID: 1}, nil + }) + }, + expectedStatus: http.StatusCreated, + }, + { + name: "未授权", + setupRequest: func() (*http.Request, error) { + req := httptest.NewRequest(http.MethodPost, "/media", nil) + return req, nil + }, + setupMock: func() {}, + expectedStatus: http.StatusUnauthorized, + expectedError: "Unauthorized", + }, + { + name: "上传失败", + setupRequest: func() (*http.Request, error) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + // 创建文件部分 + fileHeader := make(textproto.MIMEHeader) + fileHeader.Set("Content-Type", "image/jpeg") + fileHeader.Set("Content-Disposition", `form-data; name="file"; filename="test.jpg"`) + part, err := writer.CreatePart(fileHeader) + if err != nil { + return nil, err + } + testContent := "test content" + _, err = io.Copy(part, strings.NewReader(testContent)) + if err != nil { + return nil, err + } + writer.Close() + + req := httptest.NewRequest(http.MethodPost, "/media", body) + req.Header.Set("Content-Type", writer.FormDataContentType()) + return req, nil + }, + setupMock: func() { + s.service.EXPECT(). + Upload(gomock.Any(), gomock.Any(), 1). + Return(nil, errors.New("failed to upload")) + }, + expectedStatus: http.StatusInternalServerError, + expectedError: "Failed to upload media", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + // 设置 mock + tc.setupMock() + + // 创建请求 + req, err := tc.setupRequest() + s.NoError(err) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = req + + // 设置用户ID(除了未授权的测试用例) + if tc.expectedError != "Unauthorized" { + c.Set("user_id", 1) + } + + // 执行请求 + s.handler.UploadMedia(c) + + // 验证响应 + s.Equal(tc.expectedStatus, w.Code) + if tc.expectedError != "" { + var response map[string]string + err := json.Unmarshal(w.Body.Bytes(), &response) + s.NoError(err) + s.Equal(tc.expectedError, response["error"]) + } else { + var response *ent.Media + err := json.Unmarshal(w.Body.Bytes(), &response) + s.NoError(err) + s.NotNil(response) + } + }) + } +} + +func (s *MediaHandlerTestSuite) TestGetMedia() { + testCases := []struct { + name string + mediaID string + setupMock func() + expectedStatus int + expectedError string + }{ + { + name: "成功获取媒体", + mediaID: "1", + setupMock: func() { + media := &ent.Media{ + ID: 1, + MimeType: "image/jpeg", + OriginalName: "test.jpg", + } + s.service.EXPECT(). + GetMedia(gomock.Any(), 1). + Return(media, nil) + s.service.EXPECT(). + GetFile(gomock.Any(), 1). + Return(io.NopCloser(strings.NewReader("test content")), &storage.FileInfo{ + Size: 11, + Name: "test.jpg", + ContentType: "image/jpeg", + }, nil) + }, + expectedStatus: http.StatusOK, + }, + { + name: "无效的媒体ID", + mediaID: "invalid", + setupMock: func() {}, + expectedStatus: http.StatusBadRequest, + expectedError: "Invalid media ID", + }, + { + name: "获取媒体元数据失败", + mediaID: "1", + setupMock: func() { + s.service.EXPECT(). + GetMedia(gomock.Any(), 1). + Return(nil, errors.New("failed to get media")) + }, + expectedStatus: http.StatusInternalServerError, + expectedError: "Failed to get media", + }, + { + name: "获取媒体文件失败", + mediaID: "1", + setupMock: func() { + media := &ent.Media{ + ID: 1, + MimeType: "image/jpeg", + OriginalName: "test.jpg", + } + s.service.EXPECT(). + GetMedia(gomock.Any(), 1). 
+ Return(media, nil) + s.service.EXPECT(). + GetFile(gomock.Any(), 1). + Return(nil, nil, errors.New("failed to get file")) + }, + expectedStatus: http.StatusInternalServerError, + expectedError: "Failed to get media file", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + // 设置 mock + tc.setupMock() + + // 创建请求 + req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("/media/%s", tc.mediaID), nil) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = req + + // Extract ID from URL path + parts := strings.Split(strings.Trim(req.URL.Path, "/"), "/") + if len(parts) >= 2 { + c.Params = []gin.Param{{Key: "id", Value: parts[1]}} + } + + // 执行请求 + s.handler.GetMedia(c) + + // 验证响应 + s.Equal(tc.expectedStatus, w.Code) + if tc.expectedError != "" { + var response map[string]string + err := json.Unmarshal(w.Body.Bytes(), &response) + s.NoError(err) + s.Equal(tc.expectedError, response["error"]) + } else { + s.Equal("image/jpeg", w.Header().Get("Content-Type")) + s.Equal("11", w.Header().Get("Content-Length")) + s.Equal("inline; filename=test.jpg", w.Header().Get("Content-Disposition")) + s.Equal("test content", w.Body.String()) + } + }) + } +} + +func (s *MediaHandlerTestSuite) TestGetMediaFile() { + testCases := []struct { + name string + setupRequest func() (*http.Request, error) + setupMock func() + expectedStatus int + expectedBody []byte + }{ + { + name: "成功获取媒体文件", + setupRequest: func() (*http.Request, error) { + return httptest.NewRequest(http.MethodGet, "/media/1/file", nil), nil + }, + setupMock: func() { + fileContent := "test file content" + s.service.EXPECT(). + GetFile(gomock.Any(), 1). + Return(io.NopCloser(strings.NewReader(fileContent)), &storage.FileInfo{ + Name: "test.jpg", + Size: int64(len(fileContent)), + ContentType: "image/jpeg", + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: []byte("test file content"), + }, + { + name: "无效的媒体ID", + setupRequest: func() (*http.Request, error) { + return httptest.NewRequest(http.MethodGet, "/media/invalid/file", nil), nil + }, + setupMock: func() {}, + expectedStatus: http.StatusBadRequest, + }, + { + name: "获取媒体文件失败", + setupRequest: func() (*http.Request, error) { + return httptest.NewRequest(http.MethodGet, "/media/1/file", nil), nil + }, + setupMock: func() { + s.service.EXPECT(). + GetFile(gomock.Any(), 1). 
+ Return(nil, nil, errors.New("failed to get file")) + }, + expectedStatus: http.StatusInternalServerError, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + // Setup + req, err := tc.setupRequest() + s.Require().NoError(err) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = req + + // Extract ID from URL path + parts := strings.Split(strings.Trim(req.URL.Path, "/"), "/") + if len(parts) >= 2 { + c.Params = []gin.Param{{Key: "id", Value: parts[1]}} + } + + // Setup mock + tc.setupMock() + + // Test + s.handler.GetMediaFile(c) + + // Verify + s.Equal(tc.expectedStatus, w.Code) + if tc.expectedBody != nil { + s.Equal(tc.expectedBody, w.Body.Bytes()) + s.Equal("image/jpeg", w.Header().Get("Content-Type")) + s.Equal(fmt.Sprintf("%d", len(tc.expectedBody)), w.Header().Get("Content-Length")) + s.Equal("inline; filename=test.jpg", w.Header().Get("Content-Disposition")) + } + }) + } +} + +func (s *MediaHandlerTestSuite) TestDeleteMedia() { + testCases := []struct { + name string + mediaID string + setupMock func() + expectedStatus int + expectedError string + }{ + { + name: "成功删除媒体", + mediaID: "1", + setupMock: func() { + s.service.EXPECT(). + DeleteMedia(gomock.Any(), 1, 1). + Return(nil) + }, + expectedStatus: http.StatusNoContent, + }, + { + name: "未授权", + mediaID: "1", + setupMock: func() {}, + expectedStatus: http.StatusUnauthorized, + expectedError: "Unauthorized", + }, + { + name: "无效的媒体ID", + mediaID: "invalid", + setupMock: func() {}, + expectedStatus: http.StatusBadRequest, + expectedError: "Invalid media ID", + }, + { + name: "删除媒体失败", + mediaID: "1", + setupMock: func() { + s.service.EXPECT(). + DeleteMedia(gomock.Any(), 1, 1). + Return(errors.New("failed to delete")) + }, + expectedStatus: http.StatusInternalServerError, + expectedError: "Failed to delete media", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + // 设置 mock + tc.setupMock() + + // 创建请求 + req := httptest.NewRequest(http.MethodDelete, fmt.Sprintf("/media/%s", tc.mediaID), nil) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = req + + // Extract ID from URL path + parts := strings.Split(strings.Trim(req.URL.Path, "/"), "/") + if len(parts) >= 2 { + c.Params = []gin.Param{{Key: "id", Value: parts[1]}} + } + + // 设置用户ID(除了未授权的测试用例) + if tc.expectedError != "Unauthorized" { + c.Set("user_id", 1) + } + + // 执行请求 + s.handler.DeleteMedia(c) + + // 验证响应 + s.Equal(tc.expectedStatus, w.Code) + if tc.expectedError != "" { + var response map[string]string + err := json.Unmarshal(w.Body.Bytes(), &response) + s.NoError(err) + s.Equal(tc.expectedError, response["error"]) + } + }) + } +} diff --git a/backend/internal/handler/post_handler_test.go b/backend/internal/handler/post_handler_test.go new file mode 100644 index 0000000..949fc69 --- /dev/null +++ b/backend/internal/handler/post_handler_test.go @@ -0,0 +1,611 @@ +package handler + +import ( + "bytes" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "tss-rocks-be/ent" + "tss-rocks-be/internal/config" + "tss-rocks-be/internal/service/mock" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/suite" + "go.uber.org/mock/gomock" + + "errors" + "strings" +) + +type PostHandlerTestSuite struct { + suite.Suite + ctrl *gomock.Controller + service *mock.MockService + handler *Handler + router *gin.Engine +} + +func (s *PostHandlerTestSuite) SetupTest() { + s.ctrl = gomock.NewController(s.T()) + s.service = mock.NewMockService(s.ctrl) + cfg := &config.Config{} + 
s.handler = NewHandler(cfg, s.service) + + // Setup Gin router + gin.SetMode(gin.TestMode) + s.router = gin.New() + s.handler.RegisterRoutes(s.router) +} + +func (s *PostHandlerTestSuite) TearDownTest() { + s.ctrl.Finish() +} + +func TestPostHandlerSuite(t *testing.T) { + suite.Run(t, new(PostHandlerTestSuite)) +} + +// Test cases for ListPosts +func (s *PostHandlerTestSuite) TestListPosts() { + categoryID := 1 + testCases := []struct { + name string + langCode string + categoryID string + limit string + offset string + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success with default language", + langCode: "", + setupMock: func() { + s.service.EXPECT(). + ListPosts(gomock.Any(), "en", nil, 10, 0). + Return([]*ent.Post{ + { + ID: 1, + Status: "published", + Edges: ent.PostEdges{ + Contents: []*ent.PostContent{ + { + LanguageCode: "en", + Title: "Test Post", + ContentMarkdown: "Test Content", + Summary: "Test Summary", + }, + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: []*ent.Post{ + { + ID: 1, + Status: "published", + Edges: ent.PostEdges{ + Contents: []*ent.PostContent{ + { + LanguageCode: "en", + Title: "Test Post", + ContentMarkdown: "Test Content", + Summary: "Test Summary", + }, + }, + }, + }, + }, + }, + { + name: "Success with specific language", + langCode: "zh", + setupMock: func() { + s.service.EXPECT(). + ListPosts(gomock.Any(), "zh", nil, 10, 0). + Return([]*ent.Post{ + { + ID: 1, + Status: "published", + Edges: ent.PostEdges{ + Contents: []*ent.PostContent{ + { + LanguageCode: "zh", + Title: "测试帖子", + ContentMarkdown: "测试内容", + Summary: "测试摘要", + }, + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: []*ent.Post{ + { + ID: 1, + Status: "published", + Edges: ent.PostEdges{ + Contents: []*ent.PostContent{ + { + LanguageCode: "zh", + Title: "测试帖子", + ContentMarkdown: "测试内容", + Summary: "测试摘要", + }, + }, + }, + }, + }, + }, + { + name: "Success with category filter", + langCode: "en", + categoryID: "1", + setupMock: func() { + s.service.EXPECT(). + ListPosts(gomock.Any(), "en", &categoryID, 10, 0). + Return([]*ent.Post{ + { + ID: 1, + Status: "published", + Edges: ent.PostEdges{ + Contents: []*ent.PostContent{ + { + LanguageCode: "en", + Title: "Test Post", + ContentMarkdown: "Test Content", + Summary: "Test Summary", + }, + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: []*ent.Post{ + { + ID: 1, + Status: "published", + Edges: ent.PostEdges{ + Contents: []*ent.PostContent{ + { + LanguageCode: "en", + Title: "Test Post", + ContentMarkdown: "Test Content", + Summary: "Test Summary", + }, + }, + }, + }, + }, + }, + { + name: "Success with pagination", + langCode: "en", + limit: "2", + offset: "1", + setupMock: func() { + s.service.EXPECT(). + ListPosts(gomock.Any(), "en", nil, 2, 1). + Return([]*ent.Post{ + { + ID: 2, + Status: "published", + Edges: ent.PostEdges{ + Contents: []*ent.PostContent{ + { + LanguageCode: "en", + Title: "Test Post 2", + ContentMarkdown: "Test Content 2", + Summary: "Test Summary 2", + }, + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: []*ent.Post{ + { + ID: 2, + Status: "published", + Edges: ent.PostEdges{ + Contents: []*ent.PostContent{ + { + LanguageCode: "en", + Title: "Test Post 2", + ContentMarkdown: "Test Content 2", + Summary: "Test Summary 2", + }, + }, + }, + }, + }, + }, + { + name: "Service Error", + langCode: "en", + setupMock: func() { + s.service.EXPECT(). 
+ ListPosts(gomock.Any(), "en", nil, 10, 0). + Return(nil, errors.New("service error")) + }, + expectedStatus: http.StatusInternalServerError, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + // Setup mock + tc.setupMock() + + // Create request + url := "/api/v1/posts" + if tc.langCode != "" { + url += "?lang=" + tc.langCode + } + if tc.categoryID != "" { + if strings.Contains(url, "?") { + url += "&" + } else { + url += "?" + } + url += "category_id=" + tc.categoryID + } + if tc.limit != "" { + if strings.Contains(url, "?") { + url += "&" + } else { + url += "?" + } + url += "limit=" + tc.limit + } + if tc.offset != "" { + if strings.Contains(url, "?") { + url += "&" + } else { + url += "?" + } + url += "offset=" + tc.offset + } + + req := httptest.NewRequest(http.MethodGet, url, nil) + w := httptest.NewRecorder() + + // Perform request + s.router.ServeHTTP(w, req) + + // Assert response + s.Equal(tc.expectedStatus, w.Code) + if tc.expectedBody != nil { + var response []*ent.Post + err := json.Unmarshal(w.Body.Bytes(), &response) + s.NoError(err) + s.Equal(tc.expectedBody, response) + } + }) + } +} + +// Test cases for GetPost +func (s *PostHandlerTestSuite) TestGetPost() { + testCases := []struct { + name string + langCode string + slug string + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success with default language", + langCode: "", + slug: "test-post", + setupMock: func() { + s.service.EXPECT(). + GetPostBySlug(gomock.Any(), "en", "test-post"). + Return(&ent.Post{ + ID: 1, + Status: "published", + Slug: "test-post", + Edges: ent.PostEdges{ + Contents: []*ent.PostContent{ + { + LanguageCode: "en", + Title: "Test Post", + ContentMarkdown: "Test Content", + Summary: "Test Summary", + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: gin.H{ + "id": 1, + "status": "published", + "slug": "test-post", + "edges": gin.H{ + "contents": []gin.H{ + { + "language_code": "en", + "title": "Test Post", + "content_markdown": "Test Content", + "summary": "Test Summary", + }, + }, + }, + }, + }, + { + name: "Success with specific language", + langCode: "zh", + slug: "test-post", + setupMock: func() { + s.service.EXPECT(). + GetPostBySlug(gomock.Any(), "zh", "test-post"). + Return(&ent.Post{ + ID: 1, + Status: "published", + Slug: "test-post", + Edges: ent.PostEdges{ + Contents: []*ent.PostContent{ + { + LanguageCode: "zh", + Title: "测试帖子", + ContentMarkdown: "测试内容", + Summary: "测试摘要", + }, + }, + }, + }, nil) + }, + expectedStatus: http.StatusOK, + expectedBody: gin.H{ + "id": 1, + "status": "published", + "slug": "test-post", + "edges": gin.H{ + "contents": []gin.H{ + { + "language_code": "zh", + "title": "测试帖子", + "content_markdown": "测试内容", + "summary": "测试摘要", + }, + }, + }, + }, + }, + { + name: "Service error", + slug: "test-post", + setupMock: func() { + s.service.EXPECT(). + GetPostBySlug(gomock.Any(), "en", "test-post"). 
+ Return(nil, errors.New("service error")) + }, + expectedStatus: http.StatusInternalServerError, + expectedBody: gin.H{"error": "Failed to get post"}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + tc.setupMock() + + url := "/api/v1/posts/" + tc.slug + if tc.langCode != "" { + url += "?lang=" + tc.langCode + } + + req := httptest.NewRequest(http.MethodGet, url, nil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + s.Equal(tc.expectedStatus, w.Code, "HTTP status code mismatch") + + if tc.expectedBody != nil { + expectedJSON, err := json.Marshal(tc.expectedBody) + s.NoError(err, "Failed to marshal expected body") + s.JSONEq(string(expectedJSON), w.Body.String(), "Response body mismatch") + } + }) + } +} + +// Test cases for CreatePost +func (s *PostHandlerTestSuite) TestCreatePost() { + testCases := []struct { + name string + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success", + setupMock: func() { + s.service.EXPECT(). + CreatePost(gomock.Any(), "draft"). + Return(&ent.Post{ + ID: 1, + Status: "draft", + Edges: ent.PostEdges{ + Contents: []*ent.PostContent{}, + }, + }, nil) + }, + expectedStatus: http.StatusCreated, + expectedBody: gin.H{ + "id": 1, + "status": "draft", + "edges": gin.H{ + "contents": []gin.H{}, + }, + }, + }, + { + name: "Service error", + setupMock: func() { + s.service.EXPECT(). + CreatePost(gomock.Any(), "draft"). + Return(nil, errors.New("service error")) + }, + expectedStatus: http.StatusInternalServerError, + expectedBody: gin.H{"error": "Failed to create post"}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + tc.setupMock() + + req := httptest.NewRequest(http.MethodPost, "/api/v1/posts", nil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + s.Equal(tc.expectedStatus, w.Code, "HTTP status code mismatch") + + if tc.expectedBody != nil { + expectedJSON, err := json.Marshal(tc.expectedBody) + s.NoError(err, "Failed to marshal expected body") + s.JSONEq(string(expectedJSON), w.Body.String(), "Response body mismatch") + } + }) + } +} + +// Test cases for AddPostContent +func (s *PostHandlerTestSuite) TestAddPostContent() { + testCases := []struct { + name string + postID string + body interface{} + setupMock func() + expectedStatus int + expectedBody interface{} + }{ + { + name: "Success", + postID: "1", + body: AddPostContentRequest{ + LanguageCode: "en", + Title: "Test Post", + ContentMarkdown: "Test Content", + Summary: "Test Summary", + MetaKeywords: "test,keywords", + MetaDescription: "Test meta description", + }, + setupMock: func() { + s.service.EXPECT(). + AddPostContent( + gomock.Any(), + 1, + "en", + "Test Post", + "Test Content", + "Test Summary", + "test,keywords", + "Test meta description", + ). 
+ Return(&ent.PostContent{ + LanguageCode: "en", + Title: "Test Post", + ContentMarkdown: "Test Content", + Summary: "Test Summary", + MetaKeywords: "test,keywords", + MetaDescription: "Test meta description", + Edges: ent.PostContentEdges{}, + }, nil) + }, + expectedStatus: http.StatusCreated, + expectedBody: gin.H{ + "language_code": "en", + "title": "Test Post", + "content_markdown": "Test Content", + "summary": "Test Summary", + "meta_keywords": "test,keywords", + "meta_description": "Test meta description", + "edges": gin.H{}, + }, + }, + { + name: "Invalid post ID", + postID: "invalid", + body: AddPostContentRequest{ + LanguageCode: "en", + Title: "Test Post", + ContentMarkdown: "Test Content", + Summary: "Test Summary", + }, + setupMock: func() {}, + expectedStatus: http.StatusBadRequest, + expectedBody: gin.H{"error": "Invalid post ID"}, + }, + { + name: "Invalid request body", + postID: "1", + body: map[string]interface{}{ + "language_code": "en", + // Missing required fields + }, + setupMock: func() {}, + expectedStatus: http.StatusBadRequest, + expectedBody: gin.H{"error": "Key: 'AddPostContentRequest.Title' Error:Field validation for 'Title' failed on the 'required' tag\nKey: 'AddPostContentRequest.ContentMarkdown' Error:Field validation for 'ContentMarkdown' failed on the 'required' tag\nKey: 'AddPostContentRequest.Summary' Error:Field validation for 'Summary' failed on the 'required' tag"}, + }, + { + name: "Service error", + postID: "1", + body: AddPostContentRequest{ + LanguageCode: "en", + Title: "Test Post", + ContentMarkdown: "Test Content", + Summary: "Test Summary", + MetaKeywords: "test,keywords", + MetaDescription: "Test meta description", + }, + setupMock: func() { + s.service.EXPECT(). + AddPostContent( + gomock.Any(), + 1, + "en", + "Test Post", + "Test Content", + "Test Summary", + "test,keywords", + "Test meta description", + ). 
+ Return(nil, errors.New("service error")) + }, + expectedStatus: http.StatusInternalServerError, + expectedBody: gin.H{"error": "Failed to add post content"}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + tc.setupMock() + + body, err := json.Marshal(tc.body) + s.NoError(err, "Failed to marshal request body") + + req := httptest.NewRequest(http.MethodPost, "/api/v1/posts/"+tc.postID+"/contents", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + s.Equal(tc.expectedStatus, w.Code, "HTTP status code mismatch") + + if tc.expectedBody != nil { + expectedJSON, err := json.Marshal(tc.expectedBody) + s.NoError(err, "Failed to marshal expected body") + s.JSONEq(string(expectedJSON), w.Body.String(), "Response body mismatch") + } + }) + } +} diff --git a/backend/internal/middleware/accesslog.go b/backend/internal/middleware/accesslog.go new file mode 100644 index 0000000..2495294 --- /dev/null +++ b/backend/internal/middleware/accesslog.go @@ -0,0 +1,192 @@ +package middleware + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/rs/zerolog" + "gopkg.in/natefinch/lumberjack.v2" + "tss-rocks-be/internal/types" +) + +// AccessLogConfig 访问日志配置 +type AccessLogConfig struct { + // 是否启用控制台输出 + EnableConsole bool `yaml:"enable_console"` + // 是否启用文件日志 + EnableFile bool `yaml:"enable_file"` + // 日志文件路径 + FilePath string `yaml:"file_path"` + // 日志格式 (json 或 text) + Format string `yaml:"format"` + // 日志级别 + Level string `yaml:"level"` + // 日志轮转配置 + Rotation struct { + MaxSize int `yaml:"max_size"` // 每个日志文件的最大大小(MB) + MaxAge int `yaml:"max_age"` // 保留旧日志文件的最大天数 + MaxBackups int `yaml:"max_backups"` // 保留的旧日志文件的最大数量 + Compress bool `yaml:"compress"` // 是否压缩旧日志文件 + LocalTime bool `yaml:"local_time"` // 使用本地时间作为轮转时间 + } `yaml:"rotation"` +} + +// accessLogger 访问日志记录器 +type accessLogger struct { + consoleLogger *zerolog.Logger + fileLogger *zerolog.Logger + logWriter *lumberjack.Logger + config *types.AccessLogConfig +} + +// Close 关闭日志文件 +func (l *accessLogger) Close() error { + if l.logWriter != nil { + return l.logWriter.Close() + } + return nil +} + +// newAccessLogger 创建新的访问日志记录器 +func newAccessLogger(config *types.AccessLogConfig) (*accessLogger, error) { + var consoleLogger, fileLogger *zerolog.Logger + var logWriter *lumberjack.Logger + + // 设置日志级别 + level, err := zerolog.ParseLevel(config.Level) + if err != nil { + level = zerolog.InfoLevel + } + zerolog.SetGlobalLevel(level) + + // 配置控制台日志 + if config.EnableConsole { + logger := zerolog.New(os.Stdout). + With(). + Timestamp(). + Logger() + + if config.Format == "text" { + logger = logger.Output(zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339}) + } + consoleLogger = &logger + } + + // 配置文件日志 + if config.EnableFile { + // 确保日志目录存在 + if err := os.MkdirAll(filepath.Dir(config.FilePath), 0755); err != nil { + return nil, fmt.Errorf("failed to create log directory: %w", err) + } + + // 配置日志轮转 + logWriter = &lumberjack.Logger{ + Filename: config.FilePath, + MaxSize: config.Rotation.MaxSize, // MB + MaxAge: config.Rotation.MaxAge, // days + MaxBackups: config.Rotation.MaxBackups, // files + Compress: config.Rotation.Compress, // 是否压缩 + LocalTime: config.Rotation.LocalTime, // 使用本地时间 + } + + logger := zerolog.New(logWriter). + With(). + Timestamp(). 
+ Logger() + + fileLogger = &logger + } + + return &accessLogger{ + consoleLogger: consoleLogger, + fileLogger: fileLogger, + logWriter: logWriter, + config: config, + }, nil +} + +// logEvent 记录日志事件 +func (l *accessLogger) logEvent(fields map[string]interface{}, msg string) { + if l.consoleLogger != nil { + event := l.consoleLogger.Info() + for k, v := range fields { + event = event.Interface(k, v) + } + event.Msg(msg) + } + if l.fileLogger != nil { + event := l.fileLogger.Info() + for k, v := range fields { + event = event.Interface(k, v) + } + event.Msg(msg) + } +} + +// AccessLog 创建访问日志中间件 +func AccessLog(config *types.AccessLogConfig) (gin.HandlerFunc, error) { + logger, err := newAccessLogger(config) + if err != nil { + return nil, err + } + + return func(c *gin.Context) { + // 用于测试时关闭日志文件 + if c == nil { + if err := logger.Close(); err != nil { + fmt.Printf("Error closing log file: %v\n", err) + } + return + } + + start := time.Now() + requestID := uuid.New().String() + path := c.Request.URL.Path + query := c.Request.URL.RawQuery + + // 设置请求ID到上下文 + c.Set("request_id", requestID) + + // 处理请求 + c.Next() + + // 计算处理时间 + latency := time.Since(start) + + // 获取用户ID(如果已认证) + var userID interface{} + if id, exists := c.Get("user_id"); exists { + userID = id + } + + // 准备日志字段 + fields := map[string]interface{}{ + "request_id": requestID, + "method": c.Request.Method, + "path": path, + "query": query, + "ip": c.ClientIP(), + "user_agent": c.Request.UserAgent(), + "status": c.Writer.Status(), + "size": c.Writer.Size(), + "latency_ms": latency.Milliseconds(), + "component": "access_log", + } + + if userID != nil { + fields["user_id"] = userID + } + + // 如果有错误,添加到日志中 + if len(c.Errors) > 0 { + fields["error"] = c.Errors.String() + } + + // 记录日志 + logger.logEvent(fields, fmt.Sprintf("%s %s", c.Request.Method, path)) + }, nil +} diff --git a/backend/internal/middleware/accesslog_test.go b/backend/internal/middleware/accesslog_test.go new file mode 100644 index 0000000..085555b --- /dev/null +++ b/backend/internal/middleware/accesslog_test.go @@ -0,0 +1,238 @@ +package middleware + +import ( + "bytes" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + "time" + "tss-rocks-be/internal/types" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" +) + +func TestAccessLog(t *testing.T) { + // 设置测试临时目录 + tmpDir := t.TempDir() + logPath := filepath.Join(tmpDir, "test.log") + + testCases := []struct { + name string + config *types.AccessLogConfig + expectedError bool + setupRequest func(*http.Request) + validateOutput func(*testing.T, *httptest.ResponseRecorder, string) + }{ + { + name: "Console logging only", + config: &types.AccessLogConfig{ + EnableConsole: true, + EnableFile: false, + Format: "json", + Level: "info", + }, + expectedError: false, + setupRequest: func(req *http.Request) { + req.Header.Set("User-Agent", "test-agent") + }, + validateOutput: func(t *testing.T, w *httptest.ResponseRecorder, logOutput string) { + assert.Equal(t, http.StatusOK, w.Code) + assert.Contains(t, logOutput, "GET /test") + assert.Contains(t, logOutput, "test-agent") + }, + }, + { + name: "File logging only", + config: &types.AccessLogConfig{ + EnableConsole: false, + EnableFile: true, + FilePath: logPath, + Format: "json", + Level: "info", + Rotation: struct { + MaxSize int `yaml:"max_size"` + MaxAge int `yaml:"max_age"` + MaxBackups int `yaml:"max_backups"` + Compress bool `yaml:"compress"` + LocalTime bool `yaml:"local_time"` + }{ + MaxSize: 1, + MaxAge: 1, + 
MaxBackups: 1, + Compress: false, + LocalTime: true, + }, + }, + expectedError: false, + setupRequest: func(req *http.Request) { + req.Header.Set("User-Agent", "test-agent") + }, + validateOutput: func(t *testing.T, w *httptest.ResponseRecorder, logOutput string) { + assert.Equal(t, http.StatusOK, w.Code) + + // 读取日志文件内容 + content, err := os.ReadFile(logPath) + assert.NoError(t, err) + assert.Contains(t, string(content), "GET /test") + assert.Contains(t, string(content), "test-agent") + }, + }, + { + name: "Both console and file logging", + config: &types.AccessLogConfig{ + EnableConsole: true, + EnableFile: true, + FilePath: logPath, + Format: "json", + Level: "info", + Rotation: struct { + MaxSize int `yaml:"max_size"` + MaxAge int `yaml:"max_age"` + MaxBackups int `yaml:"max_backups"` + Compress bool `yaml:"compress"` + LocalTime bool `yaml:"local_time"` + }{ + MaxSize: 1, + MaxAge: 1, + MaxBackups: 1, + Compress: false, + LocalTime: true, + }, + }, + expectedError: false, + setupRequest: func(req *http.Request) { + req.Header.Set("User-Agent", "test-agent") + }, + validateOutput: func(t *testing.T, w *httptest.ResponseRecorder, logOutput string) { + assert.Equal(t, http.StatusOK, w.Code) + assert.Contains(t, logOutput, "GET /test") + assert.Contains(t, logOutput, "test-agent") + + // 读取日志文件内容 + content, err := os.ReadFile(logPath) + assert.NoError(t, err) + assert.Contains(t, string(content), "GET /test") + assert.Contains(t, string(content), "test-agent") + }, + }, + { + name: "With authenticated user", + config: &types.AccessLogConfig{ + EnableConsole: true, + EnableFile: false, + Format: "json", + Level: "info", + }, + expectedError: false, + setupRequest: func(req *http.Request) { + req.Header.Set("User-Agent", "test-agent") + }, + validateOutput: func(t *testing.T, w *httptest.ResponseRecorder, logOutput string) { + assert.Equal(t, http.StatusOK, w.Code) + assert.Contains(t, logOutput, "GET /test") + assert.Contains(t, logOutput, "test-agent") + assert.Contains(t, logOutput, "test-user") + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // 捕获标准输出 + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + // 创建一个新的 gin 引擎 + gin.SetMode(gin.TestMode) + router := gin.New() + + // 创建访问日志中间件 + middleware, err := AccessLog(tc.config) + if tc.expectedError { + assert.Error(t, err) + return + } + assert.NoError(t, err) + + // 添加测试路由 + router.Use(middleware) + router.GET("/test", func(c *gin.Context) { + // 如果是测试认证用户的情况,设置用户ID + if tc.name == "With authenticated user" { + c.Set("user_id", "test-user") + } + c.Status(http.StatusOK) + }) + + // 创建测试请求 + req := httptest.NewRequest("GET", "/test", nil) + if tc.setupRequest != nil { + tc.setupRequest(req) + } + rec := httptest.NewRecorder() + + // 执行请求 + router.ServeHTTP(rec, req) + + // 恢复标准输出并获取输出内容 + w.Close() + var buf bytes.Buffer + io.Copy(&buf, r) + os.Stdout = oldStdout + + // 验证输出 + if tc.validateOutput != nil { + tc.validateOutput(t, rec, buf.String()) + } + + // 关闭日志文件 + if tc.config.EnableFile { + // 调用中间件函数来关闭日志文件 + middleware(nil) + // 等待一小段时间确保文件完全关闭 + time.Sleep(100 * time.Millisecond) + } + }) + } +} + +func TestAccessLogInvalidConfig(t *testing.T) { + testCases := []struct { + name string + config *types.AccessLogConfig + expectedError bool + }{ + { + name: "Invalid log level", + config: &types.AccessLogConfig{ + EnableConsole: true, + Level: "invalid_level", + }, + expectedError: false, // 应该使用默认的 info 级别 + }, + { + name: "Invalid file path", + config: &types.AccessLogConfig{ + 
EnableFile: true, + FilePath: "/dev/null/nonexistent/test.log", // 在所有操作系统上都无效的路径 + }, + expectedError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := AccessLog(tc.config) + if tc.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/backend/internal/middleware/auth.go b/backend/internal/middleware/auth.go new file mode 100644 index 0000000..bc88389 --- /dev/null +++ b/backend/internal/middleware/auth.go @@ -0,0 +1,82 @@ +package middleware + +import ( + "net/http" + "strings" + + "github.com/gin-gonic/gin" + "github.com/golang-jwt/jwt/v5" + "github.com/rs/zerolog/log" +) + +// AuthMiddleware creates a middleware for JWT authentication +func AuthMiddleware(jwtSecret string) gin.HandlerFunc { + return func(c *gin.Context) { + authHeader := c.GetHeader("Authorization") + if authHeader == "" { + c.JSON(http.StatusUnauthorized, gin.H{"error": "Authorization header is required"}) + c.Abort() + return + } + + parts := strings.Split(authHeader, " ") + if len(parts) != 2 || parts[0] != "Bearer" { + c.JSON(http.StatusUnauthorized, gin.H{"error": "Authorization header format must be Bearer {token}"}) + c.Abort() + return + } + + token, err := jwt.Parse(parts[1], func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, jwt.ErrSignatureInvalid + } + return []byte(jwtSecret), nil + }) + + if err != nil { + log.Error().Err(err).Msg("Failed to parse token") + c.JSON(http.StatusUnauthorized, gin.H{"error": "Invalid token"}) + c.Abort() + return + } + + if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid { + c.Set("user_id", claims["sub"]) + c.Set("user_role", claims["role"]) + c.Next() + } else { + c.JSON(http.StatusUnauthorized, gin.H{"error": "Invalid token"}) + c.Abort() + return + } + } +} + +// RoleMiddleware creates a middleware for role-based authorization +func RoleMiddleware(roles ...string) gin.HandlerFunc { + return func(c *gin.Context) { + userRole, exists := c.Get("user_role") + if !exists { + c.JSON(http.StatusUnauthorized, gin.H{"error": "User role not found"}) + c.Abort() + return + } + + roleStr, ok := userRole.(string) + if !ok { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Invalid user role type"}) + c.Abort() + return + } + + for _, role := range roles { + if role == roleStr { + c.Next() + return + } + } + + c.JSON(http.StatusForbidden, gin.H{"error": "Insufficient permissions"}) + c.Abort() + } +} diff --git a/backend/internal/middleware/auth_test.go b/backend/internal/middleware/auth_test.go new file mode 100644 index 0000000..ffb1e1b --- /dev/null +++ b/backend/internal/middleware/auth_test.go @@ -0,0 +1,217 @@ +package middleware + +import ( + "encoding/json" + "github.com/gin-gonic/gin" + "github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/assert" + "net/http" + "net/http/httptest" + "testing" + "time" +) + +func createTestToken(secret string, claims jwt.MapClaims) string { + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + signedToken, _ := token.SignedString([]byte(secret)) + return signedToken +} + +func TestAuthMiddleware(t *testing.T) { + jwtSecret := "test-secret" + + testCases := []struct { + name string + setupAuth func(*http.Request) + expectedStatus int + expectedBody map[string]string + checkUserData bool + expectedUserID string + expectedRole string + }{ + { + name: "No Authorization header", + setupAuth: func(req *http.Request) {}, + expectedStatus: 
http.StatusUnauthorized, + expectedBody: map[string]string{"error": "Authorization header is required"}, + }, + { + name: "Invalid Authorization format", + setupAuth: func(req *http.Request) { + req.Header.Set("Authorization", "InvalidFormat") + }, + expectedStatus: http.StatusUnauthorized, + expectedBody: map[string]string{"error": "Authorization header format must be Bearer {token}"}, + }, + { + name: "Invalid token", + setupAuth: func(req *http.Request) { + req.Header.Set("Authorization", "Bearer invalid.token.here") + }, + expectedStatus: http.StatusUnauthorized, + expectedBody: map[string]string{"error": "Invalid token"}, + }, + { + name: "Valid token", + setupAuth: func(req *http.Request) { + claims := jwt.MapClaims{ + "sub": "user123", + "role": "user", + "exp": time.Now().Add(time.Hour).Unix(), + } + token := createTestToken(jwtSecret, claims) + req.Header.Set("Authorization", "Bearer "+token) + }, + expectedStatus: http.StatusOK, + checkUserData: true, + expectedUserID: "user123", + expectedRole: "user", + }, + { + name: "Expired token", + setupAuth: func(req *http.Request) { + claims := jwt.MapClaims{ + "sub": "user123", + "role": "user", + "exp": time.Now().Add(-time.Hour).Unix(), + } + token := createTestToken(jwtSecret, claims) + req.Header.Set("Authorization", "Bearer "+token) + }, + expectedStatus: http.StatusUnauthorized, + expectedBody: map[string]string{"error": "Invalid token"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + gin.SetMode(gin.TestMode) + router := gin.New() + + // 添加认证中间件 + router.Use(AuthMiddleware(jwtSecret)) + + // 测试路由 + router.GET("/test", func(c *gin.Context) { + if tc.checkUserData { + userID, exists := c.Get("user_id") + assert.True(t, exists) + assert.Equal(t, tc.expectedUserID, userID) + + role, exists := c.Get("user_role") + assert.True(t, exists) + assert.Equal(t, tc.expectedRole, role) + } + c.Status(http.StatusOK) + }) + + // 创建请求 + req := httptest.NewRequest("GET", "/test", nil) + tc.setupAuth(req) + rec := httptest.NewRecorder() + + // 执行请求 + router.ServeHTTP(rec, req) + + // 验证响应 + assert.Equal(t, tc.expectedStatus, rec.Code) + + if tc.expectedBody != nil { + var response map[string]string + err := json.NewDecoder(rec.Body).Decode(&response) + assert.NoError(t, err) + assert.Equal(t, tc.expectedBody, response) + } + }) + } +} + +func TestRoleMiddleware(t *testing.T) { + testCases := []struct { + name string + setupContext func(*gin.Context) + allowedRoles []string + expectedStatus int + expectedBody map[string]string + }{ + { + name: "No user role", + setupContext: func(c *gin.Context) { + // 不设置用户角色 + }, + allowedRoles: []string{"admin"}, + expectedStatus: http.StatusUnauthorized, + expectedBody: map[string]string{"error": "User role not found"}, + }, + { + name: "Invalid role type", + setupContext: func(c *gin.Context) { + c.Set("user_role", 123) // 设置错误类型的角色 + }, + allowedRoles: []string{"admin"}, + expectedStatus: http.StatusInternalServerError, + expectedBody: map[string]string{"error": "Invalid user role type"}, + }, + { + name: "Insufficient permissions", + setupContext: func(c *gin.Context) { + c.Set("user_role", "user") + }, + allowedRoles: []string{"admin"}, + expectedStatus: http.StatusForbidden, + expectedBody: map[string]string{"error": "Insufficient permissions"}, + }, + { + name: "Allowed role", + setupContext: func(c *gin.Context) { + c.Set("user_role", "admin") + }, + allowedRoles: []string{"admin"}, + expectedStatus: http.StatusOK, + }, + { + name: "One of multiple allowed roles", + 
setupContext: func(c *gin.Context) { + c.Set("user_role", "editor") + }, + allowedRoles: []string{"admin", "editor", "moderator"}, + expectedStatus: http.StatusOK, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + gin.SetMode(gin.TestMode) + router := gin.New() + + // 添加角色中间件 + router.Use(func(c *gin.Context) { + tc.setupContext(c) + c.Next() + }) + router.Use(RoleMiddleware(tc.allowedRoles...)) + + // 测试路由 + router.GET("/test", func(c *gin.Context) { + c.Status(http.StatusOK) + }) + + // 创建请求 + req := httptest.NewRequest("GET", "/test", nil) + rec := httptest.NewRecorder() + + // 执行请求 + router.ServeHTTP(rec, req) + + // 验证响应 + assert.Equal(t, tc.expectedStatus, rec.Code) + + if tc.expectedBody != nil { + var response map[string]string + err := json.NewDecoder(rec.Body).Decode(&response) + assert.NoError(t, err) + assert.Equal(t, tc.expectedBody, response) + } + }) + } +} diff --git a/backend/internal/middleware/cors.go b/backend/internal/middleware/cors.go new file mode 100644 index 0000000..5ab9c1d --- /dev/null +++ b/backend/internal/middleware/cors.go @@ -0,0 +1,22 @@ +package middleware + +import ( + "github.com/gin-gonic/gin" +) + +// CORS middleware +func CORS() gin.HandlerFunc { + return func(c *gin.Context) { + c.Writer.Header().Set("Access-Control-Allow-Origin", "*") + c.Writer.Header().Set("Access-Control-Allow-Credentials", "true") + c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With") + c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT, DELETE") + + if c.Request.Method == "OPTIONS" { + c.AbortWithStatus(204) + return + } + + c.Next() + } +} diff --git a/backend/internal/middleware/cors_test.go b/backend/internal/middleware/cors_test.go new file mode 100644 index 0000000..bf187f9 --- /dev/null +++ b/backend/internal/middleware/cors_test.go @@ -0,0 +1,76 @@ +package middleware + +import ( + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "net/http" + "net/http/httptest" + "testing" +) + +func TestCORS(t *testing.T) { + testCases := []struct { + name string + method string + expectedStatus int + checkHeaders bool + }{ + { + name: "Normal GET request", + method: "GET", + expectedStatus: http.StatusOK, + checkHeaders: true, + }, + { + name: "OPTIONS request", + method: "OPTIONS", + expectedStatus: http.StatusNoContent, + checkHeaders: true, + }, + { + name: "POST request", + method: "POST", + expectedStatus: http.StatusOK, + checkHeaders: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // 创建一个新的 gin 引擎 + gin.SetMode(gin.TestMode) + router := gin.New() + + // 添加 CORS 中间件 + router.Use(CORS()) + + // 添加测试路由 + router.Any("/test", func(c *gin.Context) { + c.Status(http.StatusOK) + }) + + // 创建测试请求 + req := httptest.NewRequest(tc.method, "/test", nil) + rec := httptest.NewRecorder() + + // 执行请求 + router.ServeHTTP(rec, req) + + // 验证状态码 + assert.Equal(t, tc.expectedStatus, rec.Code) + + if tc.checkHeaders { + // 验证 CORS 头部 + headers := rec.Header() + assert.Equal(t, "*", headers.Get("Access-Control-Allow-Origin")) + assert.Equal(t, "true", headers.Get("Access-Control-Allow-Credentials")) + assert.Contains(t, headers.Get("Access-Control-Allow-Headers"), "Content-Type") + assert.Contains(t, headers.Get("Access-Control-Allow-Headers"), "Authorization") + assert.Contains(t, headers.Get("Access-Control-Allow-Methods"), "POST") + 
assert.Contains(t, headers.Get("Access-Control-Allow-Methods"), "GET") + assert.Contains(t, headers.Get("Access-Control-Allow-Methods"), "PUT") + assert.Contains(t, headers.Get("Access-Control-Allow-Methods"), "DELETE") + } + }) + } +} diff --git a/backend/internal/middleware/ratelimit.go b/backend/internal/middleware/ratelimit.go new file mode 100644 index 0000000..9659f83 --- /dev/null +++ b/backend/internal/middleware/ratelimit.go @@ -0,0 +1,107 @@ +package middleware + +import ( + "net/http" + "sync" + "time" + + "github.com/gin-gonic/gin" + "golang.org/x/time/rate" + "tss-rocks-be/internal/types" +) + +// ipLimiter IP限流器 +type ipLimiter struct { + limiter *rate.Limiter + lastSeen time.Time +} + +// rateLimiter 限流器管理器 +type rateLimiter struct { + ips map[string]*ipLimiter + mu sync.RWMutex + config *types.RateLimitConfig + routes map[string]*rate.Limiter +} + +// newRateLimiter 创建新的限流器 +func newRateLimiter(config *types.RateLimitConfig) *rateLimiter { + // 初始化路由限流器 + routes := make(map[string]*rate.Limiter) + for path, cfg := range config.RouteRates { + routes[path] = rate.NewLimiter(rate.Limit(cfg.Rate), cfg.Burst) + } + + rl := &rateLimiter{ + ips: make(map[string]*ipLimiter), + config: config, + routes: routes, + } + + // 启动清理过期IP限流器的goroutine + go rl.cleanupIPLimiters() + + return rl +} + +// cleanupIPLimiters 清理过期的IP限流器 +func (rl *rateLimiter) cleanupIPLimiters() { + for { + time.Sleep(time.Hour) // 每小时清理一次 + + rl.mu.Lock() + for ip, limiter := range rl.ips { + if time.Since(limiter.lastSeen) > time.Hour { + delete(rl.ips, ip) + } + } + rl.mu.Unlock() + } +} + +// getLimiter 获取IP限流器 +func (rl *rateLimiter) getLimiter(ip string) *rate.Limiter { + rl.mu.Lock() + defer rl.mu.Unlock() + + v, exists := rl.ips[ip] + if !exists { + limiter := rate.NewLimiter(rate.Limit(rl.config.IPRate), rl.config.IPBurst) + rl.ips[ip] = &ipLimiter{limiter: limiter, lastSeen: time.Now()} + return limiter + } + + v.lastSeen = time.Now() + return v.limiter +} + +// RateLimit 创建限流中间件 +func RateLimit(config *types.RateLimitConfig) gin.HandlerFunc { + rl := newRateLimiter(config) + + return func(c *gin.Context) { + // 检查路由限流 + path := c.Request.URL.Path + if limiter, ok := rl.routes[path]; ok { + if !limiter.Allow() { + c.JSON(http.StatusTooManyRequests, gin.H{ + "error": "too many requests for this route", + }) + c.Abort() + return + } + } + + // 检查IP限流 + limiter := rl.getLimiter(c.ClientIP()) + if !limiter.Allow() { + c.JSON(http.StatusTooManyRequests, gin.H{ + "error": "too many requests from this IP", + }) + c.Abort() + return + } + + c.Next() + } +} diff --git a/backend/internal/middleware/ratelimit_test.go b/backend/internal/middleware/ratelimit_test.go new file mode 100644 index 0000000..381f0af --- /dev/null +++ b/backend/internal/middleware/ratelimit_test.go @@ -0,0 +1,207 @@ +package middleware + +import ( + "encoding/json" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "net/http" + "net/http/httptest" + "testing" + "time" + "tss-rocks-be/internal/types" +) + +func TestRateLimit(t *testing.T) { + testCases := []struct { + name string + config *types.RateLimitConfig + setupTest func(*gin.Engine) + runTest func(*testing.T, *gin.Engine) + expectedStatus int + expectedBody map[string]string + }{ + { + name: "IP rate limit", + config: &types.RateLimitConfig{ + IPRate: 1, // 每秒1个请求 + IPBurst: 1, + }, + setupTest: func(router *gin.Engine) { + router.GET("/test", func(c *gin.Context) { + c.Status(http.StatusOK) + }) + }, + runTest: func(t *testing.T, router *gin.Engine) { + // 
第一个请求应该成功 + req := httptest.NewRequest("GET", "/test", nil) + req.RemoteAddr = "192.168.1.1:1234" + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + assert.Equal(t, http.StatusOK, rec.Code) + + // 第二个请求应该被限制 + rec = httptest.NewRecorder() + router.ServeHTTP(rec, req) + assert.Equal(t, http.StatusTooManyRequests, rec.Code) + var response map[string]string + err := json.NewDecoder(rec.Body).Decode(&response) + assert.NoError(t, err) + assert.Equal(t, "too many requests from this IP", response["error"]) + + // 等待限流器重置 + time.Sleep(time.Second) + + // 第三个请求应该成功 + rec = httptest.NewRecorder() + router.ServeHTTP(rec, req) + assert.Equal(t, http.StatusOK, rec.Code) + }, + }, + { + name: "Route rate limit", + config: &types.RateLimitConfig{ + IPRate: 100, // 设置较高的 IP 限流,以便测试路由限流 + IPBurst: 10, + RouteRates: map[string]struct { + Rate int `yaml:"rate"` + Burst int `yaml:"burst"` + }{ + "/limited": { + Rate: 1, + Burst: 1, + }, + }, + }, + setupTest: func(router *gin.Engine) { + router.GET("/limited", func(c *gin.Context) { + c.Status(http.StatusOK) + }) + router.GET("/unlimited", func(c *gin.Context) { + c.Status(http.StatusOK) + }) + }, + runTest: func(t *testing.T, router *gin.Engine) { + // 测试限流路由 + req := httptest.NewRequest("GET", "/limited", nil) + req.RemoteAddr = "192.168.1.2:1234" + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + assert.Equal(t, http.StatusOK, rec.Code) + + // 等待一小段时间确保限流器生效 + time.Sleep(10 * time.Millisecond) + + rec = httptest.NewRecorder() + router.ServeHTTP(rec, req) + assert.Equal(t, http.StatusTooManyRequests, rec.Code) + var response map[string]string + err := json.NewDecoder(rec.Body).Decode(&response) + assert.NoError(t, err) + assert.Equal(t, "too many requests for this route", response["error"]) + + // 测试未限流路由 + req = httptest.NewRequest("GET", "/unlimited", nil) + req.RemoteAddr = "192.168.1.2:1234" + rec = httptest.NewRecorder() + router.ServeHTTP(rec, req) + assert.Equal(t, http.StatusOK, rec.Code) + + // 等待一小段时间确保限流器生效 + time.Sleep(10 * time.Millisecond) + + rec = httptest.NewRecorder() + router.ServeHTTP(rec, req) + assert.Equal(t, http.StatusOK, rec.Code) + }, + }, + { + name: "Multiple IPs", + config: &types.RateLimitConfig{ + IPRate: 1, + IPBurst: 1, + }, + setupTest: func(router *gin.Engine) { + router.GET("/test", func(c *gin.Context) { + c.Status(http.StatusOK) + }) + }, + runTest: func(t *testing.T, router *gin.Engine) { + // IP1 的请求 + req1 := httptest.NewRequest("GET", "/test", nil) + req1.RemoteAddr = "192.168.1.3:1234" + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req1) + assert.Equal(t, http.StatusOK, rec.Code) + + rec = httptest.NewRecorder() + router.ServeHTTP(rec, req1) + assert.Equal(t, http.StatusTooManyRequests, rec.Code) + + // IP2 的请求应该不受 IP1 的限制影响 + req2 := httptest.NewRequest("GET", "/test", nil) + req2.RemoteAddr = "192.168.1.4:1234" + rec = httptest.NewRecorder() + router.ServeHTTP(rec, req2) + assert.Equal(t, http.StatusOK, rec.Code) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + gin.SetMode(gin.TestMode) + router := gin.New() + + // 添加限流中间件 + router.Use(RateLimit(tc.config)) + + // 设置测试路由 + tc.setupTest(router) + + // 运行测试 + tc.runTest(t, router) + }) + } +} + +func TestRateLimiterCleanup(t *testing.T) { + config := &types.RateLimitConfig{ + IPRate: 1, + IPBurst: 1, + } + + rl := newRateLimiter(config) + + // 添加一些IP限流器 + ips := []string{"192.168.1.1", "192.168.1.2", "192.168.1.3"} + for _, ip := range ips { + rl.getLimiter(ip) + } + + // 验证IP限流器已创建 + 
rl.mu.RLock() + assert.Equal(t, len(ips), len(rl.ips)) + rl.mu.RUnlock() + + // 修改一些IP的最后访问时间为1小时前 + rl.mu.Lock() + rl.ips["192.168.1.1"].lastSeen = time.Now().Add(-2 * time.Hour) + rl.ips["192.168.1.2"].lastSeen = time.Now().Add(-2 * time.Hour) + rl.mu.Unlock() + + // 手动触发清理 + rl.mu.Lock() + for ip, limiter := range rl.ips { + if time.Since(limiter.lastSeen) > time.Hour { + delete(rl.ips, ip) + } + } + rl.mu.Unlock() + + // 验证过期的IP限流器已被删除 + rl.mu.RLock() + assert.Equal(t, 1, len(rl.ips)) + _, exists := rl.ips["192.168.1.3"] + assert.True(t, exists) + rl.mu.RUnlock() +} diff --git a/backend/internal/middleware/rbac.go b/backend/internal/middleware/rbac.go new file mode 100644 index 0000000..bef25b3 --- /dev/null +++ b/backend/internal/middleware/rbac.go @@ -0,0 +1,110 @@ +package middleware + +import ( + "context" + "fmt" + "net/http" + + "tss-rocks-be/ent" + "tss-rocks-be/ent/user" + "tss-rocks-be/internal/auth" + + "github.com/gin-gonic/gin" +) + +// RequirePermission creates a middleware that checks if the user has the required permission +func RequirePermission(client *ent.Client, resource, action string) gin.HandlerFunc { + return func(c *gin.Context) { + // Get user from context + userID, exists := c.Get(auth.UserIDKey) + if !exists { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{ + "error": "Unauthorized", + }) + return + } + + // Get user with roles + user, err := client.User.Query(). + Where(user.ID(userID.(int))). + WithRoles(func(q *ent.RoleQuery) { + q.WithPermissions() + }). + Only(context.Background()) + + if err != nil { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{ + "error": "User not found", + }) + return + } + + // Check if user has the required permission through any of their roles + hasPermission := false + for _, r := range user.Edges.Roles { + for _, p := range r.Edges.Permissions { + if p.Resource == resource && p.Action == action { + hasPermission = true + break + } + } + if hasPermission { + break + } + } + + if !hasPermission { + c.AbortWithStatusJSON(http.StatusForbidden, gin.H{ + "error": fmt.Sprintf("Missing required permission: %s:%s", resource, action), + }) + return + } + + c.Next() + } +} + +// RequireRole creates a middleware that checks if the user has the required role +func RequireRole(client *ent.Client, roleName string) gin.HandlerFunc { + return func(c *gin.Context) { + // Get user from context + userID, exists := c.Get(auth.UserIDKey) + if !exists { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{ + "error": "Unauthorized", + }) + return + } + + // Get user with roles + user, err := client.User.Query(). + Where(user.ID(userID.(int))). + WithRoles(). 
+ Only(context.Background()) + + if err != nil { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{ + "error": "User not found", + }) + return + } + + // Check if user has the required role + hasRole := false + for _, r := range user.Edges.Roles { + if r.Name == roleName { + hasRole = true + break + } + } + + if !hasRole { + c.AbortWithStatusJSON(http.StatusForbidden, gin.H{ + "error": fmt.Sprintf("Required role: %s", roleName), + }) + return + } + + c.Next() + } +} diff --git a/backend/internal/middleware/upload.go b/backend/internal/middleware/upload.go new file mode 100644 index 0000000..91f9e53 --- /dev/null +++ b/backend/internal/middleware/upload.go @@ -0,0 +1,159 @@ +package middleware + +import ( + "bytes" + "fmt" + "io" + "net/http" + "path/filepath" + "strings" + + "github.com/gin-gonic/gin" + "tss-rocks-be/internal/types" +) + +const ( + defaultMaxMemory = 32 << 20 // 32 MB + maxHeaderBytes = 512 // 用于MIME类型检测的最大字节数 +) + +// ValidateUpload 创建文件上传验证中间件 +func ValidateUpload(cfg *types.UploadConfig) gin.HandlerFunc { + return func(c *gin.Context) { + // 检查是否是multipart/form-data请求 + if !strings.HasPrefix(c.GetHeader("Content-Type"), "multipart/form-data") { + c.JSON(http.StatusBadRequest, gin.H{ + "error": "Content-Type must be multipart/form-data", + }) + c.Abort() + return + } + + // 解析multipart表单 + if err := c.Request.ParseMultipartForm(defaultMaxMemory); err != nil { + c.JSON(http.StatusBadRequest, gin.H{ + "error": fmt.Sprintf("Failed to parse form: %v", err), + }) + c.Abort() + return + } + + form := c.Request.MultipartForm + if form == nil || form.File == nil { + c.JSON(http.StatusBadRequest, gin.H{ + "error": "No file uploaded", + }) + c.Abort() + return + } + + // 遍历所有上传的文件 + for _, files := range form.File { + for _, file := range files { + // 检查文件大小 + if file.Size > int64(cfg.MaxSize)<<20 { // 转换为字节 + c.JSON(http.StatusBadRequest, gin.H{ + "error": fmt.Sprintf("File %s exceeds maximum size of %d MB", file.Filename, cfg.MaxSize), + }) + c.Abort() + return + } + + // 检查文件扩展名 + ext := strings.ToLower(filepath.Ext(file.Filename)) + if !contains(cfg.AllowedExtensions, ext) { + c.JSON(http.StatusBadRequest, gin.H{ + "error": fmt.Sprintf("File extension %s is not allowed", ext), + }) + c.Abort() + return + } + + // 打开文件 + src, err := file.Open() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{ + "error": fmt.Sprintf("Failed to open file: %v", err), + }) + c.Abort() + return + } + defer src.Close() + + // 读取文件头部用于MIME类型检测 + header := make([]byte, maxHeaderBytes) + n, err := src.Read(header) + if err != nil && err != io.EOF { + c.JSON(http.StatusInternalServerError, gin.H{ + "error": fmt.Sprintf("Failed to read file: %v", err), + }) + c.Abort() + return + } + header = header[:n] + + // 检测MIME类型 + contentType := http.DetectContentType(header) + if !contains(cfg.AllowedTypes, contentType) { + c.JSON(http.StatusBadRequest, gin.H{ + "error": fmt.Sprintf("File type %s is not allowed", contentType), + }) + c.Abort() + return + } + + // 将文件指针重置到开始位置 + _, err = src.Seek(0, 0) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{ + "error": fmt.Sprintf("Failed to read file: %v", err), + }) + c.Abort() + return + } + + // 将文件内容读入缓冲区 + buf := &bytes.Buffer{} + _, err = io.Copy(buf, src) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{ + "error": fmt.Sprintf("Failed to read file: %v", err), + }) + c.Abort() + return + } + + // 将验证过的文件内容和类型保存到上下文中 + c.Set("validated_file_"+file.Filename, buf) + c.Set("validated_content_type_"+file.Filename, 
contentType) + } + } + + c.Next() + } +} + +// contains 检查切片中是否包含指定的字符串 +func contains(slice []string, str string) bool { + for _, s := range slice { + if s == str { + return true + } + } + return false +} + +// GetValidatedFile 从上下文中获取验证过的文件内容 +func GetValidatedFile(c *gin.Context, filename string) (*bytes.Buffer, string, bool) { + file, exists := c.Get("validated_file_" + filename) + if !exists { + return nil, "", false + } + + contentType, exists := c.Get("validated_content_type_" + filename) + if !exists { + return nil, "", false + } + + return file.(*bytes.Buffer), contentType.(string), true +} diff --git a/backend/internal/middleware/upload_test.go b/backend/internal/middleware/upload_test.go new file mode 100644 index 0000000..7434484 --- /dev/null +++ b/backend/internal/middleware/upload_test.go @@ -0,0 +1,262 @@ +package middleware + +import ( + "bytes" + "encoding/json" + "io" + "mime/multipart" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "tss-rocks-be/internal/types" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" +) + +func createMultipartRequest(t *testing.T, filename string, content []byte, contentType string) (*http.Request, error) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + part, err := writer.CreateFormFile("file", filename) + if err != nil { + return nil, err + } + + _, err = io.Copy(part, bytes.NewReader(content)) + if err != nil { + return nil, err + } + + err = writer.Close() + if err != nil { + return nil, err + } + + req := httptest.NewRequest("POST", "/upload", body) + req.Header.Set("Content-Type", writer.FormDataContentType()) + return req, nil +} + +func TestValidateUpload(t *testing.T) { + tests := []struct { + name string + config *types.UploadConfig + filename string + content []byte + setupRequest func(*testing.T) *http.Request + expectedStatus int + expectedError string + }{ + { + name: "Valid image upload", + config: &types.UploadConfig{ + MaxSize: 5, // 5MB + AllowedExtensions: []string{".jpg", ".jpeg", ".png"}, + AllowedTypes: []string{"image/jpeg", "image/png"}, + }, + filename: "test.jpg", + content: []byte{ + 0xFF, 0xD8, 0xFF, 0xE0, // JPEG magic numbers + 0x00, 0x10, 0x4A, 0x46, 0x49, 0x46, 0x00, + }, + expectedStatus: http.StatusOK, + }, + { + name: "Invalid file extension", + config: &types.UploadConfig{ + MaxSize: 5, + AllowedExtensions: []string{".jpg", ".jpeg", ".png"}, + AllowedTypes: []string{"image/jpeg", "image/png"}, + }, + filename: "test.txt", + content: []byte("test content"), + expectedStatus: http.StatusBadRequest, + expectedError: "File extension .txt is not allowed", + }, + { + name: "File too large", + config: &types.UploadConfig{ + MaxSize: 1, // 1MB + AllowedExtensions: []string{".jpg"}, + AllowedTypes: []string{"image/jpeg"}, + }, + filename: "large.jpg", + content: make([]byte, 2<<20), // 2MB + expectedStatus: http.StatusBadRequest, + expectedError: "File large.jpg exceeds maximum size of 1 MB", + }, + { + name: "Invalid content type", + config: &types.UploadConfig{ + MaxSize: 5, + AllowedExtensions: []string{".jpg"}, + AllowedTypes: []string{"image/jpeg"}, + }, + filename: "fake.jpg", + content: []byte("not a real image"), + expectedStatus: http.StatusBadRequest, + expectedError: "File type text/plain; charset=utf-8 is not allowed", + }, + { + name: "Missing file", + config: &types.UploadConfig{ + MaxSize: 5, + AllowedExtensions: []string{".jpg"}, + AllowedTypes: []string{"image/jpeg"}, + }, + setupRequest: func(t *testing.T) *http.Request { + req := 
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			gin.SetMode(gin.TestMode)
+			w := httptest.NewRecorder()
+			c, _ := gin.CreateTestContext(w)
+
+			var req *http.Request
+			var err error
+
+			if tt.setupRequest != nil {
+				req = tt.setupRequest(t)
+			} else {
+				req, err = createMultipartRequest(t, tt.filename, tt.content, "")
+				if err != nil {
+					t.Fatalf("Failed to create request: %v", err)
+				}
+			}
+
+			c.Request = req
+
+			middleware := ValidateUpload(tt.config)
+			middleware(c)
+
+			assert.Equal(t, tt.expectedStatus, w.Code)
+			if tt.expectedError != "" {
+				var response map[string]string
+				err := json.NewDecoder(w.Body).Decode(&response)
+				assert.NoError(t, err)
+				assert.Contains(t, response["error"], tt.expectedError)
+			}
+		})
+	}
+}
+
+func TestGetValidatedFile(t *testing.T) {
+	tests := []struct {
+		name          string
+		setupContext  func(*gin.Context)
+		filename      string
+		expectedFound bool
+		expectedError string
+	}{
+		{
+			name: "Get existing file",
+			setupContext: func(c *gin.Context) {
+				// Create test file contents
+				content := []byte("test content")
+				buf := bytes.NewBuffer(content)
+
+				// Store the validated file and its content type
+				c.Set("validated_file_test.txt", buf)
+				c.Set("validated_content_type_test.txt", "text/plain")
+			},
+			filename:      "test.txt",
+			expectedFound: true,
+		},
+		{
+			name: "File not found",
+			setupContext: func(c *gin.Context) {
+				// Intentionally set no file
+			},
+			filename:      "nonexistent.txt",
+			expectedFound: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			gin.SetMode(gin.TestMode)
+			w := httptest.NewRecorder()
+			c, _ := gin.CreateTestContext(w)
+
+			if tt.setupContext != nil {
+				tt.setupContext(c)
+			}
+
+			buffer, contentType, found := GetValidatedFile(c, tt.filename)
+
+			assert.Equal(t, tt.expectedFound, found)
+			if tt.expectedFound {
+				assert.NotNil(t, buffer)
+				assert.NotEmpty(t, contentType)
+			} else {
+				assert.Nil(t, buffer)
+				assert.Empty(t, contentType)
+			}
+		})
+	}
+}
+
+func TestContains(t *testing.T) {
+	tests := []struct {
+		name     string
+		slice    []string
+		str      string
+		expected bool
+	}{
+		{
+			name:     "String found in slice",
+			slice:    []string{"a", "b", "c"},
+			str:      "b",
+			expected: true,
+		},
+		{
+			name:     "String not found in slice",
+			slice:    []string{"a", "b", "c"},
+			str:      "d",
+			expected: false,
+		},
+		{
+			name:     "Empty slice",
+			slice:    []string{},
+			str:      "a",
+			expected: false,
+		},
+		{
+			name:     "Empty string",
+			slice:    []string{"a", "b", "c"},
+			str:      "",
+			expected: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := contains(tt.slice, tt.str)
+			assert.Equal(t, tt.expected, result)
+		})
+	}
+}
diff --git a/backend/internal/rbac/init.go b/backend/internal/rbac/init.go
new file mode 100644
index 0000000..e7ac706
--- /dev/null
+++ b/backend/internal/rbac/init.go
@@ -0,0 +1,90 @@
+package rbac
+
+import (
+	"context"
+	"fmt"
+
+	"tss-rocks-be/ent"
+	"tss-rocks-be/ent/role"
+)
+
+// DefaultPermissions defines the default
permissions for each resource +var DefaultPermissions = map[string][]string{ + "media": {"create", "read", "update", "delete", "list"}, + "post": {"create", "read", "update", "delete", "list"}, + "daily": {"create", "read", "update", "delete", "list"}, + "user": {"create", "read", "update", "delete", "list"}, +} + +// DefaultRoles defines the default roles and their permissions +var DefaultRoles = map[string]map[string][]string{ + "admin": DefaultPermissions, + "editor": { + "media": {"create", "read", "update", "list"}, + "post": {"create", "read", "update", "list"}, + "daily": {"create", "read", "update", "list"}, + "user": {"read"}, + }, + "contributor": { + "media": {"read", "list"}, + "post": {"read", "list"}, + "daily": {"read", "list"}, + }, +} + +// InitializeRBAC initializes the RBAC system with default roles and permissions +func InitializeRBAC(ctx context.Context, client *ent.Client) error { + // Create permissions + permissionMap := make(map[string]*ent.Permission) + for resource, actions := range DefaultPermissions { + for _, action := range actions { + permission, err := client.Permission.Create(). + SetResource(resource). + SetAction(action). + SetDescription(fmt.Sprintf("Permission to %s %s", action, resource)). + Save(ctx) + if err != nil { + return fmt.Errorf("failed creating permission: %w", err) + } + key := fmt.Sprintf("%s:%s", resource, action) + permissionMap[key] = permission + } + } + + // Create roles with permissions + for roleName, permissions := range DefaultRoles { + roleCreate := client.Role.Create(). + SetName(roleName). + SetDescription(fmt.Sprintf("Role for %s users", roleName)) + + // Add permissions to role + for resource, actions := range permissions { + for _, action := range actions { + key := fmt.Sprintf("%s:%s", resource, action) + if permission, exists := permissionMap[key]; exists { + roleCreate.AddPermissions(permission) + } + } + } + + if _, err := roleCreate.Save(ctx); err != nil { + return fmt.Errorf("failed creating role %s: %w", roleName, err) + } + } + + return nil +} + +// AssignRoleToUser assigns a role to a user +func AssignRoleToUser(ctx context.Context, client *ent.Client, userID int, roleName string) error { + role, err := client.Role.Query(). + Where(role.Name(roleName)). + Only(ctx) + if err != nil { + return fmt.Errorf("failed querying role: %w", err) + } + + return client.User.UpdateOneID(userID). + AddRoles(role). 
+ Exec(ctx) +} diff --git a/backend/internal/rbac/init_test.go b/backend/internal/rbac/init_test.go new file mode 100644 index 0000000..cdaaae8 --- /dev/null +++ b/backend/internal/rbac/init_test.go @@ -0,0 +1,98 @@ +package rbac + +import ( + "context" + "testing" + + "tss-rocks-be/ent/enttest" + "tss-rocks-be/ent/role" + + _ "github.com/mattn/go-sqlite3" +) + +func TestInitializeRBAC(t *testing.T) { + // Create an in-memory SQLite client for testing + client := enttest.Open(t, "sqlite3", "file:ent?mode=memory&cache=shared&_fk=1") + defer client.Close() + + ctx := context.Background() + + // Test initialization + err := InitializeRBAC(ctx, client) + if err != nil { + t.Fatalf("Failed to initialize RBAC: %v", err) + } + + // Verify roles were created + for roleName := range DefaultRoles { + r, err := client.Role.Query().Where(role.Name(roleName)).Only(ctx) + if err != nil { + t.Errorf("Role %s was not created: %v", roleName, err) + } + + // Verify permissions for each role + perms, err := r.QueryPermissions().All(ctx) + if err != nil { + t.Errorf("Failed to query permissions for role %s: %v", roleName, err) + } + + expectedPerms := DefaultRoles[roleName] + permCount := 0 + for _, actions := range expectedPerms { + permCount += len(actions) + } + + if len(perms) != permCount { + t.Errorf("Role %s has %d permissions, expected %d", roleName, len(perms), permCount) + } + } +} + +func TestAssignRoleToUser(t *testing.T) { + // Create an in-memory SQLite client for testing + client := enttest.Open(t, "sqlite3", "file:ent?mode=memory&cache=shared&_fk=1") + defer client.Close() + + ctx := context.Background() + + // Initialize RBAC + err := InitializeRBAC(ctx, client) + if err != nil { + t.Fatalf("Failed to initialize RBAC: %v", err) + } + + // Create a test user + user, err := client.User.Create(). + SetEmail("test@example.com"). + SetPasswordHash("$2a$10$hzLdXMZEIzgr8eGXL0YoCOIIrQhqEj6N.S3.wY1Jx5.4vWm1ZyHyy"). 
+		Save(ctx)
+	if err != nil {
+		t.Fatalf("Failed to create test user: %v", err)
+	}
+
+	// Test assigning role to user
+	err = AssignRoleToUser(ctx, client, user.ID, "editor")
+	if err != nil {
+		t.Fatalf("Failed to assign role to user: %v", err)
+	}
+
+	// Verify role assignment
+	assignedRoles, err := user.QueryRoles().All(ctx)
+	if err != nil {
+		t.Fatalf("Failed to query user roles: %v", err)
+	}
+
+	if len(assignedRoles) != 1 {
+		t.Errorf("Expected 1 role, got %d", len(assignedRoles))
+	}
+
+	if assignedRoles[0].Name != "editor" {
+		t.Errorf("Expected role name 'editor', got '%s'", assignedRoles[0].Name)
+	}
+
+	// Test assigning non-existent role
+	err = AssignRoleToUser(ctx, client, user.ID, "nonexistent")
+	if err == nil {
+		t.Error("Expected error when assigning non-existent role, got nil")
+	}
+}
diff --git a/backend/internal/server/database.go b/backend/internal/server/database.go
new file mode 100644
index 0000000..bb57244
--- /dev/null
+++ b/backend/internal/server/database.go
@@ -0,0 +1,24 @@
+package server
+
+import (
+	"context"
+	"tss-rocks-be/ent"
+
+	"github.com/rs/zerolog/log"
+)
+
+func InitDatabase(ctx context.Context, driver, dsn string) (*ent.Client, error) {
+	client, err := ent.Open(driver, dsn)
+	if err != nil {
+		log.Error().Err(err).Msg("failed opening database connection")
+		return nil, err
+	}
+
+	// Run the auto migration tool
+	if err := client.Schema.Create(ctx); err != nil {
+		log.Error().Err(err).Msg("failed creating schema resources")
+		return nil, err
+	}
+
+	return client, nil
+}
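+
+// Illustrative sketch: wiring InitDatabase from a caller, using the
+// Database.Driver and Database.DSN config fields exercised elsewhere in
+// this patch (the caller-side wiring itself is an assumption):
+//
+//	client, err := InitDatabase(ctx, cfg.Database.Driver, cfg.Database.DSN)
+//	if err != nil {
+//		log.Fatal().Err(err).Msg("database initialization failed")
+//	}
+//	defer client.Close()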
diff --git a/backend/internal/server/database_test.go b/backend/internal/server/database_test.go
new file mode 100644
index 0000000..c8e35da
--- /dev/null
+++ b/backend/internal/server/database_test.go
@@ -0,0 +1,64 @@
+package server
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestInitDatabase(t *testing.T) {
+	tests := []struct {
+		name        string
+		driver      string
+		dsn         string
+		wantErr     bool
+		errContains string
+	}{
+		{
+			name:   "success with sqlite3",
+			driver: "sqlite3",
+			dsn:    "file:ent?mode=memory&cache=shared&_fk=1",
+		},
+		{
+			name:        "invalid driver",
+			driver:      "invalid_driver",
+			dsn:         "file:ent?mode=memory",
+			wantErr:     true,
+			errContains: "unsupported driver",
+		},
+		{
+			name:        "invalid dsn",
+			driver:      "sqlite3",
+			dsn:         "file::memory:?not_exist_option=1", // in-memory database, but with an invalid option
+			wantErr:     true,
+			errContains: "foreign_keys pragma is off",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ctx := context.Background()
+			client, err := InitDatabase(ctx, tt.driver, tt.dsn)
+
+			if tt.wantErr {
+				assert.Error(t, err)
+				if tt.errContains != "" {
+					assert.Contains(t, err.Error(), tt.errContains)
+				}
+				assert.Nil(t, client)
+			} else {
+				require.NoError(t, err)
+				assert.NotNil(t, client)
+
+				// Verify the database connection actually works
+				err = client.Schema.Create(ctx)
+				assert.NoError(t, err)
+
+				// Clean up
+				client.Close()
+			}
+		})
+	}
+}
diff --git a/backend/internal/server/ent.go b/backend/internal/server/ent.go
new file mode 100644
index 0000000..9e289ae
--- /dev/null
+++ b/backend/internal/server/ent.go
@@ -0,0 +1,31 @@
+package server
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	_ "github.com/mattn/go-sqlite3"
+	"github.com/rs/zerolog/log"
+	"tss-rocks-be/ent"
+	"tss-rocks-be/internal/config"
+)
+
+// NewEntClient creates a new ent client
+func NewEntClient(cfg *config.Config) *ent.Client {
+	// TODO: Implement database connection based on config
+	// For now, we'll use SQLite for development
+	db, err := sql.Open("sqlite3", "file:ent?mode=memory&cache=shared&_fk=1")
+	if err != nil {
+		log.Fatal().Err(err).Msg("Failed to connect to database")
+	}
+
+	// Create ent client
+	client := ent.NewClient(ent.Driver(db))
+
+	// Run the auto migration tool
+	if err := client.Schema.Create(context.Background()); err != nil {
+		log.Fatal().Err(err).Msg("Failed to create schema resources")
+	}
+
+	return client
+}
diff --git a/backend/internal/server/ent_test.go b/backend/internal/server/ent_test.go
new file mode 100644
index 0000000..1f71881
--- /dev/null
+++ b/backend/internal/server/ent_test.go
@@ -0,0 +1,40 @@
+package server
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"tss-rocks-be/internal/config"
+)
+
+func TestNewEntClient(t *testing.T) {
+	tests := []struct {
+		name string
+		cfg  *config.Config
+	}{
+		{
+			name: "default sqlite3 config",
+			cfg: &config.Config{
+				Database: config.DatabaseConfig{
+					Driver: "sqlite3",
+					DSN:    "file:ent?mode=memory&cache=shared&_fk=1",
+				},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			client := NewEntClient(tt.cfg)
+			assert.NotNil(t, client)
+
+			// Verify the client actually works
+			err := client.Schema.Create(context.Background())
+			assert.NoError(t, err)
+
+			// Clean up
+			client.Close()
+		})
+	}
+}
diff --git a/backend/internal/server/server.go b/backend/internal/server/server.go
new file mode 100644
index 0000000..9291cb7
--- /dev/null
+++ b/backend/internal/server/server.go
@@ -0,0 +1,90 @@
+package server
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+	"github.com/rs/zerolog/log"
+	"tss-rocks-be/ent"
+	"tss-rocks-be/internal/config"
+	"tss-rocks-be/internal/handler"
+	"tss-rocks-be/internal/middleware"
+	"tss-rocks-be/internal/service"
+	"tss-rocks-be/internal/storage"
+)
+
+type Server struct {
+	config  *config.Config
+	router  *gin.Engine
+	handler *handler.Handler
+	server  *http.Server
+}
+
+func New(cfg *config.Config, client *ent.Client) (*Server, error) {
+	// Initialize storage
+	store, err := storage.NewStorage(context.Background(), &cfg.Storage)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize storage: %w", err)
+	}
+
+	// Initialize service
+	svc := service.NewService(client, store)
+
+	// Initialize RBAC
+	if err := svc.InitializeRBAC(context.Background()); err != nil {
+		return nil, fmt.Errorf("failed to initialize RBAC: %w", err)
+	}
+
+	// Initialize handler
+	h := handler.NewHandler(cfg, svc)
+
+	// Initialize router; gin.Default already attaches the Logger and
+	// Recovery middleware, so they are not registered a second time below
+	router := gin.Default()
+
+	// Add CORS middleware if needed
+	router.Use(middleware.CORS())
+
+	// Global middleware
+	router.Use(middleware.RateLimit(&cfg.RateLimit))
+
+	// Access-log middleware
+	accessLog, err := middleware.AccessLog(&cfg.AccessLog)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize access log: %w", err)
+	}
+	router.Use(accessLog)
+
+	// File-validation middleware for the upload route
+	router.POST("/api/v1/media/upload", middleware.ValidateUpload(&cfg.Storage.Upload))
+
+	// Register routes
+	h.RegisterRoutes(router)
+
+	return &Server{
+		config:  cfg,
+		router:  router,
+		handler: h,
+	}, nil
+}
+
+func (s *Server) Start() error {
+	addr := fmt.Sprintf("%s:%d", s.config.Server.Host, s.config.Server.Port)
+	s.server = &http.Server{
+		Addr:    addr,
+		Handler: s.router,
+	}
+
+	log.Info().Msgf("Starting server on %s", addr)
+	return s.server.ListenAndServe()
+}
+
+func (s *Server) Shutdown(ctx context.Context) error {
+	if s.server != nil {
+		return s.server.Shutdown(ctx)
+	}
+	return nil
+}
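+
+// Illustrative sketch: a typical caller runs Start in a goroutine and drives
+// Shutdown from a signal handler. This wiring is an assumption about the
+// caller (see cmd/server/main.go), not something this file provides:
+//
+//	srv, _ := server.New(cfg, client)
+//	go func() {
+//		if err := srv.Start(); err != nil && err != http.ErrServerClosed {
+//			log.Fatal().Err(err).Msg("server error")
+//		}
+//	}()
+//	quit := make(chan os.Signal, 1)
+//	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
+//	<-quit
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	_ = srv.Shutdown(ctx)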
return nil +} diff --git a/backend/internal/server/server_test.go b/backend/internal/server/server_test.go new file mode 100644 index 0000000..41d552e --- /dev/null +++ b/backend/internal/server/server_test.go @@ -0,0 +1,220 @@ +package server + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "tss-rocks-be/internal/config" + "tss-rocks-be/internal/types" + "tss-rocks-be/ent/enttest" +) + +func TestNew(t *testing.T) { + // 创建测试配置 + cfg := &config.Config{ + Server: config.ServerConfig{ + Host: "localhost", + Port: 8080, + }, + Storage: config.StorageConfig{ + Type: "local", + Local: config.LocalStorage{ + RootDir: "testdata", + }, + Upload: types.UploadConfig{ + MaxSize: 10, + AllowedTypes: []string{"image/jpeg", "image/png"}, + AllowedExtensions: []string{".jpg", ".png"}, + }, + }, + RateLimit: types.RateLimitConfig{ + IPRate: 100, + IPBurst: 200, + RouteRates: map[string]struct { + Rate int `yaml:"rate"` + Burst int `yaml:"burst"` + }{ + "/api/v1/upload": {Rate: 10, Burst: 20}, + }, + }, + AccessLog: types.AccessLogConfig{ + EnableConsole: true, + EnableFile: true, + FilePath: "testdata/access.log", + Format: "json", + Level: "info", + Rotation: struct { + MaxSize int `yaml:"max_size"` + MaxAge int `yaml:"max_age"` + MaxBackups int `yaml:"max_backups"` + Compress bool `yaml:"compress"` + LocalTime bool `yaml:"local_time"` + }{ + MaxSize: 100, + MaxAge: 7, + MaxBackups: 3, + Compress: true, + LocalTime: true, + }, + }, + } + + // 创建测试数据库客户端 + client := enttest.Open(t, "sqlite3", "file:ent?mode=memory&cache=shared&_fk=1") + defer client.Close() + + // 测试服务器初始化 + s, err := New(cfg, client) + require.NoError(t, err) + assert.NotNil(t, s) + assert.NotNil(t, s.router) + assert.NotNil(t, s.handler) + assert.Equal(t, cfg, s.config) +} + +func TestNew_StorageError(t *testing.T) { + // 创建一个无效的存储配置 + cfg := &config.Config{ + Storage: config.StorageConfig{ + Type: "invalid_type", // 使用无效的存储类型 + }, + } + + client := enttest.Open(t, "sqlite3", "file:ent?mode=memory&cache=shared&_fk=1") + defer client.Close() + + s, err := New(cfg, client) + assert.Error(t, err) + assert.Nil(t, s) + assert.Contains(t, err.Error(), "failed to initialize storage") +} + +func TestServer_StartAndShutdown(t *testing.T) { + // 创建测试配置 + cfg := &config.Config{ + Server: config.ServerConfig{ + Host: "localhost", + Port: 0, // 使用随机端口 + }, + Storage: config.StorageConfig{ + Type: "local", + Local: config.LocalStorage{ + RootDir: "testdata", + }, + }, + RateLimit: types.RateLimitConfig{ + IPRate: 100, + IPBurst: 200, + }, + AccessLog: types.AccessLogConfig{ + EnableConsole: true, + Format: "json", + Level: "info", + }, + } + + // 创建测试数据库客户端 + client := enttest.Open(t, "sqlite3", "file:ent?mode=memory&cache=shared&_fk=1") + defer client.Close() + + // 初始化服务器 + s, err := New(cfg, client) + require.NoError(t, err) + + // 创建一个通道来接收服务器错误 + errChan := make(chan error, 1) + + // 在 goroutine 中启动服务器 + go func() { + err := s.Start() + if err != nil && err != http.ErrServerClosed { + errChan <- err + } + close(errChan) + }() + + // 给服务器一些时间启动 + time.Sleep(100 * time.Millisecond) + + // 测试关闭服务器 + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + err = s.Shutdown(ctx) + assert.NoError(t, err) + + // 检查服务器是否有错误发生 + err = <-errChan + assert.NoError(t, err) +} + +func TestServer_StartError(t *testing.T) { + // 创建一个配置,使用已经被占用的端口来触发错误 + cfg := &config.Config{ + Server: config.ServerConfig{ + Host: "localhost", + Port: 
+func TestServer_StartError(t *testing.T) {
+	// Create a config that reuses an already-occupied port to trigger an error
+	cfg := &config.Config{
+		Server: config.ServerConfig{
+			Host: "localhost",
+			Port: 8899, // fixed port so the test can provoke a conflict
+		},
+		Storage: config.StorageConfig{
+			Type: "local",
+			Local: config.LocalStorage{
+				RootDir: "testdata",
+			},
+		},
+	}
+
+	client := enttest.Open(t, "sqlite3", "file:ent?mode=memory&cache=shared&_fk=1")
+	defer client.Close()
+
+	// Create the first server instance
+	s1, err := New(cfg, client)
+	require.NoError(t, err)
+
+	// Create a channel to receive server errors
+	errChan := make(chan error, 1)
+
+	// Start the first server
+	go func() {
+		err := s1.Start()
+		if err != nil && err != http.ErrServerClosed {
+			errChan <- err
+		}
+		close(errChan)
+	}()
+
+	// Give the server some time to start
+	time.Sleep(100 * time.Millisecond)
+
+	// Starting a second server on the same port should fail
+	s2, err := New(cfg, client)
+	require.NoError(t, err)
+
+	err = s2.Start()
+	assert.Error(t, err)
+
+	// Clean up
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	// Shut down the first server
+	err = s1.Shutdown(ctx)
+	assert.NoError(t, err)
+
+	// Check whether the first server reported any errors
+	err = <-errChan
+	assert.NoError(t, err)
+
+	// Shut down the second server
+	err = s2.Shutdown(ctx)
+	assert.NoError(t, err)
+}
+
+func TestServer_ShutdownWithNilServer(t *testing.T) {
+	s := &Server{}
+	err := s.Shutdown(context.Background())
+	assert.NoError(t, err)
+}
diff --git a/backend/internal/service/impl.go b/backend/internal/service/impl.go
new file mode 100644
index 0000000..30f5f57
--- /dev/null
+++ b/backend/internal/service/impl.go
@@ -0,0 +1,892 @@
+package service
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"mime/multipart"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+
+	"tss-rocks-be/ent"
+	"tss-rocks-be/ent/category"
+	"tss-rocks-be/ent/categorycontent"
+	"tss-rocks-be/ent/contributor"
+	"tss-rocks-be/ent/contributorsociallink"
+	"tss-rocks-be/ent/daily"
+	"tss-rocks-be/ent/dailycontent"
+	"tss-rocks-be/ent/permission"
+	"tss-rocks-be/ent/post"
+	"tss-rocks-be/ent/postcontent"
+	"tss-rocks-be/ent/role"
+	"tss-rocks-be/ent/user"
+	"tss-rocks-be/internal/storage"
+
+	"github.com/google/uuid"
+	"golang.org/x/crypto/bcrypt"
+)
+
+// Error definitions
+var (
+	ErrUnauthorized = errors.New("unauthorized")
+)
+
+// openFile is a variable that holds the Open method of multipart.FileHeader.
+// This allows us to mock it in tests.
+var openFile func(fh *multipart.FileHeader) (multipart.File, error) = func(fh *multipart.FileHeader) (multipart.File, error) {
+	return fh.Open()
+}
+
+type serviceImpl struct {
+	client  *ent.Client
+	storage storage.Storage
+}
+
+// NewService creates a new Service instance
+func NewService(client *ent.Client, storage storage.Storage) Service {
+	return &serviceImpl{
+		client:  client,
+		storage: storage,
+	}
+}
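+
+// Illustrative sketch: a test can stub file opening by reassigning openFile
+// and restoring it afterwards; newMockMultipartFile is the helper defined in
+// impl_test.go, and the byte content here is an assumption for the example:
+//
+//	openFile = func(fh *multipart.FileHeader) (multipart.File, error) {
+//		return newMockMultipartFile([]byte("fake image bytes")), nil
+//	}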
+// User operations
+func (s *serviceImpl) CreateUser(ctx context.Context, email, password string, roleStr string) (*ent.User, error) {
+	// Hash the password
+	hashedPassword, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
+	if err != nil {
+		return nil, fmt.Errorf("failed to hash password: %w", err)
+	}
+
+	// Add the user role by default
+	userRole, err := s.client.Role.Query().Where(role.NameEQ("user")).Only(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get user role: %w", err)
+	}
+
+	// If a specific role is requested and it's not "user", get that role too
+	var additionalRole *ent.Role
+	if roleStr != "" && roleStr != "user" {
+		additionalRole, err = s.client.Role.Query().Where(role.NameEQ(roleStr)).Only(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get role: %w", err)
+		}
+	}
+
+	// Create user with password and user role
+	userCreate := s.client.User.Create().
+		SetEmail(email).
+		SetPasswordHash(string(hashedPassword)).
+		AddRoles(userRole)
+
+	// Add the additional role if specified
+	if additionalRole != nil {
+		userCreate.AddRoles(additionalRole)
+	}
+
+	// Save the user
+	user, err := userCreate.Save(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create user: %w", err)
+	}
+
+	return user, nil
+}
+
+func (s *serviceImpl) GetUserByEmail(ctx context.Context, email string) (*ent.User, error) {
+	user, err := s.client.User.Query().
+		Where(user.EmailEQ(email)).
+		Only(ctx)
+	if err != nil {
+		if ent.IsNotFound(err) {
+			return nil, fmt.Errorf("user not found: %s", email)
+		}
+		return nil, fmt.Errorf("failed to get user: %w", err)
+	}
+	return user, nil
+}
+
+func (s *serviceImpl) ValidatePassword(ctx context.Context, user *ent.User, password string) bool {
+	err := bcrypt.CompareHashAndPassword([]byte(user.PasswordHash), []byte(password))
+	return err == nil
+}
+
+// Category operations
+func (s *serviceImpl) CreateCategory(ctx context.Context) (*ent.Category, error) {
+	return s.client.Category.Create().Save(ctx)
+}
+
+func (s *serviceImpl) AddCategoryContent(ctx context.Context, categoryID int, langCode, name, description, slug string) (*ent.CategoryContent, error) {
+	var languageCode categorycontent.LanguageCode
+	switch langCode {
+	case "en":
+		languageCode = categorycontent.LanguageCodeEN
+	case "zh-Hans":
+		languageCode = categorycontent.LanguageCodeZH_HANS
+	case "zh-Hant":
+		languageCode = categorycontent.LanguageCodeZH_HANT
+	default:
+		return nil, fmt.Errorf("unsupported language code: %s", langCode)
+	}
+
+	return s.client.CategoryContent.Create().
+		SetCategoryID(categoryID).
+		SetLanguageCode(languageCode).
+		SetName(name).
+		SetDescription(description).
+		SetSlug(slug).
+		Save(ctx)
+}
+
+func (s *serviceImpl) GetCategoryBySlug(ctx context.Context, langCode, slug string) (*ent.Category, error) {
+	var languageCode categorycontent.LanguageCode
+	switch langCode {
+	case "en":
+		languageCode = categorycontent.LanguageCodeEN
+	case "zh-Hans":
+		languageCode = categorycontent.LanguageCodeZH_HANS
+	case "zh-Hant":
+		languageCode = categorycontent.LanguageCodeZH_HANT
+	default:
+		return nil, fmt.Errorf("unsupported language code: %s", langCode)
+	}
+
+	return s.client.Category.Query().
+		Where(
+			category.HasContentsWith(
+				categorycontent.And(
+					categorycontent.LanguageCodeEQ(languageCode),
+					categorycontent.SlugEQ(slug),
+				),
+			),
+		).
+		WithContents(func(q *ent.CategoryContentQuery) {
+			q.Where(categorycontent.LanguageCodeEQ(languageCode))
+		}).
+		Only(ctx)
+}
+func (s *serviceImpl) GetCategories(ctx context.Context, langCode string) ([]*ent.Category, error) {
+	// Map the language code
+	var languageCode categorycontent.LanguageCode
+	switch langCode {
+	case "en":
+		languageCode = categorycontent.LanguageCodeEN
+	case "zh-Hans":
+		languageCode = categorycontent.LanguageCodeZH_HANS
+	case "zh-Hant":
+		languageCode = categorycontent.LanguageCodeZH_HANT
+	default:
+		// Return an empty list rather than an error for unsupported language codes
+		return []*ent.Category{}, nil
+	}
+
+	// Query from the CategoryContent table so that only categories with
+	// content in the requested language are returned
+	contents, err := s.client.CategoryContent.Query().
+		Where(
+			categorycontent.And(
+				categorycontent.LanguageCodeEQ(languageCode),
+				categorycontent.SlugHasPrefix("category-list-"),
+			),
+		).
+		WithCategory().
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// Deduplicate with a map, since a category may have several contents in the same language
+	categoryMap := make(map[int]*ent.Category)
+	for _, content := range contents {
+		if content.Edges.Category != nil {
+			categoryMap[content.Edges.Category.ID] = content.Edges.Category
+		}
+	}
+
+	// Turn the map into an ordered slice
+	var categories []*ent.Category
+	for _, cat := range categoryMap {
+		// Re-query the category to load its full edges
+		c, err := s.client.Category.Query().
+			Where(category.ID(cat.ID)).
+			WithContents(func(q *ent.CategoryContentQuery) {
+				q.Where(
+					categorycontent.And(
+						categorycontent.LanguageCodeEQ(languageCode),
+						categorycontent.SlugHasPrefix("category-list-"),
+					),
+				)
+			}).
+			Only(ctx)
+		if err != nil {
+			return nil, err
+		}
+		categories = append(categories, c)
+	}
+
+	// Sort by ID to keep the result stable
+	sort.Slice(categories, func(i, j int) bool {
+		return categories[i].ID < categories[j].ID
+	})
+
+	return categories, nil
+}
+
+func (s *serviceImpl) ListCategories(ctx context.Context, langCode string) ([]*ent.Category, error) {
+	// Map the language code
+	var languageCode categorycontent.LanguageCode
+	switch langCode {
+	case "en":
+		languageCode = categorycontent.LanguageCodeEN
+	case "zh-Hans":
+		languageCode = categorycontent.LanguageCodeZH_HANS
+	case "zh-Hant":
+		languageCode = categorycontent.LanguageCodeZH_HANT
+	default:
+		// Return an empty list rather than an error for unsupported language codes
+		return []*ent.Category{}, nil
+	}
+
+	// Query from the CategoryContent table so that only categories with
+	// content in the requested language are returned
+	contents, err := s.client.CategoryContent.Query().
+		Where(
+			categorycontent.And(
+				categorycontent.LanguageCodeEQ(languageCode),
+				categorycontent.SlugHasPrefix("category-list-"),
+			),
+		).
+		WithCategory().
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// Deduplicate with a map, since a category may have several contents in the same language
+	categoryMap := make(map[int]*ent.Category)
+	for _, content := range contents {
+		if content.Edges.Category != nil {
+			categoryMap[content.Edges.Category.ID] = content.Edges.Category
+		}
+	}
+
+	// Turn the map into an ordered slice
+	var categories []*ent.Category
+	for _, cat := range categoryMap {
+		// Re-query the category to load its full edges
+		c, err := s.client.Category.Query().
+			Where(category.ID(cat.ID)).
+			WithContents(func(q *ent.CategoryContentQuery) {
+				q.Where(
+					categorycontent.And(
+						categorycontent.LanguageCodeEQ(languageCode),
+						categorycontent.SlugHasPrefix("category-list-"),
+					),
+				)
+			}).
+			Only(ctx)
+		if err != nil {
+			return nil, err
+		}
+		categories = append(categories, c)
+	}
+
+	// Sort by ID to keep the result stable
+	sort.Slice(categories, func(i, j int) bool {
+		return categories[i].ID < categories[j].ID
+	})
+
+	return categories, nil
+}
+
+// Daily operations
+func (s *serviceImpl) CreateDaily(ctx context.Context, id string, categoryID int, imageURL string) (*ent.Daily, error) {
+	_, err := s.client.Daily.Create().
+		SetID(id).
+		SetCategoryID(categoryID).
+		SetImageURL(imageURL).
+		Save(ctx)
+	if err != nil {
+		return nil, err
+	}
+	// Reload with the Category edge attached
+	return s.client.Daily.Query().
+		Where(daily.IDEQ(id)).
+		WithCategory().
+		Only(ctx)
+}
+
+func (s *serviceImpl) AddDailyContent(ctx context.Context, dailyID string, langCode string, quote string) (*ent.DailyContent, error) {
+	var languageCode dailycontent.LanguageCode
+	switch langCode {
+	case "en":
+		languageCode = dailycontent.LanguageCodeEN
+	case "zh-Hans":
+		languageCode = dailycontent.LanguageCodeZH_HANS
+	case "zh-Hant":
+		languageCode = dailycontent.LanguageCodeZH_HANT
+	default:
+		return nil, fmt.Errorf("unsupported language code: %s", langCode)
+	}
+
+	return s.client.DailyContent.Create().
+		SetDailyID(dailyID).
+		SetLanguageCode(languageCode).
+		SetQuote(quote).
+ Save(ctx) +} + +func (s *serviceImpl) GetDailyByID(ctx context.Context, id string) (*ent.Daily, error) { + return s.client.Daily.Query(). + Where(daily.IDEQ(id)). + WithCategory(). + WithContents(). + Only(ctx) +} + +func (s *serviceImpl) ListDailies(ctx context.Context, langCode string, categoryID *int, limit int, offset int) ([]*ent.Daily, error) { + var languageCode dailycontent.LanguageCode + switch langCode { + case "en": + languageCode = dailycontent.LanguageCodeEN + case "zh-Hans": + languageCode = dailycontent.LanguageCodeZH_HANS + case "zh-Hant": + languageCode = dailycontent.LanguageCodeZH_HANT + default: + return nil, fmt.Errorf("unsupported language code: %s", langCode) + } + + query := s.client.Daily.Query(). + WithContents(func(q *ent.DailyContentQuery) { + if langCode != "" { + q.Where(dailycontent.LanguageCodeEQ(languageCode)) + } + }). + WithCategory() + + if categoryID != nil { + query.Where(daily.HasCategoryWith(category.ID(*categoryID))) + } + + query.Order(ent.Desc(daily.FieldCreatedAt)) + + if limit > 0 { + query.Limit(limit) + } + if offset > 0 { + query.Offset(offset) + } + + return query.All(ctx) +} + +// Media operations +func (s *serviceImpl) ListMedia(ctx context.Context, limit, offset int) ([]*ent.Media, error) { + return s.client.Media.Query(). + Order(ent.Desc("created_at")). + Limit(limit). + Offset(offset). + All(ctx) +} + +func (s *serviceImpl) Upload(ctx context.Context, file *multipart.FileHeader, userID int) (*ent.Media, error) { + // Open the uploaded file + src, err := openFile(file) + if err != nil { + return nil, err + } + defer src.Close() + + // Save the file to storage + fileInfo, err := s.storage.Save(ctx, file.Filename, file.Header.Get("Content-Type"), src) + if err != nil { + return nil, err + } + + // Create media record + return s.client.Media.Create(). + SetStorageID(fileInfo.ID). + SetOriginalName(file.Filename). + SetMimeType(fileInfo.ContentType). + SetSize(fileInfo.Size). + SetURL(fileInfo.URL). + SetCreatedBy(strconv.Itoa(userID)). + Save(ctx) +} + +func (s *serviceImpl) GetMedia(ctx context.Context, id int) (*ent.Media, error) { + return s.client.Media.Get(ctx, id) +} + +func (s *serviceImpl) GetFile(ctx context.Context, id int) (io.ReadCloser, *storage.FileInfo, error) { + media, err := s.GetMedia(ctx, id) + if err != nil { + return nil, nil, err + } + + return s.storage.Get(ctx, media.StorageID) +} + +func (s *serviceImpl) DeleteMedia(ctx context.Context, id int, userID int) error { + media, err := s.GetMedia(ctx, id) + if err != nil { + return err + } + + // Check ownership + if media.CreatedBy != strconv.Itoa(userID) { + return ErrUnauthorized + } + + // Delete from storage + if err := s.storage.Delete(ctx, media.StorageID); err != nil { + return err + } + + // Delete from database + return s.client.Media.DeleteOne(media).Exec(ctx) +} + +// Post operations +func (s *serviceImpl) CreatePost(ctx context.Context, status string) (*ent.Post, error) { + var postStatus post.Status + switch status { + case "draft": + postStatus = post.StatusDraft + case "published": + postStatus = post.StatusPublished + case "archived": + postStatus = post.StatusArchived + default: + return nil, fmt.Errorf("invalid status: %s", status) + } + + // Generate a random slug + slug := fmt.Sprintf("post-%s", uuid.New().String()[:8]) + + return s.client.Post.Create(). + SetStatus(postStatus). + SetSlug(slug). 
+ Save(ctx) +} + +func (s *serviceImpl) AddPostContent(ctx context.Context, postID int, langCode, title, content, summary string, metaKeywords, metaDescription string) (*ent.PostContent, error) { + var languageCode postcontent.LanguageCode + switch langCode { + case "en": + languageCode = postcontent.LanguageCodeEN + case "zh-Hans": + languageCode = postcontent.LanguageCodeZH_HANS + case "zh-Hant": + languageCode = postcontent.LanguageCodeZH_HANT + default: + return nil, fmt.Errorf("unsupported language code: %s", langCode) + } + + // Get the post first to check if it exists + post, err := s.client.Post.Get(ctx, postID) + if err != nil { + return nil, fmt.Errorf("failed to get post: %w", err) + } + + // Generate slug from title + var slug string + if langCode == "en" { + // For English titles, convert to lowercase and replace spaces with dashes + slug = strings.ToLower(strings.ReplaceAll(title, " ", "-")) + // Remove all non-alphanumeric characters except dashes + slug = regexp.MustCompile(`[^a-z0-9-]+`).ReplaceAllString(slug, "") + // Ensure slug is not empty and has minimum length + if slug == "" || len(slug) < 4 { + slug = fmt.Sprintf("post-%s", uuid.NewString()[:8]) + } + } else { + // For Chinese titles, use the title as is + slug = title + } + + return s.client.PostContent.Create(). + SetPost(post). + SetLanguageCode(languageCode). + SetTitle(title). + SetContentMarkdown(content). + SetSummary(summary). + SetMetaKeywords(metaKeywords). + SetMetaDescription(metaDescription). + SetSlug(slug). + Save(ctx) +} + +func (s *serviceImpl) GetPostBySlug(ctx context.Context, langCode, slug string) (*ent.Post, error) { + var languageCode postcontent.LanguageCode + switch langCode { + case "en": + languageCode = postcontent.LanguageCodeEN + case "zh-Hans": + languageCode = postcontent.LanguageCodeZH_HANS + case "zh-Hant": + languageCode = postcontent.LanguageCodeZH_HANT + default: + return nil, fmt.Errorf("unsupported language code: %s", langCode) + } + + // Find posts that have content with the given slug and language code + posts, err := s.client.Post.Query(). + Where( + post.And( + post.StatusEQ(post.StatusPublished), + post.HasContentsWith( + postcontent.And( + postcontent.LanguageCodeEQ(languageCode), + postcontent.SlugEQ(slug), + ), + ), + ), + ). + WithContents(func(q *ent.PostContentQuery) { + q.Where(postcontent.LanguageCodeEQ(languageCode)) + }). + WithCategory(). + All(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get posts: %w", err) + } + + if len(posts) == 0 { + return nil, fmt.Errorf("post not found") + } + if len(posts) > 1 { + return nil, fmt.Errorf("multiple posts found with the same slug") + } + + return posts[0], nil +} + +func (s *serviceImpl) ListPosts(ctx context.Context, langCode string, categoryID *int, limit, offset int) ([]*ent.Post, error) { + var languageCode postcontent.LanguageCode + switch langCode { + case "en": + languageCode = postcontent.LanguageCodeEN + case "zh-Hans": + languageCode = postcontent.LanguageCodeZH_HANS + case "zh-Hant": + languageCode = postcontent.LanguageCodeZH_HANT + default: + return nil, fmt.Errorf("unsupported language code: %s", langCode) + } + + // First find all post IDs that have content in the given language + query := s.client.PostContent.Query(). + Where(postcontent.LanguageCodeEQ(languageCode)). + QueryPost(). 
+		Where(post.StatusEQ(post.StatusPublished))
+
+	// Add category filter if provided
+	if categoryID != nil {
+		query = query.Where(post.HasCategoryWith(category.ID(*categoryID)))
+	}
+
+	// Get unique post IDs
+	postIDs, err := query.
+		Order(ent.Desc(post.FieldCreatedAt)).
+		IDs(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get post IDs: %w", err)
+	}
+
+	// Remove duplicates while preserving order
+	seen := make(map[int]bool)
+	uniqueIDs := make([]int, 0, len(postIDs))
+	for _, id := range postIDs {
+		if !seen[id] {
+			seen[id] = true
+			uniqueIDs = append(uniqueIDs, id)
+		}
+	}
+	postIDs = uniqueIDs
+
+	if len(postIDs) == 0 {
+		return []*ent.Post{}, nil
+	}
+
+	// If no category filter is applied, only take the latest 5 posts
+	if categoryID == nil && len(postIDs) > 5 {
+		postIDs = postIDs[:5]
+	}
+
+	// Apply pagination
+	if offset >= len(postIDs) {
+		return []*ent.Post{}, nil
+	}
+
+	// If limit is 0, set it to the length of postIDs
+	if limit == 0 {
+		limit = len(postIDs)
+	}
+
+	// Adjust limit if it would exceed total
+	if offset+limit > len(postIDs) {
+		limit = len(postIDs) - offset
+	}
+
+	// Get the paginated post IDs
+	paginatedIDs := postIDs[offset : offset+limit]
+
+	// Get the posts with their contents
+	posts, err := s.client.Post.Query().
+		Where(post.IDIn(paginatedIDs...)).
+		WithContents(func(q *ent.PostContentQuery) {
+			q.Where(postcontent.LanguageCodeEQ(languageCode))
+		}).
+		WithCategory().
+		Order(ent.Desc(post.FieldCreatedAt)).
+		All(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get posts: %w", err)
+	}
+
+	// Sort posts by ID to match the order of paginatedIDs
+	sort.Slice(posts, func(i, j int) bool {
+		// Find the index of each post ID in paginatedIDs
+		var iIndex, jIndex int
+		for idx, id := range paginatedIDs {
+			if posts[i].ID == id {
+				iIndex = idx
+			}
+			if posts[j].ID == id {
+				jIndex = idx
+			}
+		}
+		return iIndex < jIndex
+	})
+
+	return posts, nil
+}
+
+// Contributor operations
+func (s *serviceImpl) CreateContributor(ctx context.Context, name string, avatarURL, bio *string) (*ent.Contributor, error) {
+	builder := s.client.Contributor.Create().
+		SetName(name)
+
+	if avatarURL != nil {
+		builder.SetAvatarURL(*avatarURL)
+	}
+	if bio != nil {
+		builder.SetBio(*bio)
+	}
+
+	contributor, err := builder.Save(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create contributor: %w", err)
+	}
+
+	return contributor, nil
+}
+
+func (s *serviceImpl) AddContributorSocialLink(ctx context.Context, contributorID int, linkType, name, value string) (*ent.ContributorSocialLink, error) {
+	// Make sure the contributor exists
+	contributor, err := s.client.Contributor.Get(ctx, contributorID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get contributor: %w", err)
+	}
+
+	// Create the social link
+	link, err := s.client.ContributorSocialLink.Create().
+		SetContributor(contributor).
+		SetType(contributorsociallink.Type(linkType)).
+		SetName(name).
+		SetValue(value).
+		Save(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create social link: %w", err)
+	}
+
+	return link, nil
+}
+
+func (s *serviceImpl) GetContributorByID(ctx context.Context, id int) (*ent.Contributor, error) {
+	contributor, err := s.client.Contributor.Query().
+		Where(contributor.ID(id)).
+		WithSocialLinks().
+		Only(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get contributor: %w", err)
+	}
+	return contributor, nil
+}
+
+func (s *serviceImpl) ListContributors(ctx context.Context) ([]*ent.Contributor, error) {
+	contributors, err := s.client.Contributor.Query().
+		WithSocialLinks().
+ Order(ent.Asc(contributor.FieldName)). + All(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list contributors: %w", err) + } + return contributors, nil +} + +// RBAC operations +func (s *serviceImpl) InitializeRBAC(ctx context.Context) error { + // Create roles if they don't exist + adminRole, err := s.client.Role.Create().SetName("admin").Save(ctx) + if ent.IsConstraintError(err) { + adminRole, err = s.client.Role.Query().Where(role.NameEQ("admin")).Only(ctx) + } + if err != nil { + return fmt.Errorf("failed to create admin role: %w", err) + } + + editorRole, err := s.client.Role.Create().SetName("editor").Save(ctx) + if ent.IsConstraintError(err) { + editorRole, err = s.client.Role.Query().Where(role.NameEQ("editor")).Only(ctx) + } + if err != nil { + return fmt.Errorf("failed to create editor role: %w", err) + } + + userRole, err := s.client.Role.Create().SetName("user").Save(ctx) + if ent.IsConstraintError(err) { + userRole, err = s.client.Role.Query().Where(role.NameEQ("user")).Only(ctx) + } + if err != nil { + return fmt.Errorf("failed to create user role: %w", err) + } + + // Define permissions + permissions := []struct { + role *ent.Role + resource string + actions []string + }{ + // Admin permissions (full access) + {adminRole, "users", []string{"create", "read", "update", "delete", "assign_role"}}, + {adminRole, "roles", []string{"create", "read", "update", "delete"}}, + {adminRole, "media", []string{"create", "read", "update", "delete"}}, + {adminRole, "posts", []string{"create", "read", "update", "delete"}}, + {adminRole, "categories", []string{"create", "read", "update", "delete"}}, + {adminRole, "contributors", []string{"create", "read", "update", "delete"}}, + {adminRole, "dailies", []string{"create", "read", "update", "delete"}}, + + // Editor permissions (can create and manage content) + {editorRole, "media", []string{"create", "read", "update", "delete"}}, + {editorRole, "posts", []string{"create", "read", "update", "delete"}}, + {editorRole, "categories", []string{"read"}}, + {editorRole, "contributors", []string{"read"}}, + {editorRole, "dailies", []string{"create", "read", "update", "delete"}}, + + // User permissions (read-only access) + {userRole, "media", []string{"read"}}, + {userRole, "posts", []string{"read"}}, + {userRole, "categories", []string{"read"}}, + {userRole, "contributors", []string{"read"}}, + {userRole, "dailies", []string{"read"}}, + } + + // Create permissions for each role + for _, p := range permissions { + for _, action := range p.actions { + perm, err := s.client.Permission.Create(). + SetResource(p.resource). + SetAction(action). + Save(ctx) + if ent.IsConstraintError(err) { + perm, err = s.client.Permission.Query(). + Where( + permission.ResourceEQ(p.resource), + permission.ActionEQ(action), + ). + Only(ctx) + } + if err != nil { + return fmt.Errorf("failed to create permission %s:%s: %w", p.resource, action, err) + } + + // Add permission to role + err = s.client.Role.UpdateOne(p.role). + AddPermissions(perm). 
+				Exec(ctx)
+			if err != nil && !ent.IsConstraintError(err) {
+				return fmt.Errorf("failed to add permission %s:%s to role %s: %w", p.resource, action, p.role.Name, err)
+			}
+		}
+	}
+
+	return nil
+}
+
+func (s *serviceImpl) AssignRole(ctx context.Context, userID int, roleName string) error {
+	user, err := s.client.User.Get(ctx, userID)
+	if err != nil {
+		return fmt.Errorf("failed to get user: %w", err)
+	}
+
+	role, err := s.client.Role.Query().Where(role.NameEQ(roleName)).Only(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to get role: %w", err)
+	}
+
+	return s.client.User.UpdateOne(user).AddRoles(role).Exec(ctx)
+}
+
+func (s *serviceImpl) RemoveRole(ctx context.Context, userID int, roleName string) error {
+	// Don't allow removing the user role
+	if roleName == "user" {
+		return errors.New("cannot remove user role")
+	}
+
+	user, err := s.client.User.Get(ctx, userID)
+	if err != nil {
+		return fmt.Errorf("failed to get user: %w", err)
+	}
+
+	role, err := s.client.Role.Query().Where(role.NameEQ(roleName)).Only(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to get role: %w", err)
+	}
+
+	return s.client.User.UpdateOne(user).RemoveRoles(role).Exec(ctx)
+}
+
+func (s *serviceImpl) GetUserRoles(ctx context.Context, userID int) ([]*ent.Role, error) {
+	user, err := s.client.User.Query().
+		Where(user.ID(userID)).
+		WithRoles().
+		Only(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get user: %w", err)
+	}
+
+	return user.Edges.Roles, nil
+}
+
+func (s *serviceImpl) HasPermission(ctx context.Context, userID int, permission string) (bool, error) {
+	user, err := s.client.User.Query().
+		Where(user.ID(userID)).
+		WithRoles(func(q *ent.RoleQuery) {
+			q.WithPermissions()
+		}).
+		Only(ctx)
+	if err != nil {
+		return false, fmt.Errorf("failed to get user: %w", err)
+	}
+
+	parts := strings.Split(permission, ":")
+	if len(parts) != 2 {
+		return false, fmt.Errorf("invalid permission format: %s, expected format: resource:action", permission)
+	}
+	resource, action := parts[0], parts[1]
+
+	for _, r := range user.Edges.Roles {
+		for _, p := range r.Edges.Permissions {
+			if p.Resource == resource && p.Action == action {
+				return true, nil
+			}
+		}
+	}
+
+	return false, nil
+}
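+
+// Illustrative sketch: permission checks use the "resource:action" strings
+// seeded by InitializeRBAC; a caller (the handler wiring is assumed, not
+// defined here) would gate an endpoint roughly like this:
+//
+//	ok, err := svc.HasPermission(ctx, userID, "posts:create")
+//	if err != nil || !ok {
+//		// reject the request with 403
+//	}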
diff --git a/backend/internal/service/impl_test.go b/backend/internal/service/impl_test.go
new file mode 100644
index 0000000..5a00848
--- /dev/null
+++ b/backend/internal/service/impl_test.go
@@ -0,0 +1,1092 @@
+package service
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"mime/multipart"
+	"net/textproto"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	"go.uber.org/mock/gomock"
+
+	"tss-rocks-be/ent"
+	"tss-rocks-be/ent/categorycontent"
+	"tss-rocks-be/ent/dailycontent"
+	"tss-rocks-be/internal/storage"
+	"tss-rocks-be/internal/storage/mock"
+	"tss-rocks-be/internal/testutil"
+)
+
+type ServiceImplTestSuite struct {
+	suite.Suite
+	ctx     context.Context
+	client  *ent.Client
+	storage *mock.MockStorage
+	ctrl    *gomock.Controller
+	svc     Service
+}
+
+func (s *ServiceImplTestSuite) SetupTest() {
+	s.ctx = context.Background()
+	s.client = testutil.NewTestClient()
+	require.NotNil(s.T(), s.client)
+
+	s.ctrl = gomock.NewController(s.T())
+	s.storage = mock.NewMockStorage(s.ctrl)
+	s.svc = NewService(s.client, s.storage)
+
+	// Clean up the database
+	_, err := s.client.Category.Delete().Exec(s.ctx)
+	require.NoError(s.T(), err)
+	_, err = s.client.CategoryContent.Delete().Exec(s.ctx)
+	require.NoError(s.T(), err)
+	_, err = s.client.User.Delete().Exec(s.ctx)
+	require.NoError(s.T(), err)
+	_, err = s.client.Role.Delete().Exec(s.ctx)
+	require.NoError(s.T(), err)
+	_, err = s.client.Permission.Delete().Exec(s.ctx)
+	require.NoError(s.T(), err)
+	_, err = s.client.Daily.Delete().Exec(s.ctx)
+	require.NoError(s.T(), err)
+	_, err = s.client.DailyContent.Delete().Exec(s.ctx)
+	require.NoError(s.T(), err)
+
+	// Initialize the RBAC system
+	err = s.svc.InitializeRBAC(s.ctx)
+	require.NoError(s.T(), err)
+
+	// Set default openFile function
+	openFile = func(fh *multipart.FileHeader) (multipart.File, error) {
+		return fh.Open()
+	}
+}
+
+func (s *ServiceImplTestSuite) TearDownTest() {
+	s.ctrl.Finish()
+	s.client.Close()
+}
+
+func TestServiceImplSuite(t *testing.T) {
+	suite.Run(t, new(ServiceImplTestSuite))
+}
+
+// mockMultipartFile implements multipart.File interface
+type mockMultipartFile struct {
+	*bytes.Reader
+}
+
+func (m *mockMultipartFile) Close() error {
+	return nil
+}
+
+func (m *mockMultipartFile) ReadAt(p []byte, off int64) (n int, err error) {
+	return m.Reader.ReadAt(p, off)
+}
+
+func (m *mockMultipartFile) Seek(offset int64, whence int) (int64, error) {
+	return m.Reader.Seek(offset, whence)
+}
+
+func newMockMultipartFile(data []byte) *mockMultipartFile {
+	return &mockMultipartFile{
+		Reader: bytes.NewReader(data),
+	}
+}
+
+func (s *ServiceImplTestSuite) TestCreateUser() {
+	testCases := []struct {
+		name      string
+		email     string
+		password  string
+		role      string
+		wantError bool
+	}{
+		{
+			name:      "Valid user creation",
+			email:     "test@example.com",
+			password:  "password123",
+			role:      "admin",
+			wantError: false,
+		},
+		{
+			name:      "Empty email",
+			email:     "",
+			password:  "password123",
+			role:      "user",
+			wantError: true,
+		},
+		{
+			name:      "Empty password",
+			email:     "test@example.com",
+			password:  "",
+			role:      "user",
+			wantError: true,
+		},
+		{
+			name:      "Invalid role",
+			email:     "test@example.com",
+			password:  "password123",
+			role:      "invalid_role",
+			wantError: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		s.Run(tc.name, func() {
+			user, err := s.svc.CreateUser(s.ctx, tc.email, tc.password, tc.role)
+			if tc.wantError {
+				assert.Error(s.T(), err)
+				assert.Nil(s.T(), user)
+			} else {
+				assert.NoError(s.T(), err)
+				assert.NotNil(s.T(), user)
+				assert.Equal(s.T(), tc.email, user.Email)
+			}
+		})
+	}
+}
+
+func (s *ServiceImplTestSuite) TestGetUserByEmail() {
+	// Create a test user first
+	email := "test@example.com"
+	password := "password123"
+	role := "user"
+
+	user, err := s.svc.CreateUser(s.ctx, email, password, role)
+	require.NoError(s.T(), err)
+	require.NotNil(s.T(), user)
+
+	s.Run("Existing user", func() {
+		found, err := s.svc.GetUserByEmail(s.ctx, email)
+		assert.NoError(s.T(), err)
+		assert.NotNil(s.T(), found)
+		assert.Equal(s.T(), email, found.Email)
+	})
+
+	s.Run("Non-existing user", func() {
+		found, err := s.svc.GetUserByEmail(s.ctx, "nonexistent@example.com")
+		assert.Error(s.T(), err)
+		assert.Nil(s.T(), found)
+	})
+}
+
+func (s *ServiceImplTestSuite) TestValidatePassword() {
+	// Create a test user first
+	email := "test@example.com"
+	password := "password123"
+	role := "user"
+
+	user, err := s.svc.CreateUser(s.ctx, email, password, role)
+	require.NoError(s.T(), err)
+	require.NotNil(s.T(), user)
+
+	s.Run("Valid password", func() {
+		valid := s.svc.ValidatePassword(s.ctx, user, password)
+		assert.True(s.T(), valid)
+	})
+
+	s.Run("Invalid password", func() {
+		valid := s.svc.ValidatePassword(s.ctx, user, "wrongpassword")
+		assert.False(s.T(), valid)
+	})
+}
+
+func (s
*ServiceImplTestSuite) TestRBAC() { + s.Run("AssignRole", func() { + user, err := s.svc.CreateUser(s.ctx, "test@example.com", "password", "admin") + require.NoError(s.T(), err) + + err = s.svc.AssignRole(s.ctx, user.ID, "user") + assert.NoError(s.T(), err) + }) + + s.Run("RemoveRole", func() { + user, err := s.svc.CreateUser(s.ctx, "test2@example.com", "password", "admin") + require.NoError(s.T(), err) + + err = s.svc.RemoveRole(s.ctx, user.ID, "admin") + assert.NoError(s.T(), err) + }) + + s.Run("HasPermission", func() { + s.Run("Admin can create users", func() { + user, err := s.svc.CreateUser(s.ctx, "admin@example.com", "password", "admin") + require.NoError(s.T(), err) + + hasPermission, err := s.svc.HasPermission(s.ctx, user.ID, "users:create") + require.NoError(s.T(), err) + assert.True(s.T(), hasPermission) + }) + + s.Run("Editor cannot create users", func() { + user, err := s.svc.CreateUser(s.ctx, "editor@example.com", "password", "editor") + require.NoError(s.T(), err) + + hasPermission, err := s.svc.HasPermission(s.ctx, user.ID, "users:create") + require.NoError(s.T(), err) + assert.False(s.T(), hasPermission) + }) + + s.Run("User cannot create users", func() { + user, err := s.svc.CreateUser(s.ctx, "user@example.com", "password", "user") + require.NoError(s.T(), err) + + hasPermission, err := s.svc.HasPermission(s.ctx, user.ID, "users:create") + require.NoError(s.T(), err) + assert.False(s.T(), hasPermission) + }) + + s.Run("Editor can create posts", func() { + user, err := s.svc.CreateUser(s.ctx, "editor2@example.com", "password", "editor") + require.NoError(s.T(), err) + + hasPermission, err := s.svc.HasPermission(s.ctx, user.ID, "posts:create") + require.NoError(s.T(), err) + assert.True(s.T(), hasPermission) + }) + + s.Run("User can read posts", func() { + user, err := s.svc.CreateUser(s.ctx, "user2@example.com", "password", "user") + require.NoError(s.T(), err) + + hasPermission, err := s.svc.HasPermission(s.ctx, user.ID, "posts:read") + require.NoError(s.T(), err) + assert.True(s.T(), hasPermission) + }) + + s.Run("User cannot create posts", func() { + user, err := s.svc.CreateUser(s.ctx, "user3@example.com", "password", "user") + require.NoError(s.T(), err) + + hasPermission, err := s.svc.HasPermission(s.ctx, user.ID, "posts:create") + require.NoError(s.T(), err) + assert.False(s.T(), hasPermission) + }) + + s.Run("Invalid permission format", func() { + user, err := s.svc.CreateUser(s.ctx, "user4@example.com", "password", "user") + require.NoError(s.T(), err) + + _, err = s.svc.HasPermission(s.ctx, user.ID, "invalid_permission") + require.Error(s.T(), err) + assert.Contains(s.T(), err.Error(), "invalid permission format") + }) + }) +} + +func (s *ServiceImplTestSuite) TestCategory() { + // Create a test user with admin role for testing + adminUser, err := s.svc.CreateUser(s.ctx, "admin@example.com", "password123", "admin") + require.NoError(s.T(), err) + require.NotNil(s.T(), adminUser) + + s.Run("CreateCategory", func() { + // Test category creation + category, err := s.svc.CreateCategory(s.ctx) + assert.NoError(s.T(), err) + assert.NotNil(s.T(), category) + assert.NotZero(s.T(), category.ID) + }) + + s.Run("AddCategoryContent", func() { + // Create a category first + category, err := s.svc.CreateCategory(s.ctx) + require.NoError(s.T(), err) + require.NotNil(s.T(), category) + + testCases := []struct { + name string + langCode string + catName string + desc string + slug string + wantError bool + }{ + { + name: "Valid category content", + langCode: "en", + catName: "Test 
Category", + desc: "Test Description", + slug: "test-category", + wantError: false, + }, + { + name: "Empty language code", + langCode: "", + catName: "Test Category", + desc: "Test Description", + slug: "test-category-2", + wantError: true, + }, + { + name: "Empty name", + langCode: "en", + catName: "", + desc: "Test Description", + slug: "test-category-3", + wantError: true, + }, + { + name: "Empty slug", + langCode: "en", + catName: "Test Category", + desc: "Test Description", + slug: "", + wantError: true, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + content, err := s.svc.AddCategoryContent(s.ctx, category.ID, tc.langCode, tc.catName, tc.desc, tc.slug) + if tc.wantError { + assert.Error(s.T(), err) + assert.Nil(s.T(), content) + } else { + assert.NoError(s.T(), err) + assert.NotNil(s.T(), content) + assert.Equal(s.T(), categorycontent.LanguageCode(tc.langCode), content.LanguageCode) + assert.Equal(s.T(), tc.catName, content.Name) + assert.Equal(s.T(), tc.desc, content.Description) + assert.Equal(s.T(), tc.slug, content.Slug) + } + }) + } + }) + + s.Run("GetCategoryBySlug", func() { + // Create a category with content first + category, err := s.svc.CreateCategory(s.ctx) + require.NoError(s.T(), err) + require.NotNil(s.T(), category) + + content, err := s.svc.AddCategoryContent(s.ctx, category.ID, "en", "Test Category", "Test Description", "test-category-get") + require.NoError(s.T(), err) + require.NotNil(s.T(), content) + + s.Run("Existing category", func() { + found, err := s.svc.GetCategoryBySlug(s.ctx, "en", "test-category-get") + assert.NoError(s.T(), err) + assert.NotNil(s.T(), found) + assert.Equal(s.T(), category.ID, found.ID) + + // Check if content is loaded + require.NotEmpty(s.T(), found.Edges.Contents) + assert.Equal(s.T(), "Test Category", found.Edges.Contents[0].Name) + }) + + s.Run("Non-existing category", func() { + found, err := s.svc.GetCategoryBySlug(s.ctx, "en", "non-existent") + assert.Error(s.T(), err) + assert.Nil(s.T(), found) + }) + + s.Run("Wrong language code", func() { + found, err := s.svc.GetCategoryBySlug(s.ctx, "fr", "test-category-get") + assert.Error(s.T(), err) + assert.Nil(s.T(), found) + }) + }) + + s.Run("ListCategories", func() { + s.Run("List English categories", func() { + // 创建多个分类,但只有 3 个有英文内容 + var createdCategories []*ent.Category + for i := 0; i < 5; i++ { + category, err := s.svc.CreateCategory(s.ctx) + require.NoError(s.T(), err) + require.NotNil(s.T(), category) + createdCategories = append(createdCategories, category) + + // 只给前 3 个分类添加英文内容 + if i < 3 { + _, err = s.svc.AddCategoryContent(s.ctx, category.ID, "en", + fmt.Sprintf("Category %d", i), + fmt.Sprintf("Description %d", i), + fmt.Sprintf("category-list-%d", i)) + require.NoError(s.T(), err) + } + } + + categories, err := s.svc.ListCategories(s.ctx, "en") + assert.NoError(s.T(), err) + assert.NotNil(s.T(), categories) + assert.Len(s.T(), categories, 3) + + // 检查所有返回的分类都有英文内容 + for _, cat := range categories { + assert.NotEmpty(s.T(), cat.Edges.Contents) + for _, content := range cat.Edges.Contents { + assert.Equal(s.T(), categorycontent.LanguageCodeEN, content.LanguageCode) + } + } + }) + + s.Run("List Chinese categories", func() { + // 创建多个分类,但只有 2 个有中文内容 + for i := 0; i < 4; i++ { + category, err := s.svc.CreateCategory(s.ctx) + require.NoError(s.T(), err) + require.NotNil(s.T(), category) + + // 只给前 2 个分类添加中文内容 + if i < 2 { + _, err = s.svc.AddCategoryContent(s.ctx, category.ID, "zh-Hans", + fmt.Sprintf("分类 %d", i), + fmt.Sprintf("描述 %d", i), + 
fmt.Sprintf("category-list-%d", i)) + require.NoError(s.T(), err) + } + } + + categories, err := s.svc.ListCategories(s.ctx, "zh-Hans") + assert.NoError(s.T(), err) + assert.NotNil(s.T(), categories) + assert.Len(s.T(), categories, 2) + + // 检查所有返回的分类都有中文内容 + for _, cat := range categories { + assert.NotEmpty(s.T(), cat.Edges.Contents) + for _, content := range cat.Edges.Contents { + assert.Equal(s.T(), categorycontent.LanguageCodeZH_HANS, content.LanguageCode) + } + } + }) + + s.Run("List non-existing language", func() { + categories, err := s.svc.ListCategories(s.ctx, "fr") + assert.NoError(s.T(), err) + assert.Empty(s.T(), categories) + }) + }) +} + +func (s *ServiceImplTestSuite) TestGetCategories() { + ctx := context.Background() + + // 测试不支持的语言代码 + categories, err := s.svc.GetCategories(ctx, "invalid") + s.Require().NoError(err) + s.Empty(categories) + + // 创建测试数据 + cat1 := s.createTestCategory(ctx, "test-cat-1") + cat2 := s.createTestCategory(ctx, "test-cat-2") + + // 为分类添加不同语言的内容 + _, err = s.svc.AddCategoryContent(ctx, cat1.ID, "en", "Test Category 1", "Test Description 1", "category-list-test-1") + s.Require().NoError(err) + + _, err = s.svc.AddCategoryContent(ctx, cat2.ID, "zh-Hans", "测试分类2", "测试描述2", "category-list-test-2") + s.Require().NoError(err) + + // 测试获取英文分类 + enCategories, err := s.svc.GetCategories(ctx, "en") + s.Require().NoError(err) + s.Len(enCategories, 1) + s.Equal(cat1.ID, enCategories[0].ID) + + // 测试获取简体中文分类 + zhCategories, err := s.svc.GetCategories(ctx, "zh-Hans") + s.Require().NoError(err) + s.Len(zhCategories, 1) + s.Equal(cat2.ID, zhCategories[0].ID) + + // 测试获取繁体中文分类(应该为空) + zhHantCategories, err := s.svc.GetCategories(ctx, "zh-Hant") + s.Require().NoError(err) + s.Empty(zhHantCategories) +} + +func (s *ServiceImplTestSuite) TestGetUserRoles() { + ctx := context.Background() + + // 创建测试用户,默认会有 "user" 角色 + user, err := s.svc.CreateUser(ctx, "test@example.com", "password123", "user") + s.Require().NoError(err) + + // 测试新用户有默认的 "user" 角色 + roles, err := s.svc.GetUserRoles(ctx, user.ID) + s.Require().NoError(err) + s.Len(roles, 1) + s.Equal("user", roles[0].Name) + + // 分配角色给用户 + err = s.svc.AssignRole(ctx, user.ID, "admin") + s.Require().NoError(err) + + // 测试用户现在有两个角色 + roles, err = s.svc.GetUserRoles(ctx, user.ID) + s.Require().NoError(err) + s.Len(roles, 2) + roleNames := []string{roles[0].Name, roles[1].Name} + s.Contains(roleNames, "user") + s.Contains(roleNames, "admin") + + // 测试不存在的用户 + _, err = s.svc.GetUserRoles(ctx, -1) + s.Require().Error(err) +} + +func (s *ServiceImplTestSuite) TestDaily() { + // 创建一个测试分类 + category, err := s.svc.CreateCategory(s.ctx) + require.NoError(s.T(), err) + require.NotNil(s.T(), category) + + // 添加分类内容 + categoryContent, err := s.svc.AddCategoryContent(s.ctx, category.ID, "en", "Test Category", "Test Description", "test-category") + require.NoError(s.T(), err) + require.NotNil(s.T(), categoryContent) + + dailyID := "250212" // 使用符合验证规则的 ID 格式:YYMMDD + + // 测试创建 Daily + s.Run("Create Daily", func() { + daily, err := s.svc.CreateDaily(s.ctx, dailyID, category.ID, "http://example.com/image.jpg") + require.NoError(s.T(), err) + require.NotNil(s.T(), daily) + assert.Equal(s.T(), dailyID, daily.ID) + assert.Equal(s.T(), category.ID, daily.Edges.Category.ID) + assert.Equal(s.T(), "http://example.com/image.jpg", daily.ImageURL) + }) + + // 测试添加 Daily 内容 + s.Run("Add Daily Content", func() { + content, err := s.svc.AddDailyContent(s.ctx, dailyID, "en", "Test quote for the day") + require.NoError(s.T(), err) + 
require.NotNil(s.T(), content) + assert.Equal(s.T(), dailycontent.LanguageCodeEN, content.LanguageCode) + assert.Equal(s.T(), "Test quote for the day", content.Quote) + }) + + // Test getting a Daily + s.Run("Get Daily By ID", func() { + daily, err := s.svc.GetDailyByID(s.ctx, dailyID) + require.NoError(s.T(), err) + require.NotNil(s.T(), daily) + assert.Equal(s.T(), dailyID, daily.ID) + assert.Equal(s.T(), category.ID, daily.Edges.Category.ID) + }) + + // Test listing Dailies + s.Run("List Dailies", func() { + // Create another Daily for the list test + anotherDailyID := "250213" + _, err := s.svc.CreateDaily(s.ctx, anotherDailyID, category.ID, "http://example.com/image2.jpg") + assert.NoError(s.T(), err) + _, err = s.svc.AddDailyContent(s.ctx, anotherDailyID, "en", "Another test quote") + assert.NoError(s.T(), err) + + // Test the list functionality + dailies, err := s.svc.ListDailies(s.ctx, "en", &category.ID, 10, 0) + assert.NoError(s.T(), err) + assert.NotNil(s.T(), dailies) + assert.Len(s.T(), dailies, 2) + + // Test pagination + dailies, err = s.svc.ListDailies(s.ctx, "en", &category.ID, 1, 0) + assert.NoError(s.T(), err) + assert.NotNil(s.T(), dailies) + assert.Len(s.T(), dailies, 1) + + // Test without a category filter + dailies, err = s.svc.ListDailies(s.ctx, "en", nil, 10, 0) + assert.NoError(s.T(), err) + assert.NotNil(s.T(), dailies) + assert.Len(s.T(), dailies, 2) + }) +} + +func (s *ServiceImplTestSuite) TestPost() { + s.Run("Create Post", func() { + s.Run("Draft", func() { + post, err := s.svc.CreatePost(s.ctx, "draft") + require.NoError(s.T(), err) + require.NotNil(s.T(), post) + assert.Equal(s.T(), "draft", post.Status.String()) + }) + + s.Run("Published", func() { + post, err := s.svc.CreatePost(s.ctx, "published") + require.NoError(s.T(), err) + require.NotNil(s.T(), post) + assert.Equal(s.T(), "published", post.Status.String()) + }) + + s.Run("Archived", func() { + post, err := s.svc.CreatePost(s.ctx, "archived") + require.NoError(s.T(), err) + require.NotNil(s.T(), post) + assert.Equal(s.T(), "archived", post.Status.String()) + }) + + s.Run("Invalid Status", func() { + post, err := s.svc.CreatePost(s.ctx, "invalid") + assert.Error(s.T(), err) + assert.Nil(s.T(), post) + }) + }) + + s.Run("Add Post Content", func() { + // Create a post first + post, err := s.svc.CreatePost(s.ctx, "draft") + require.NoError(s.T(), err) + + s.Run("English Content", func() { + content, err := s.svc.AddPostContent(s.ctx, post.ID, "en", "Test Post", "# Test Content", "Test Summary", "test,post", "Test Description") + require.NoError(s.T(), err) + require.NotNil(s.T(), content) + assert.Equal(s.T(), "en", content.LanguageCode.String()) + assert.Equal(s.T(), "Test Post", content.Title) + assert.Equal(s.T(), "# Test Content", content.ContentMarkdown) + assert.Equal(s.T(), "Test Summary", content.Summary) + assert.Equal(s.T(), "test,post", content.MetaKeywords) + assert.Equal(s.T(), "Test Description", content.MetaDescription) + assert.Equal(s.T(), "test-post", content.Slug) + }) + + s.Run("Simplified Chinese Content", func() { + content, err := s.svc.AddPostContent(s.ctx, post.ID, "zh-Hans", "测试帖子", "# 测试内容", "测试摘要", "测试,帖子", "测试描述") + require.NoError(s.T(), err) + require.NotNil(s.T(), content) + assert.Equal(s.T(), "zh-Hans", content.LanguageCode.String()) + assert.Equal(s.T(), "测试帖子", content.Title) + assert.Equal(s.T(), "# 测试内容", content.ContentMarkdown) + assert.Equal(s.T(), "测试摘要", content.Summary) + assert.Equal(s.T(), "测试,帖子", content.MetaKeywords) + assert.Equal(s.T(), "测试描述", content.MetaDescription) + assert.Equal(s.T(), "测试帖子", content.Slug) + }) + + s.Run("Traditional 
Chinese Content", func() { + content, err := s.svc.AddPostContent(s.ctx, post.ID, "zh-Hant", "測試貼文", "# 測試內容", "測試摘要", "測試,貼文", "測試描述") + require.NoError(s.T(), err) + require.NotNil(s.T(), content) + assert.Equal(s.T(), "zh-Hant", content.LanguageCode.String()) + assert.Equal(s.T(), "測試貼文", content.Title) + assert.Equal(s.T(), "# 測試內容", content.ContentMarkdown) + assert.Equal(s.T(), "測試摘要", content.Summary) + assert.Equal(s.T(), "測試,貼文", content.MetaKeywords) + assert.Equal(s.T(), "測試描述", content.MetaDescription) + assert.Equal(s.T(), "測試貼文", content.Slug) + }) + + s.Run("Invalid Language Code", func() { + content, err := s.svc.AddPostContent(s.ctx, post.ID, "fr", "Test Post", "# Test Content", "Test Summary", "test,post", "Test Description") + assert.Error(s.T(), err) + assert.Nil(s.T(), content) + }) + + s.Run("Non-existent Post", func() { + content, err := s.svc.AddPostContent(s.ctx, 999999, "en", "Test Post", "# Test Content", "Test Summary", "test,post", "Test Description") + assert.Error(s.T(), err) + assert.Nil(s.T(), content) + }) + }) + + s.Run("Get Post By Slug", func() { + // Create a post first + post, err := s.svc.CreatePost(s.ctx, "published") + require.NoError(s.T(), err) + + // Add content in different languages + _, err = s.svc.AddPostContent(s.ctx, post.ID, "en", "Test Post", "# Test Content", "Test Summary", "test,post", "Test Description") + require.NoError(s.T(), err) + _, err = s.svc.AddPostContent(s.ctx, post.ID, "zh-Hans", "测试帖子", "# 测试内容", "测试摘要", "测试,帖子", "测试描述") + require.NoError(s.T(), err) + + s.Run("Get Post By Slug - English", func() { + result, err := s.svc.GetPostBySlug(s.ctx, "en", "test-post") + require.NoError(s.T(), err) + require.NotNil(s.T(), result) + assert.Equal(s.T(), post.ID, result.ID) + assert.Equal(s.T(), "published", result.Status.String()) + + contents := result.Edges.Contents + require.Len(s.T(), contents, 1) + assert.Equal(s.T(), "en", contents[0].LanguageCode.String()) + assert.Equal(s.T(), "Test Post", contents[0].Title) + }) + + s.Run("Get Post By Slug - Chinese", func() { + result, err := s.svc.GetPostBySlug(s.ctx, "zh-Hans", "测试帖子") + require.NoError(s.T(), err) + require.NotNil(s.T(), result) + assert.Equal(s.T(), post.ID, result.ID) + assert.Equal(s.T(), "published", result.Status.String()) + + contents := result.Edges.Contents + require.Len(s.T(), contents, 1) + assert.Equal(s.T(), "zh-Hans", contents[0].LanguageCode.String()) + assert.Equal(s.T(), "测试帖子", contents[0].Title) + }) + + s.Run("Non-existent Post", func() { + result, err := s.svc.GetPostBySlug(s.ctx, "en", "non-existent") + assert.Error(s.T(), err) + assert.Nil(s.T(), result) + }) + + s.Run("Invalid Language Code", func() { + result, err := s.svc.GetPostBySlug(s.ctx, "fr", "test-post") + assert.Error(s.T(), err) + assert.Nil(s.T(), result) + }) + }) + + s.Run("List Posts", func() { + // Create some posts with content + for i := 0; i < 5; i++ { + post, err := s.svc.CreatePost(s.ctx, "published") + require.NoError(s.T(), err) + + // Add content in different languages + _, err = s.svc.AddPostContent(s.ctx, post.ID, "en", fmt.Sprintf("Post %d", i), "# Content", "Summary", "test", "Description") + require.NoError(s.T(), err) + _, err = s.svc.AddPostContent(s.ctx, post.ID, "zh-Hans", fmt.Sprintf("帖子 %d", i), "# 内容", "摘要", "测试", "描述") + require.NoError(s.T(), err) + } + + s.Run("List All Posts - English", func() { + posts, err := s.svc.ListPosts(s.ctx, "en", nil, 10, 0) + require.NoError(s.T(), err) + require.Len(s.T(), posts, 5) + + // Check that all posts have English 
content + for _, post := range posts { + contents := post.Edges.Contents + require.Len(s.T(), contents, 1) + assert.Equal(s.T(), "en", contents[0].LanguageCode.String()) + } + }) + + s.Run("List All Posts - Chinese", func() { + posts, err := s.svc.ListPosts(s.ctx, "zh-Hans", nil, 10, 0) + require.NoError(s.T(), err) + require.Len(s.T(), posts, 5) + + // Check that all posts have Chinese content + for _, post := range posts { + contents := post.Edges.Contents + require.Len(s.T(), contents, 1) + assert.Equal(s.T(), "zh-Hans", contents[0].LanguageCode.String()) + } + }) + + s.Run("List Posts with Pagination", func() { + // Get first page + posts, err := s.svc.ListPosts(s.ctx, "en", nil, 2, 0) + require.NoError(s.T(), err) + require.Len(s.T(), posts, 2) + + // Get second page + posts, err = s.svc.ListPosts(s.ctx, "en", nil, 2, 2) + require.NoError(s.T(), err) + require.Len(s.T(), posts, 2) + + // Get last page + posts, err = s.svc.ListPosts(s.ctx, "en", nil, 2, 4) + require.NoError(s.T(), err) + require.Len(s.T(), posts, 1) + }) + + s.Run("List Posts by Category", func() { + // Create a category + category, err := s.svc.CreateCategory(s.ctx) + require.NoError(s.T(), err) + + // Create posts in this category + for i := 0; i < 3; i++ { + post, err := s.svc.CreatePost(s.ctx, "published") + require.NoError(s.T(), err) + + // Set category + _, err = s.client.Post.UpdateOne(post).SetCategoryID(category.ID).Save(s.ctx) + require.NoError(s.T(), err) + + // Add content + _, err = s.svc.AddPostContent(s.ctx, post.ID, "en", fmt.Sprintf("Category Post %d", i), "# Content", "Summary", "test", "Description") + require.NoError(s.T(), err) + } + + // List posts in this category + posts, err := s.svc.ListPosts(s.ctx, "en", &category.ID, 10, 0) + require.NoError(s.T(), err) + require.Len(s.T(), posts, 3) + + // Check that all posts belong to the category + for _, post := range posts { + assert.Equal(s.T(), category.ID, post.Edges.Category.ID) + } + }) + + s.Run("Invalid Language Code", func() { + posts, err := s.svc.ListPosts(s.ctx, "fr", nil, 10, 0) + assert.Error(s.T(), err) + assert.Nil(s.T(), posts) + }) + }) +} + +func (s *ServiceImplTestSuite) TestMedia() { + s.Run("Upload Media", func() { + // Create a user first + user, err := s.svc.CreateUser(s.ctx, "test@example.com", "password123", "") + require.NoError(s.T(), err) + require.NotNil(s.T(), user) + + // Mock file content + fileContent := []byte("test file content") + + // Mock the file header + fileHeader := &multipart.FileHeader{ + Filename: "test.jpg", + Size: int64(len(fileContent)), + Header: textproto.MIMEHeader{ + "Content-Type": []string{"image/jpeg"}, + }, + } + + // Mock the storage behavior + s.storage.EXPECT(). + Save(gomock.Any(), fileHeader.Filename, "image/jpeg", gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, name, contentType string, reader io.Reader) (*storage.FileInfo, error) { + // Verify the reader content + data, err := io.ReadAll(reader) + if err != nil { + return nil, err + } + if !bytes.Equal(data, fileContent) { + return nil, fmt.Errorf("unexpected file content") + } + return &storage.FileInfo{ + ID: "test123", + Name: name, + Size: int64(len(fileContent)), + ContentType: contentType, + URL: "http://example.com/test.jpg", + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + }, nil + }).Times(1) + + // Replace the Open method + openFile = func(fh *multipart.FileHeader) (multipart.File, error) { + return &mockMultipartFile{bytes.NewReader(fileContent)}, nil + } + + // Test upload + media, err := s.svc.Upload(s.ctx, fileHeader, user.ID) + require.NoError(s.T(), err) + require.NotNil(s.T(), media) + assert.Equal(s.T(), "test123", media.StorageID) + assert.Equal(s.T(), "test.jpg", media.OriginalName) + assert.Equal(s.T(), int64(len(fileContent)), media.Size) + assert.Equal(s.T(), "image/jpeg", media.MimeType) + assert.Equal(s.T(), "http://example.com/test.jpg", media.URL) + assert.Equal(s.T(), strconv.Itoa(user.ID), media.CreatedBy) + + // Now we can test other operations since we have a media record + s.Run("Get Media", func() { + result, err := s.svc.GetMedia(s.ctx, media.ID) + require.NoError(s.T(), err) + require.NotNil(s.T(), result) + assert.Equal(s.T(), media.ID, result.ID) + assert.Equal(s.T(), media.StorageID, result.StorageID) + assert.Equal(s.T(), media.URL, result.URL) + }) + + s.Run("Get File", func() { + // Mock the storage behavior + mockReader := io.NopCloser(strings.NewReader("test content")) + mockFileInfo := &storage.FileInfo{ + ID: media.StorageID, + Name: media.OriginalName, + Size: media.Size, + ContentType: media.MimeType, + URL: media.URL, + CreatedAt: media.CreatedAt, + UpdatedAt: media.UpdatedAt, + } + s.storage.EXPECT(). + Get(gomock.Any(), media.StorageID). + Return(mockReader, mockFileInfo, nil) + + // Test get file + reader, fileInfo, err := s.svc.GetFile(s.ctx, media.ID) + require.NoError(s.T(), err) + require.NotNil(s.T(), reader) + require.NotNil(s.T(), fileInfo) + assert.Equal(s.T(), media.OriginalName, fileInfo.Name) + assert.Equal(s.T(), media.Size, fileInfo.Size) + assert.Equal(s.T(), media.MimeType, fileInfo.ContentType) + assert.Equal(s.T(), media.URL, fileInfo.URL) + + // Clean up + reader.Close() + }) + + s.Run("List Media", func() { + // Test list media + list, err := s.svc.ListMedia(s.ctx, 10, 0) + require.NoError(s.T(), err) + require.NotNil(s.T(), list) + require.Len(s.T(), list, 1) + assert.Equal(s.T(), "test.jpg", list[0].OriginalName) + }) + + s.Run("Delete Media", func() { + // Mock the storage behavior + s.storage.EXPECT(). + Delete(gomock.Any(), media.StorageID). 
+ Return(nil) + + // Test delete media + err = s.svc.DeleteMedia(s.ctx, media.ID, user.ID) + require.NoError(s.T(), err) + + // Verify media is deleted + count, err := s.client.Media.Query().Count(s.ctx) + require.NoError(s.T(), err) + assert.Equal(s.T(), 0, count) + }) + }) + + s.Run("Delete Media - Unauthorized", func() { + // Create a user + user, err := s.svc.CreateUser(s.ctx, "another@example.com", "password123", "") + require.NoError(s.T(), err) + + // Mock file content + fileContent := []byte("test file content") + + // Mock the file header + fileHeader := &multipart.FileHeader{ + Filename: "test2.jpg", + Size: int64(len(fileContent)), + Header: textproto.MIMEHeader{ + "Content-Type": []string{"image/jpeg"}, + }, + } + + // Mock the storage behavior + s.storage.EXPECT(). + Save(gomock.Any(), fileHeader.Filename, "image/jpeg", gomock.Any()). + DoAndReturn(func(ctx context.Context, name, contentType string, reader io.Reader) (*storage.FileInfo, error) { + // Verify the reader content + data, err := io.ReadAll(reader) + if err != nil { + return nil, err + } + if !bytes.Equal(data, fileContent) { + return nil, fmt.Errorf("unexpected file content") + } + return &storage.FileInfo{ + ID: "test456", + Name: name, + Size: int64(len(fileContent)), + ContentType: contentType, + URL: "http://example.com/test2.jpg", + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + }, nil + }).Times(1) + + // Replace the Open method + openFile = func(fh *multipart.FileHeader) (multipart.File, error) { + return &mockMultipartFile{bytes.NewReader(fileContent)}, nil + } + + media, err := s.svc.Upload(s.ctx, fileHeader, user.ID) + require.NoError(s.T(), err) + + // Try to delete with different user + anotherUser, err := s.svc.CreateUser(s.ctx, "third@example.com", "password123", "") + require.NoError(s.T(), err) + + err = s.svc.DeleteMedia(s.ctx, media.ID, anotherUser.ID) + assert.Equal(s.T(), ErrUnauthorized, err) + + // Verify media is not deleted + count, err := s.client.Media.Query().Count(s.ctx) + require.NoError(s.T(), err) + assert.Equal(s.T(), 1, count) + }) +} + +func (s *ServiceImplTestSuite) TestContributor() { + // Test creating a contributor + avatarURL := "https://example.com/avatar.jpg" + bio := "Test bio" + contributor, err := s.svc.CreateContributor(s.ctx, "Test Contributor", &avatarURL, &bio) + require.NoError(s.T(), err) + require.NotNil(s.T(), contributor) + assert.Equal(s.T(), "Test Contributor", contributor.Name) + assert.Equal(s.T(), avatarURL, contributor.AvatarURL) + assert.Equal(s.T(), bio, contributor.Bio) + + // Test adding a social link + link, err := s.svc.AddContributorSocialLink(s.ctx, contributor.ID, "github", "GitHub", "https://github.com/test") + require.NoError(s.T(), err) + require.NotNil(s.T(), link) + assert.Equal(s.T(), "github", link.Type.String()) + assert.Equal(s.T(), "GitHub", link.Name) + assert.Equal(s.T(), "https://github.com/test", link.Value) + + // Test getting the contributor + fetchedContributor, err := s.svc.GetContributorByID(s.ctx, contributor.ID) + require.NoError(s.T(), err) + require.NotNil(s.T(), fetchedContributor) + assert.Equal(s.T(), contributor.ID, fetchedContributor.ID) + assert.Equal(s.T(), contributor.Name, fetchedContributor.Name) + assert.Equal(s.T(), contributor.AvatarURL, fetchedContributor.AvatarURL) + assert.Equal(s.T(), contributor.Bio, fetchedContributor.Bio) + require.Len(s.T(), fetchedContributor.Edges.SocialLinks, 1) + assert.Equal(s.T(), link.ID, fetchedContributor.Edges.SocialLinks[0].ID) + + // Test listing contributors + contributors, err := s.svc.ListContributors(s.ctx) + require.NoError(s.T(), err) + 
require.NotEmpty(s.T(), contributors) + assert.Equal(s.T(), contributor.ID, contributors[0].ID) + require.Len(s.T(), contributors[0].Edges.SocialLinks, 1) + + // Test error cases + _, err = s.svc.GetContributorByID(s.ctx, -1) + assert.Error(s.T(), err) + + _, err = s.svc.AddContributorSocialLink(s.ctx, -1, "github", "GitHub", "https://github.com/test") + assert.Error(s.T(), err) + + // Test an invalid social link type + _, err = s.svc.AddContributorSocialLink(s.ctx, contributor.ID, "invalid_type", "Invalid", "https://example.com") + assert.Error(s.T(), err) +} + +func TestServiceSuite(t *testing.T) { + suite.Run(t, new(ServiceSuite)) +} + +type ServiceSuite struct { + suite.Suite +} + +func TestServiceInterface(t *testing.T) { + var _ Service = (*serviceImpl)(nil) +} + +// Helper function to create a test category +func (s *ServiceImplTestSuite) createTestCategory(ctx context.Context, slug string) *ent.Category { + category, err := s.svc.CreateCategory(ctx) + s.Require().NoError(err) + return category +} diff --git a/backend/internal/service/media.go b/backend/internal/service/media.go new file mode 100644 index 0000000..195d197 --- /dev/null +++ b/backend/internal/service/media.go @@ -0,0 +1,179 @@ +package service + +import ( + "bytes" + "context" + "fmt" + "io" + "mime/multipart" + "strings" + + "path/filepath" + "tss-rocks-be/ent" + "tss-rocks-be/internal/storage" + "tss-rocks-be/pkg/imageutil" +) + +type MediaService interface { + // Upload uploads a new file and creates a media record + Upload(ctx context.Context, file *multipart.FileHeader, userID int) (*ent.Media, error) + + // Get retrieves a media file by ID + Get(ctx context.Context, id int) (*ent.Media, error) + + // Delete deletes a media file + Delete(ctx context.Context, id int, userID int) error + + // List lists media files with pagination + List(ctx context.Context, limit, offset int) ([]*ent.Media, error) + + // GetFile gets the file content and info + GetFile(ctx context.Context, id int) (io.ReadCloser, *storage.FileInfo, error) +} + +type mediaService struct { + client *ent.Client + storage storage.Storage +} + +func NewMediaService(client *ent.Client, storage storage.Storage) MediaService { + return &mediaService{ + client: client, + storage: storage, + } +} + +// isValidFilename checks if a filename is valid +func isValidFilename(filename string) bool { + // Check for illegal characters + if strings.Contains(filename, "../") || + strings.Contains(filename, "./") || + strings.Contains(filename, "\\") { + return false + } + + return true +} + +func (s *mediaService) Upload(ctx context.Context, file *multipart.FileHeader, userID int) (*ent.Media, error) { + // Validate filename + if !isValidFilename(file.Filename) { + return nil, fmt.Errorf("invalid filename: %s", file.Filename) + } + + // Open the file + src, err := file.Open() + if err != nil { + return nil, fmt.Errorf("failed to open file: %w", err) + } + defer src.Close() + + // Read file content for processing + fileBytes, err := io.ReadAll(src) + if err != nil { + return nil, fmt.Errorf("failed to read file: %w", err) + } + + contentType := file.Header.Get("Content-Type") + filename := file.Filename + var processedBytes []byte + + // Process image if it's an image file + if imageutil.IsImageFormat(contentType) { + opts := imageutil.DefaultOptions() + processedBytes, err = imageutil.ProcessImage(bytes.NewReader(fileBytes), opts) + if err != nil { + return nil, fmt.Errorf("failed to process image: %w", err) + } + + // Update content type and filename for WebP + contentType = "image/webp" + filename = 
strings.TrimSuffix(filename, filepath.Ext(filename)) + ".webp" + } else { + processedBytes = fileBytes + } + + // Save the processed file + fileInfo, err := s.storage.Save(ctx, filename, contentType, bytes.NewReader(processedBytes)) + if err != nil { + return nil, fmt.Errorf("failed to save file: %w", err) + } + + // Create media record in database + media, err := s.client.Media.Create(). + SetStorageID(fileInfo.ID). + SetOriginalName(filename). + SetMimeType(contentType). + SetSize(int64(len(processedBytes))). + SetURL(fmt.Sprintf("/api/media/%s", fileInfo.ID)). + SetCreatedBy(fmt.Sprint(userID)). + Save(ctx) + if err != nil { + // Try to cleanup the stored file if database operation fails + _ = s.storage.Delete(ctx, fileInfo.ID) + return nil, fmt.Errorf("failed to create media record: %w", err) + } + + return media, nil +} + +func (s *mediaService) Get(ctx context.Context, id int) (*ent.Media, error) { + media, err := s.client.Media.Get(ctx, id) + if err != nil { + if ent.IsNotFound(err) { + return nil, fmt.Errorf("media not found: %d", id) + } + return nil, fmt.Errorf("failed to get media: %w", err) + } + return media, nil +} + +func (s *mediaService) Delete(ctx context.Context, id int, userID int) error { + media, err := s.Get(ctx, id) + if err != nil { + return err + } + + // Check ownership + if media.CreatedBy != fmt.Sprintf("%d", userID) { + return fmt.Errorf("unauthorized to delete media") + } + + // Delete from storage + if err := s.storage.Delete(ctx, media.StorageID); err != nil { + return fmt.Errorf("failed to delete file from storage: %w", err) + } + + // Delete from database + if err := s.client.Media.DeleteOne(media).Exec(ctx); err != nil { + return fmt.Errorf("failed to delete media record: %w", err) + } + + return nil +} + +func (s *mediaService) List(ctx context.Context, limit, offset int) ([]*ent.Media, error) { + media, err := s.client.Media.Query(). + Order(ent.Desc("created_at")). + Limit(limit). + Offset(offset). 
+ All(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list media: %w", err) + } + return media, nil +} + +func (s *mediaService) GetFile(ctx context.Context, id int) (io.ReadCloser, *storage.FileInfo, error) { + media, err := s.Get(ctx, id) + if err != nil { + return nil, nil, err + } + + reader, info, err := s.storage.Get(ctx, media.StorageID) + if err != nil { + return nil, nil, fmt.Errorf("failed to get file from storage: %w", err) + } + + return reader, info, nil +} diff --git a/backend/internal/service/media_test.go b/backend/internal/service/media_test.go new file mode 100644 index 0000000..412689d --- /dev/null +++ b/backend/internal/service/media_test.go @@ -0,0 +1,332 @@ +package service + +import ( + "bytes" + "context" + "fmt" + "io" + "mime/multipart" + "net/textproto" + "reflect" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.uber.org/mock/gomock" + + "tss-rocks-be/ent" + "tss-rocks-be/internal/storage" + "tss-rocks-be/internal/storage/mock" + "tss-rocks-be/internal/testutil" + + "bou.ke/monkey" +) + +type MediaServiceTestSuite struct { + suite.Suite + ctx context.Context + client *ent.Client + storage *mock.MockStorage + ctrl *gomock.Controller + svc MediaService +} + +func (s *MediaServiceTestSuite) SetupTest() { + s.ctx = context.Background() + s.client = testutil.NewTestClient() + require.NotNil(s.T(), s.client) + + s.ctrl = gomock.NewController(s.T()) + s.storage = mock.NewMockStorage(s.ctrl) + s.svc = NewMediaService(s.client, s.storage) + + // Clean up the database + _, err := s.client.Media.Delete().Exec(s.ctx) + require.NoError(s.T(), err) +} + +func (s *MediaServiceTestSuite) TearDownTest() { + s.ctrl.Finish() + s.client.Close() +} + +func TestMediaServiceSuite(t *testing.T) { + suite.Run(t, new(MediaServiceTestSuite)) +} + +type mockFileHeader struct { + filename string + contentType string + size int64 + content []byte +} + +func (h *mockFileHeader) Open() (multipart.File, error) { + return newMockMultipartFile(h.content), nil +} + +func (h *mockFileHeader) Filename() string { + return h.filename +} + +func (h *mockFileHeader) Size() int64 { + return h.size +} + +func (h *mockFileHeader) Header() textproto.MIMEHeader { + header := make(textproto.MIMEHeader) + header.Set("Content-Type", h.contentType) + return header +} + +func (s *MediaServiceTestSuite) createTestFile(filename, contentType string, content []byte) *multipart.FileHeader { + header := &multipart.FileHeader{ + Filename: filename, + Header: make(textproto.MIMEHeader), + Size: int64(len(content)), + } + header.Header.Set("Content-Type", contentType) + + monkey.PatchInstanceMethod(reflect.TypeOf(header), "Open", func(_ *multipart.FileHeader) (multipart.File, error) { + return newMockMultipartFile(content), nil + }) + + return header +} + +func (s *MediaServiceTestSuite) TestUpload() { + testCases := []struct { + name string + filename string + contentType string + content []byte + setupMock func() + wantErr bool + errMsg string + }{ + { + name: "Upload text file", + filename: "test.txt", + contentType: "text/plain", + content: []byte("test content"), + setupMock: func() { + s.storage.EXPECT(). + Save(gomock.Any(), "test.txt", "text/plain", gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, name, contentType string, reader io.Reader) (*storage.FileInfo, error) { + content, err := io.ReadAll(reader) + s.Require().NoError(err) + s.Equal([]byte("test content"), content) + return &storage.FileInfo{ + ID: "test-id", + Name: "test.txt", + ContentType: "text/plain", + Size: int64(len(content)), + }, nil + }) + }, + wantErr: false, + }, + { + name: "Invalid filename", + filename: "../test.txt", + contentType: "text/plain", + content: []byte("test content"), + setupMock: func() {}, + wantErr: true, + errMsg: "invalid filename", + }, + { + name: "Storage error", + filename: "test.txt", + contentType: "text/plain", + content: []byte("test content"), + setupMock: func() { + s.storage.EXPECT(). + Save(gomock.Any(), "test.txt", "text/plain", gomock.Any()). + Return(nil, fmt.Errorf("storage error")) + }, + wantErr: true, + errMsg: "storage error", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + // Setup mock + tc.setupMock() + + // Create test file + fileHeader := s.createTestFile(tc.filename, tc.contentType, tc.content) + + // Add debug output + s.T().Logf("Testing file: %s, content-type: %s, size: %d", fileHeader.Filename, fileHeader.Header.Get("Content-Type"), fileHeader.Size) + + // Test upload + media, err := s.svc.Upload(s.ctx, fileHeader, 1) + + // Add debug output + if err != nil { + s.T().Logf("Upload error: %v", err) + } + + if tc.wantErr { + s.Require().Error(err) + s.Contains(err.Error(), tc.errMsg) + return + } + + s.Require().NoError(err) + s.NotNil(media) + s.Equal(tc.filename, media.OriginalName) + s.Equal(tc.contentType, media.MimeType) + s.Equal(int64(len(tc.content)), media.Size) + s.Equal("1", media.CreatedBy) + }) + } +} + +func (s *MediaServiceTestSuite) TestGet() { + // Create test media + media, err := s.client.Media.Create(). + SetStorageID("test-id"). + SetOriginalName("test.txt"). + SetMimeType("text/plain"). + SetSize(12). + SetURL("/api/media/test-id"). + SetCreatedBy("1"). + Save(s.ctx) + s.Require().NoError(err) + + // Test get existing media + result, err := s.svc.Get(s.ctx, media.ID) + s.Require().NoError(err) + s.Equal(media.ID, result.ID) + s.Equal(media.OriginalName, result.OriginalName) + + // Test get non-existing media + _, err = s.svc.Get(s.ctx, -1) + s.Require().Error(err) + s.Contains(err.Error(), "media not found") +} + +func (s *MediaServiceTestSuite) TestDelete() { + // Create test media + media, err := s.client.Media.Create(). + SetStorageID("test-id"). + SetOriginalName("test.txt"). + SetMimeType("text/plain"). + SetSize(12). + SetURL("/api/media/test-id"). + SetCreatedBy("1"). + Save(s.ctx) + s.Require().NoError(err) + + // Test delete by unauthorized user + err = s.svc.Delete(s.ctx, media.ID, 2) + s.Require().Error(err) + s.Contains(err.Error(), "unauthorized") + + // Test delete by owner + s.storage.EXPECT(). + Delete(gomock.Any(), "test-id"). + Return(nil) + err = s.svc.Delete(s.ctx, media.ID, 1) + s.Require().NoError(err) + + // Verify media is deleted + _, err = s.svc.Get(s.ctx, media.ID) + s.Require().Error(err) + s.Contains(err.Error(), "not found") +} + +func (s *MediaServiceTestSuite) TestList() { + // Create test media + for i := 0; i < 5; i++ { + _, err := s.client.Media.Create(). + SetStorageID(fmt.Sprintf("test-id-%d", i)). + SetOriginalName(fmt.Sprintf("test-%d.txt", i)). + SetMimeType("text/plain"). + SetSize(12). + SetURL(fmt.Sprintf("/api/media/test-id-%d", i)). + SetCreatedBy("1"). 
+ Save(s.ctx) + s.Require().NoError(err) + } + + // Test list with limit and offset + media, err := s.svc.List(s.ctx, 3, 1) + s.Require().NoError(err) + s.Len(media, 3) +} + +func (s *MediaServiceTestSuite) TestGetFile() { + // Create test media + media, err := s.client.Media.Create(). + SetStorageID("test-id"). + SetOriginalName("test.txt"). + SetMimeType("text/plain"). + SetSize(12). + SetURL("/api/media/test-id"). + SetCreatedBy("1"). + Save(s.ctx) + s.Require().NoError(err) + + // Mock storage.Get + mockReader := io.NopCloser(bytes.NewReader([]byte("test content"))) + mockFileInfo := &storage.FileInfo{ + ID: "test-id", + Name: "test.txt", + ContentType: "text/plain", + Size: 12, + } + s.storage.EXPECT(). + Get(gomock.Any(), "test-id"). + Return(mockReader, mockFileInfo, nil) + + // Test get file + reader, info, err := s.svc.GetFile(s.ctx, media.ID) + s.Require().NoError(err) + s.NotNil(reader) + s.Equal(mockFileInfo, info) + + // Test get non-existing file + _, _, err = s.svc.GetFile(s.ctx, -1) + s.Require().Error(err) + s.Contains(err.Error(), "not found") +} + +func (s *MediaServiceTestSuite) TestIsValidFilename() { + testCases := []struct { + name string + filename string + want bool + }{ + { + name: "Valid filename", + filename: "test.txt", + want: true, + }, + { + name: "Invalid filename with ../", + filename: "../test.txt", + want: false, + }, + { + name: "Invalid filename with ./", + filename: "./test.txt", + want: false, + }, + { + name: "Invalid filename with backslash", + filename: "test\\file.txt", + want: false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + got := isValidFilename(tc.filename) + s.Equal(tc.want, got) + }) + } +} diff --git a/backend/internal/service/mock/mock.go b/backend/internal/service/mock/mock.go new file mode 100644 index 0000000..b9ee689 --- /dev/null +++ b/backend/internal/service/mock/mock.go @@ -0,0 +1,3 @@ +package mock + +//go:generate mockgen -source=../service.go -destination=mock_service.go -package=mock diff --git a/backend/internal/service/rbac_service.go b/backend/internal/service/rbac_service.go new file mode 100644 index 0000000..48e8f6f --- /dev/null +++ b/backend/internal/service/rbac_service.go @@ -0,0 +1,105 @@ +package service + +import ( + "context" + "fmt" + + "tss-rocks-be/ent" + "tss-rocks-be/ent/permission" + "tss-rocks-be/ent/role" +) + +type RBACService struct { + client *ent.Client +} + +func NewRBACService(client *ent.Client) *RBACService { + return &RBACService{ + client: client, + } +} + +// InitializeRBAC sets up the initial RBAC configuration +func (s *RBACService) InitializeRBAC(ctx context.Context) error { + // Create admin role if it doesn't exist + adminRole, err := s.client.Role.Query(). + Where(role.Name("admin")). + Only(ctx) + if ent.IsNotFound(err) { + adminRole, err = s.client.Role.Create(). + SetName("admin"). + Save(ctx) + if err != nil { + return fmt.Errorf("failed to create admin role: %w", err) + } + } else if err != nil { + return fmt.Errorf("failed to query admin role: %w", err) + } + + // Create editor role if it doesn't exist + editorRole, err := s.client.Role.Query(). + Where(role.Name("editor")). + Only(ctx) + if ent.IsNotFound(err) { + editorRole, err = s.client.Role.Create(). + SetName("editor"). 
+ Save(ctx) + if err != nil { + return fmt.Errorf("failed to create editor role: %w", err) + } + } else if err != nil { + return fmt.Errorf("failed to query editor role: %w", err) + } + + // Define permissions + permissions := []struct { + role *ent.Role + resource string + actions []string + }{ + {adminRole, "users", []string{"create", "read", "update", "delete", "assign_role"}}, + {adminRole, "roles", []string{"create", "read", "update", "delete"}}, + {adminRole, "media", []string{"create", "read", "update", "delete"}}, + {adminRole, "posts", []string{"create", "read", "update", "delete"}}, + {adminRole, "categories", []string{"create", "read", "update", "delete"}}, + {adminRole, "contributors", []string{"create", "read", "update", "delete"}}, + {adminRole, "dailies", []string{"create", "read", "update", "delete"}}, + + {editorRole, "media", []string{"create", "read", "update"}}, + {editorRole, "posts", []string{"create", "read", "update"}}, + {editorRole, "categories", []string{"read"}}, + {editorRole, "contributors", []string{"read"}}, + {editorRole, "dailies", []string{"create", "read", "update"}}, + } + + // Create permissions for each role + for _, p := range permissions { + for _, action := range p.actions { + // Check if permission already exists + exists, err := s.client.Permission.Query(). + Where( + permission.Resource(p.resource), + permission.Action(action), + permission.HasRolesWith(role.ID(p.role.ID)), + ). + Exist(ctx) + if err != nil { + return fmt.Errorf("failed to query permission: %w", err) + } + + if !exists { + // Create permission and associate it with the role + _, err = s.client.Permission.Create(). + SetResource(p.resource). + SetAction(action). + AddRoles(p.role). + Save(ctx) + if err != nil { + return fmt.Errorf("failed to create permission: %w", err) + } + } + } + } + + return nil +} diff --git a/backend/internal/service/service.go b/backend/internal/service/service.go new file mode 100644 index 0000000..3920125 --- /dev/null +++ b/backend/internal/service/service.go @@ -0,0 +1,59 @@ +package service + +//go:generate mockgen -source=service.go -destination=mock/mock_service.go -package=mock + +import ( + "context" + "io" + "mime/multipart" + + "tss-rocks-be/ent" + "tss-rocks-be/internal/storage" +) + +// Service interface defines all business logic operations +type Service interface { + // User operations + CreateUser(ctx context.Context, email, password string, role string) (*ent.User, error) + GetUserByEmail(ctx context.Context, email string) (*ent.User, error) + ValidatePassword(ctx context.Context, user *ent.User, password string) bool + + // Category operations + CreateCategory(ctx context.Context) (*ent.Category, error) + AddCategoryContent(ctx context.Context, categoryID int, langCode, name, description, slug string) (*ent.CategoryContent, error) + GetCategoryBySlug(ctx context.Context, langCode, slug string) (*ent.Category, error) + ListCategories(ctx context.Context, langCode string) ([]*ent.Category, error) + GetCategories(ctx context.Context, langCode string) ([]*ent.Category, error) + + // Post operations + CreatePost(ctx context.Context, status string) (*ent.Post, error) + AddPostContent(ctx context.Context, postID int, langCode, title, content, summary string, metaKeywords, metaDescription string) (*ent.PostContent, error) + GetPostBySlug(ctx context.Context, langCode, slug string) (*ent.Post, error) + ListPosts(ctx context.Context, langCode string, categoryID *int, limit, offset int) ([]*ent.Post, error) + + // Contributor operations + 
CreateContributor(ctx context.Context, name string, avatarURL, bio *string) (*ent.Contributor, error) + AddContributorSocialLink(ctx context.Context, contributorID int, linkType, name, value string) (*ent.ContributorSocialLink, error) + GetContributorByID(ctx context.Context, id int) (*ent.Contributor, error) + ListContributors(ctx context.Context) ([]*ent.Contributor, error) + + // Daily operations + CreateDaily(ctx context.Context, id string, categoryID int, imageURL string) (*ent.Daily, error) + AddDailyContent(ctx context.Context, dailyID string, langCode, quote string) (*ent.DailyContent, error) + GetDailyByID(ctx context.Context, id string) (*ent.Daily, error) + ListDailies(ctx context.Context, langCode string, categoryID *int, limit, offset int) ([]*ent.Daily, error) + + // Media operations + ListMedia(ctx context.Context, limit, offset int) ([]*ent.Media, error) + Upload(ctx context.Context, file *multipart.FileHeader, userID int) (*ent.Media, error) + GetMedia(ctx context.Context, id int) (*ent.Media, error) + GetFile(ctx context.Context, id int) (io.ReadCloser, *storage.FileInfo, error) + DeleteMedia(ctx context.Context, id int, userID int) error + + // RBAC operations + AssignRole(ctx context.Context, userID int, role string) error + RemoveRole(ctx context.Context, userID int, role string) error + GetUserRoles(ctx context.Context, userID int) ([]*ent.Role, error) + HasPermission(ctx context.Context, userID int, permission string) (bool, error) + InitializeRBAC(ctx context.Context) error +} diff --git a/backend/internal/storage/factory.go b/backend/internal/storage/factory.go new file mode 100644 index 0000000..b1fe210 --- /dev/null +++ b/backend/internal/storage/factory.go @@ -0,0 +1,66 @@ +package storage + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + awsconfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/s3" + "tss-rocks-be/internal/config" +) + +// NewStorage creates a new storage instance based on the configuration +func NewStorage(ctx context.Context, cfg *config.StorageConfig) (Storage, error) { + switch cfg.Type { + case "local": + return NewLocalStorage(cfg.Local.RootDir) + case "s3": + // Load AWS configuration + var s3Client *s3.Client + + if cfg.S3.Endpoint != "" { + // Custom endpoint (e.g., MinIO) + customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) { + return aws.Endpoint{ + URL: cfg.S3.Endpoint, + }, nil + }) + + awsCfg, err := awsconfig.LoadDefaultConfig(ctx, + awsconfig.WithRegion(cfg.S3.Region), + awsconfig.WithEndpointResolverWithOptions(customResolver), + awsconfig.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( + cfg.S3.AccessKeyID, + cfg.S3.SecretAccessKey, + "", + )), + ) + if err != nil { + return nil, fmt.Errorf("unable to load AWS SDK config: %w", err) + } + + s3Client = s3.NewFromConfig(awsCfg) + } else { + // Standard AWS S3 + awsCfg, err := awsconfig.LoadDefaultConfig(ctx, + awsconfig.WithRegion(cfg.S3.Region), + awsconfig.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( + cfg.S3.AccessKeyID, + cfg.S3.SecretAccessKey, + "", + )), + ) + if err != nil { + return nil, fmt.Errorf("unable to load AWS SDK config: %w", err) + } + + s3Client = s3.NewFromConfig(awsCfg) + } + + return NewS3Storage(s3Client, cfg.S3.Bucket, cfg.S3.CustomURL, cfg.S3.ProxyS3), nil + default: + return nil, fmt.Errorf("unsupported storage type: %s", cfg.Type) + } +} 
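For reference, a minimal sketch of how this factory might be wired up at server startup; it is illustrative only, not part of the patch. The `config.Load` helper and the top-level `Storage` field on the config struct are assumptions, while `config.StorageConfig` and `storage.NewStorage` come from the code above.

package main

import (
	"context"
	"log"

	"tss-rocks-be/internal/config"
	"tss-rocks-be/internal/storage"
)

func main() {
	ctx := context.Background()

	// Hypothetical config loader; the real entry point in cmd/server/main.go
	// may load configuration differently.
	cfg, err := config.Load("config/config.yaml")
	if err != nil {
		log.Fatalf("failed to load config: %v", err)
	}

	// cfg.Storage is assumed to be a config.StorageConfig; NewStorage selects
	// the local or S3 backend based on cfg.Storage.Type ("local" or "s3").
	store, err := storage.NewStorage(ctx, &cfg.Storage)
	if err != nil {
		log.Fatalf("failed to initialize storage: %v", err)
	}

	// store satisfies the Storage interface and can back NewMediaService.
	_ = store
}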
diff --git a/backend/internal/storage/local.go b/backend/internal/storage/local.go new file mode 100644 index 0000000..2b1d8bf --- /dev/null +++ b/backend/internal/storage/local.go @@ -0,0 +1,260 @@ +package storage + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/hex" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "time" +) + +type LocalStorage struct { + rootDir string + metaDir string +} + +func NewLocalStorage(rootDir string) (*LocalStorage, error) { + // Ensure the root directory exists + if err := os.MkdirAll(rootDir, 0755); err != nil { + return nil, fmt.Errorf("failed to create root directory: %w", err) + } + + // Create metadata directory + metaDir := filepath.Join(rootDir, ".meta") + if err := os.MkdirAll(metaDir, 0755); err != nil { + return nil, fmt.Errorf("failed to create metadata directory: %w", err) + } + + return &LocalStorage{ + rootDir: rootDir, + metaDir: metaDir, + }, nil +} + +func (s *LocalStorage) generateID() (string, error) { + bytes := make([]byte, 16) + if _, err := rand.Read(bytes); err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} + +func (s *LocalStorage) saveMetadata(id string, info *FileInfo) error { + metaPath := filepath.Join(s.metaDir, id+".meta") + file, err := os.Create(metaPath) + if err != nil { + return fmt.Errorf("failed to create metadata file: %w", err) + } + defer file.Close() + + data := fmt.Sprintf("%s\n%s", info.Name, info.ContentType) + if _, err := file.WriteString(data); err != nil { + return fmt.Errorf("failed to write metadata: %w", err) + } + return nil +} + +func (s *LocalStorage) loadMetadata(id string) (string, string, error) { + metaPath := filepath.Join(s.metaDir, id+".meta") + data, err := os.ReadFile(metaPath) + if err != nil { + if os.IsNotExist(err) { + return id, "", nil // Return ID as name if metadata doesn't exist + } + return "", "", fmt.Errorf("failed to read metadata: %w", err) + } + + parts := bytes.Split(data, []byte("\n")) + name := string(parts[0]) + contentType := "" + if len(parts) > 1 { + contentType = string(parts[1]) + } + return name, contentType, nil +} + +func (s *LocalStorage) Save(ctx context.Context, name string, contentType string, reader io.Reader) (*FileInfo, error) { + if reader == nil { + return nil, fmt.Errorf("reader cannot be nil") + } + + // Generate a unique ID for the file + id, err := s.generateID() + if err != nil { + return nil, fmt.Errorf("failed to generate file ID: %w", err) + } + + // Create the file path + filePath := filepath.Join(s.rootDir, id) + + // Create the file + file, err := os.Create(filePath) + if err != nil { + return nil, fmt.Errorf("failed to create file: %w", err) + } + defer file.Close() + + // Copy the content + size, err := io.Copy(file, reader) + if err != nil { + // Clean up the file if there's an error + os.Remove(filePath) + return nil, fmt.Errorf("failed to write file content: %w", err) + } + + now := time.Now() + info := &FileInfo{ + ID: id, + Name: name, + Size: size, + ContentType: contentType, + CreatedAt: now, + UpdatedAt: now, + URL: fmt.Sprintf("/api/media/file/%s", id), + } + + // Save metadata + if err := s.saveMetadata(id, info); err != nil { + os.Remove(filePath) + return nil, err + } + + return info, nil +} + +func (s *LocalStorage) Get(ctx context.Context, id string) (io.ReadCloser, *FileInfo, error) { + filePath := filepath.Join(s.rootDir, id) + + // Open the file + file, err := os.Open(filePath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil, fmt.Errorf("file not found: %s", id) + } + 
return nil, nil, fmt.Errorf("failed to open file: %w", err) + } + + // Get file info + stat, err := file.Stat() + if err != nil { + file.Close() + return nil, nil, fmt.Errorf("failed to get file info: %w", err) + } + + // Load metadata + name, contentType, err := s.loadMetadata(id) + if err != nil { + file.Close() + return nil, nil, err + } + + info := &FileInfo{ + ID: id, + Name: name, + Size: stat.Size(), + ContentType: contentType, + CreatedAt: stat.ModTime(), + UpdatedAt: stat.ModTime(), + URL: fmt.Sprintf("/api/media/file/%s", id), + } + + return file, info, nil +} + +func (s *LocalStorage) Delete(ctx context.Context, id string) error { + filePath := filepath.Join(s.rootDir, id) + if err := os.Remove(filePath); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("file not found: %s", id) + } + return fmt.Errorf("failed to delete file: %w", err) + } + + // Remove metadata + metaPath := filepath.Join(s.metaDir, id+".meta") + if err := os.Remove(metaPath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to remove metadata: %w", err) + } + + return nil +} + +func (s *LocalStorage) List(ctx context.Context, prefix string, limit int, offset int) ([]*FileInfo, error) { + var files []*FileInfo + var count int + + err := filepath.Walk(s.rootDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip directories and metadata directory + if info.IsDir() || path == s.metaDir { + if path == s.metaDir { + return filepath.SkipDir + } + return nil + } + + // Get the file ID (basename of the path) + id := filepath.Base(path) + + // Load metadata to get the original name + name, contentType, err := s.loadMetadata(id) + if err != nil { + return err + } + + // Skip files that don't match the prefix + if prefix != "" && !strings.HasPrefix(name, prefix) { + return nil + } + + // Skip files before offset + if count < offset { + count++ + return nil + } + + // Stop if we've reached the limit + if limit > 0 && len(files) >= limit { + return filepath.SkipDir + } + + files = append(files, &FileInfo{ + ID: id, + Name: name, + Size: info.Size(), + ContentType: contentType, + CreatedAt: info.ModTime(), + UpdatedAt: info.ModTime(), + URL: fmt.Sprintf("/api/media/file/%s", id), + }) + + count++ + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to list files: %w", err) + } + + return files, nil +} + +func (s *LocalStorage) Exists(ctx context.Context, id string) (bool, error) { + filePath := filepath.Join(s.rootDir, id) + _, err := os.Stat(filePath) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, fmt.Errorf("failed to check file existence: %w", err) +} diff --git a/backend/internal/storage/local_test.go b/backend/internal/storage/local_test.go new file mode 100644 index 0000000..f27a16f --- /dev/null +++ b/backend/internal/storage/local_test.go @@ -0,0 +1,154 @@ +package storage + +import ( + "bytes" + "context" + "io" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLocalStorage(t *testing.T) { + // Create a temporary directory for testing + tempDir, err := os.MkdirTemp("", "storage_test_*") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + // Create a new LocalStorage instance + storage, err := NewLocalStorage(tempDir) + require.NoError(t, err) + + ctx := context.Background() + + t.Run("Save and Get", func(t *testing.T) { + content := []byte("test content") + 
reader := bytes.NewReader(content) + + // Save the file + fileInfo, err := storage.Save(ctx, "test.txt", "text/plain", reader) + require.NoError(t, err) + assert.NotEmpty(t, fileInfo.ID) + assert.Equal(t, "test.txt", fileInfo.Name) + assert.Equal(t, int64(len(content)), fileInfo.Size) + assert.Equal(t, "text/plain", fileInfo.ContentType) + assert.False(t, fileInfo.CreatedAt.IsZero()) + + // Get the file + readCloser, info, err := storage.Get(ctx, fileInfo.ID) + require.NoError(t, err) + defer readCloser.Close() + + data, err := io.ReadAll(readCloser) + require.NoError(t, err) + assert.Equal(t, content, data) + assert.Equal(t, fileInfo.ID, info.ID) + assert.Equal(t, fileInfo.Name, info.Name) + assert.Equal(t, fileInfo.Size, info.Size) + }) + + t.Run("List", func(t *testing.T) { + // Clear the directory first + dirEntries, err := os.ReadDir(tempDir) + require.NoError(t, err) + for _, entry := range dirEntries { + if entry.Name() != ".meta" { + os.Remove(filepath.Join(tempDir, entry.Name())) + } + } + + // Save multiple files + testFiles := []struct { + name string + content string + }{ + {"test1.txt", "content1"}, + {"test2.txt", "content2"}, + {"other.txt", "content3"}, + } + + for _, f := range testFiles { + reader := bytes.NewReader([]byte(f.content)) + _, err := storage.Save(ctx, f.name, "text/plain", reader) + require.NoError(t, err) + } + + // List all files + allFiles, err := storage.List(ctx, "", 10, 0) + require.NoError(t, err) + assert.Len(t, allFiles, 3) + + // List files with prefix + filesWithPrefix, err := storage.List(ctx, "test", 10, 0) + require.NoError(t, err) + assert.Len(t, filesWithPrefix, 2) + for _, f := range filesWithPrefix { + assert.True(t, strings.HasPrefix(f.Name, "test")) + } + + // Test pagination + pagedFiles, err := storage.List(ctx, "", 2, 1) + require.NoError(t, err) + assert.Len(t, pagedFiles, 2) + }) + + t.Run("Exists", func(t *testing.T) { + // Save a file + content := []byte("test content") + reader := bytes.NewReader(content) + fileInfo, err := storage.Save(ctx, "exists.txt", "text/plain", reader) + require.NoError(t, err) + + // Check if file exists + exists, err := storage.Exists(ctx, fileInfo.ID) + require.NoError(t, err) + assert.True(t, exists) + + // Check non-existent file + exists, err = storage.Exists(ctx, "non-existent") + require.NoError(t, err) + assert.False(t, exists) + }) + + t.Run("Delete", func(t *testing.T) { + // Save a file + content := []byte("test content") + reader := bytes.NewReader(content) + fileInfo, err := storage.Save(ctx, "delete.txt", "text/plain", reader) + require.NoError(t, err) + + // Delete the file + err = storage.Delete(ctx, fileInfo.ID) + require.NoError(t, err) + + // Verify file is deleted + exists, err := storage.Exists(ctx, fileInfo.ID) + require.NoError(t, err) + assert.False(t, exists) + + // Try to delete non-existent file + err = storage.Delete(ctx, "non-existent") + assert.Error(t, err) + }) + + t.Run("Invalid operations", func(t *testing.T) { + // Try to get non-existent file + _, _, err := storage.Get(ctx, "non-existent") + assert.Error(t, err) + assert.Contains(t, err.Error(), "file not found") + + // Try to save file with nil reader + _, err = storage.Save(ctx, "test.txt", "text/plain", nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "reader cannot be nil") + + // Try to delete non-existent file + err = storage.Delete(ctx, "non-existent") + assert.Error(t, err) + assert.Contains(t, err.Error(), "file not found") + }) +} diff --git a/backend/internal/storage/s3.go 
b/backend/internal/storage/s3.go new file mode 100644 index 0000000..bea236d --- /dev/null +++ b/backend/internal/storage/s3.go @@ -0,0 +1,232 @@ +package storage + +import ( + "context" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" +) + +type S3Storage struct { + client s3Client + bucket string + customURL string + proxyS3 bool +} + +// s3Client is the interface that wraps the basic S3 client operations we need +type s3Client interface { + PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) + GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) + DeleteObject(ctx context.Context, params *s3.DeleteObjectInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectOutput, error) + ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) + HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error) +} + +func NewS3Storage(client s3Client, bucket string, customURL string, proxyS3 bool) *S3Storage { + return &S3Storage{ + client: client, + bucket: bucket, + customURL: customURL, + proxyS3: proxyS3, + } +} + +func (s *S3Storage) generateID() (string, error) { + bytes := make([]byte, 16) + if _, err := rand.Read(bytes); err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} + +func (s *S3Storage) getObjectURL(id string) string { + if s.customURL != "" { + return fmt.Sprintf("%s/%s", strings.TrimRight(s.customURL, "/"), id) + } + if s.proxyS3 { + return fmt.Sprintf("/api/media/file/%s", id) + } + return fmt.Sprintf("https://%s.s3.amazonaws.com/%s", s.bucket, id) +} + +func (s *S3Storage) Save(ctx context.Context, name string, contentType string, reader io.Reader) (*FileInfo, error) { + // Generate a unique ID for the file + id, err := s.generateID() + if err != nil { + return nil, fmt.Errorf("failed to generate file ID: %w", err) + } + + // Check if the file exists + _, err = s.client.HeadObject(ctx, &s3.HeadObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(id), + }) + if err == nil { + return nil, fmt.Errorf("file already exists with ID: %s", id) + } + + var noSuchKey *types.NoSuchKey + if !errors.As(err, &noSuchKey) { + return nil, fmt.Errorf("failed to check if file exists: %w", err) + } + + // Upload the file + _, err = s.client.PutObject(ctx, &s3.PutObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(id), + Body: reader, + ContentType: aws.String(contentType), + Metadata: map[string]string{ + "x-amz-meta-original-name": name, + }, + }) + if err != nil { + return nil, fmt.Errorf("failed to upload file: %w", err) + } + + now := time.Now() + info := &FileInfo{ + ID: id, + Name: name, + Size: 0, // Size is not available until after upload + ContentType: contentType, + CreatedAt: now, + UpdatedAt: now, + URL: s.getObjectURL(id), + } + + return info, nil +} + +func (s *S3Storage) Get(ctx context.Context, id string) (io.ReadCloser, *FileInfo, error) { + // Get the object from S3 + result, err := s.client.GetObject(ctx, &s3.GetObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(id), + }) + if err != nil { + return nil, nil, fmt.Errorf("failed to get file from S3: %w", err) + } + + info := &FileInfo{ + ID: id, + Name: 
result.Metadata["x-amz-meta-original-name"], + Size: aws.ToInt64(result.ContentLength), + ContentType: aws.ToString(result.ContentType), + CreatedAt: aws.ToTime(result.LastModified), + UpdatedAt: aws.ToTime(result.LastModified), + URL: s.getObjectURL(id), + } + + return result.Body, info, nil +} + +func (s *S3Storage) Delete(ctx context.Context, id string) error { + _, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(id), + }) + if err != nil { + return fmt.Errorf("failed to delete file from S3: %w", err) + } + + return nil +} + +func (s *S3Storage) List(ctx context.Context, prefix string, limit int, offset int) ([]*FileInfo, error) { + var files []*FileInfo + var continuationToken *string + + // Skip objects for offset + for i := 0; i < offset/1000; i++ { + output, err := s.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ + Bucket: aws.String(s.bucket), + Prefix: aws.String(prefix), + ContinuationToken: continuationToken, + MaxKeys: aws.Int32(1000), + }) + if err != nil { + return nil, fmt.Errorf("failed to list files from S3: %w", err) + } + if !aws.ToBool(output.IsTruncated) { + return files, nil + } + continuationToken = output.NextContinuationToken + } + + // Get the actual objects + output, err := s.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ + Bucket: aws.String(s.bucket), + Prefix: aws.String(prefix), + ContinuationToken: continuationToken, + MaxKeys: aws.Int32(int32(limit)), + }) + if err != nil { + return nil, fmt.Errorf("failed to list files from S3: %w", err) + } + + for _, obj := range output.Contents { + // Get the object metadata + head, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{ + Bucket: aws.String(s.bucket), + Key: obj.Key, + }) + + var contentType string + var originalName string + + if err != nil { + var noSuchKey *types.NoSuchKey + if errors.As(err, &noSuchKey) { + // If the object doesn't exist (which shouldn't happen normally), + // we'll still include it in the list but with empty metadata + contentType = "" + originalName = aws.ToString(obj.Key) + } else { + continue + } + } else { + contentType = aws.ToString(head.ContentType) + originalName = head.Metadata["x-amz-meta-original-name"] + if originalName == "" { + originalName = aws.ToString(obj.Key) + } + } + + files = append(files, &FileInfo{ + ID: aws.ToString(obj.Key), + Name: originalName, + Size: aws.ToInt64(obj.Size), + ContentType: contentType, + CreatedAt: aws.ToTime(obj.LastModified), + UpdatedAt: aws.ToTime(obj.LastModified), + URL: s.getObjectURL(aws.ToString(obj.Key)), + }) + } + + return files, nil +} + +func (s *S3Storage) Exists(ctx context.Context, id string) (bool, error) { + _, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(id), + }) + if err != nil { + var nsk *types.NoSuchKey + if ok := errors.As(err, &nsk); ok { + return false, nil + } + return false, fmt.Errorf("failed to check file existence in S3: %w", err) + } + return true, nil +} diff --git a/backend/internal/storage/s3_test.go b/backend/internal/storage/s3_test.go new file mode 100644 index 0000000..215f04e --- /dev/null +++ b/backend/internal/storage/s3_test.go @@ -0,0 +1,211 @@ +package storage + +import ( + "bytes" + "context" + "io" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + 
+// MockS3Client is a mock implementation of the S3 client interface +type MockS3Client struct { + mock.Mock +} + +func (m *MockS3Client) PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) { + args := m.Called(ctx, params) + return args.Get(0).(*s3.PutObjectOutput), args.Error(1) +} + +func (m *MockS3Client) GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) { + args := m.Called(ctx, params) + return args.Get(0).(*s3.GetObjectOutput), args.Error(1) +} + +func (m *MockS3Client) DeleteObject(ctx context.Context, params *s3.DeleteObjectInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectOutput, error) { + args := m.Called(ctx, params) + return args.Get(0).(*s3.DeleteObjectOutput), args.Error(1) +} + +func (m *MockS3Client) ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) { + args := m.Called(ctx, params) + return args.Get(0).(*s3.ListObjectsV2Output), args.Error(1) +} + +func (m *MockS3Client) HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error) { + args := m.Called(ctx, params) + return args.Get(0).(*s3.HeadObjectOutput), args.Error(1) +} + +func TestS3Storage(t *testing.T) { + ctx := context.Background() + mockClient := new(MockS3Client) + storage := NewS3Storage(mockClient, "test-bucket", "", false) + + t.Run("Save", func(t *testing.T) { + mockClient.ExpectedCalls = nil + mockClient.Calls = nil + + content := []byte("test content") + reader := bytes.NewReader(content) + + // Mock HeadObject to return NotFound error + mockClient.On("HeadObject", ctx, mock.MatchedBy(func(input *s3.HeadObjectInput) bool { + return aws.ToString(input.Bucket) == "test-bucket" + })).Return(&s3.HeadObjectOutput{}, &types.NoSuchKey{ + Message: aws.String("The specified key does not exist."), + }) + + mockClient.On("PutObject", ctx, mock.MatchedBy(func(input *s3.PutObjectInput) bool { + return aws.ToString(input.Bucket) == "test-bucket" && + aws.ToString(input.ContentType) == "text/plain" + })).Return(&s3.PutObjectOutput{}, nil) + + fileInfo, err := storage.Save(ctx, "test.txt", "text/plain", reader) + require.NoError(t, err) + assert.NotEmpty(t, fileInfo.ID) + assert.Equal(t, "test.txt", fileInfo.Name) + assert.Equal(t, "text/plain", fileInfo.ContentType) + + mockClient.AssertExpectations(t) + }) + + t.Run("Get", func(t *testing.T) { + content := []byte("test content") + mockClient.On("GetObject", ctx, mock.MatchedBy(func(input *s3.GetObjectInput) bool { + return aws.ToString(input.Bucket) == "test-bucket" && + aws.ToString(input.Key) == "test-id" + })).Return(&s3.GetObjectOutput{ + Body: io.NopCloser(bytes.NewReader(content)), + ContentType: aws.String("text/plain"), + ContentLength: aws.Int64(int64(len(content))), + LastModified: aws.Time(time.Now()), + }, nil) + + readCloser, info, err := storage.Get(ctx, "test-id") + require.NoError(t, err) + defer readCloser.Close() + + data, err := io.ReadAll(readCloser) + require.NoError(t, err) + assert.Equal(t, content, data) + assert.Equal(t, "test-id", info.ID) + assert.Equal(t, int64(len(content)), info.Size) + + mockClient.AssertExpectations(t) + }) + + t.Run("List", func(t *testing.T) { + mockClient.ExpectedCalls = nil + mockClient.Calls = nil + + mockClient.On("ListObjectsV2", ctx, mock.MatchedBy(func(input *s3.ListObjectsV2Input) bool { + return aws.ToString(input.Bucket) == "test-bucket" && + 
aws.ToString(input.Prefix) == "test" && + aws.ToInt32(input.MaxKeys) == 10 + })).Return(&s3.ListObjectsV2Output{ + Contents: []types.Object{ + { + Key: aws.String("test1"), + Size: aws.Int64(100), + LastModified: aws.Time(time.Now()), + }, + { + Key: aws.String("test2"), + Size: aws.Int64(200), + LastModified: aws.Time(time.Now()), + }, + }, + }, nil) + + // Mock HeadObject for both files + mockClient.On("HeadObject", ctx, mock.MatchedBy(func(input *s3.HeadObjectInput) bool { + return aws.ToString(input.Bucket) == "test-bucket" && + aws.ToString(input.Key) == "test1" + })).Return(&s3.HeadObjectOutput{ + ContentType: aws.String("text/plain"), + Metadata: map[string]string{ + "x-amz-meta-original-name": "test1.txt", + }, + }, nil).Once() + + mockClient.On("HeadObject", ctx, mock.MatchedBy(func(input *s3.HeadObjectInput) bool { + return aws.ToString(input.Bucket) == "test-bucket" && + aws.ToString(input.Key) == "test2" + })).Return(&s3.HeadObjectOutput{ + ContentType: aws.String("text/plain"), + Metadata: map[string]string{ + "x-amz-meta-original-name": "test2.txt", + }, + }, nil).Once() + + files, err := storage.List(ctx, "test", 10, 0) + require.NoError(t, err) + assert.Len(t, files, 2) + assert.Equal(t, "test1", files[0].ID) + assert.Equal(t, int64(100), files[0].Size) + assert.Equal(t, "test1.txt", files[0].Name) + assert.Equal(t, "text/plain", files[0].ContentType) + + mockClient.AssertExpectations(t) + }) + + t.Run("Delete", func(t *testing.T) { + mockClient.On("DeleteObject", ctx, mock.MatchedBy(func(input *s3.DeleteObjectInput) bool { + return aws.ToString(input.Bucket) == "test-bucket" && + aws.ToString(input.Key) == "test-id" + })).Return(&s3.DeleteObjectOutput{}, nil) + + err := storage.Delete(ctx, "test-id") + require.NoError(t, err) + + mockClient.AssertExpectations(t) + }) + + t.Run("Exists", func(t *testing.T) { + mockClient.ExpectedCalls = nil + mockClient.Calls = nil + + // Mock HeadObject for existing file + mockClient.On("HeadObject", ctx, mock.MatchedBy(func(input *s3.HeadObjectInput) bool { + return aws.ToString(input.Bucket) == "test-bucket" && + aws.ToString(input.Key) == "test-id" + })).Return(&s3.HeadObjectOutput{}, nil).Once() + + exists, err := storage.Exists(ctx, "test-id") + require.NoError(t, err) + assert.True(t, exists) + + // Mock HeadObject for non-existing file + mockClient.On("HeadObject", ctx, mock.MatchedBy(func(input *s3.HeadObjectInput) bool { + return aws.ToString(input.Bucket) == "test-bucket" && + aws.ToString(input.Key) == "non-existent" + })).Return(&s3.HeadObjectOutput{}, &types.NoSuchKey{ + Message: aws.String("The specified key does not exist."), + }).Once() + + exists, err = storage.Exists(ctx, "non-existent") + require.NoError(t, err) + assert.False(t, exists) + + mockClient.AssertExpectations(t) + }) + + t.Run("Custom URL", func(t *testing.T) { + customStorage := &S3Storage{ + client: mockClient, + bucket: "test-bucket", + customURL: "https://custom.domain", + proxyS3: true, + } + assert.Contains(t, customStorage.getObjectURL("test-id"), "https://custom.domain") + }) +} diff --git a/backend/internal/storage/storage.go b/backend/internal/storage/storage.go new file mode 100644 index 0000000..ccd00d2 --- /dev/null +++ b/backend/internal/storage/storage.go @@ -0,0 +1,38 @@ +package storage + +//go:generate mockgen -source=storage.go -destination=mock/mock_storage.go -package=mock + +import ( + "context" + "io" + "time" +) + +// FileInfo represents metadata about a stored file +type FileInfo struct { + ID string `json:"id"` + Name string 
`json:"name"` + Size int64 `json:"size"` + ContentType string `json:"content_type"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + URL string `json:"url"` +} + +// Storage defines the interface for file storage operations +type Storage interface { + // Save stores a file and returns its FileInfo + Save(ctx context.Context, name string, contentType string, reader io.Reader) (*FileInfo, error) + + // Get retrieves a file by its ID + Get(ctx context.Context, id string) (io.ReadCloser, *FileInfo, error) + + // Delete removes a file by its ID + Delete(ctx context.Context, id string) error + + // List returns a list of files with optional prefix + List(ctx context.Context, prefix string, limit int, offset int) ([]*FileInfo, error) + + // Exists checks if a file exists + Exists(ctx context.Context, id string) (bool, error) +} diff --git a/backend/internal/testutil/db.go b/backend/internal/testutil/db.go new file mode 100644 index 0000000..1337383 --- /dev/null +++ b/backend/internal/testutil/db.go @@ -0,0 +1,57 @@ +package testutil + +import ( + "context" + "os" + "path/filepath" + "testing" + + "entgo.io/ent/dialect" + "github.com/mattn/go-sqlite3" + "github.com/stretchr/testify/require" + + "tss-rocks-be/ent" +) + +// SetupTestDB creates a new test database and returns a client +func SetupTestDB(t *testing.T) *ent.Client { + // Create a temporary SQLite database for testing + dir := t.TempDir() + dbPath := filepath.Join(dir, "test.db") + + client, err := ent.Open(dialect.SQLite, "file:"+dbPath+"?mode=memory&cache=shared&_fk=1") + require.NoError(t, err) + + // Run the auto migration tool + err = client.Schema.Create(context.Background()) + require.NoError(t, err) + + // Clean up the database after the test + t.Cleanup(func() { + client.Close() + os.Remove(dbPath) + }) + + return client +} + +// CleanupTestDB removes all data from the test database +func CleanupTestDB(t *testing.T, client *ent.Client) { + ctx := context.Background() + + // Delete all data in reverse order of dependencies + _, err := client.Permission.Delete().Exec(ctx) + require.NoError(t, err) + + _, err = client.Role.Delete().Exec(ctx) + require.NoError(t, err) + + _, err = client.User.Delete().Exec(ctx) + require.NoError(t, err) +} + +// IsSQLiteConstraintError checks if the error is a SQLite constraint error +func IsSQLiteConstraintError(err error) bool { + sqliteErr, ok := err.(sqlite3.Error) + return ok && sqliteErr.Code == sqlite3.ErrConstraint +} diff --git a/backend/internal/testutil/mock.go b/backend/internal/testutil/mock.go new file mode 100644 index 0000000..5fd6929 --- /dev/null +++ b/backend/internal/testutil/mock.go @@ -0,0 +1,32 @@ +package testutil + +import ( + "io" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +// MockReadCloser is a mock implementation of io.ReadCloser +type MockReadCloser struct { + io.Reader + CloseFunc func() error +} + +func (m MockReadCloser) Close() error { + if m.CloseFunc != nil { + return m.CloseFunc() + } + return nil +} + +// NewMockReadCloser creates a new MockReadCloser with the given content +func NewMockReadCloser(content string) io.ReadCloser { + return MockReadCloser{Reader: strings.NewReader(content)} +} + +// RequireMockEquals asserts that two mocks are equal +func RequireMockEquals(t *testing.T, expected, actual interface{}) { + require.Equal(t, expected, actual) +} diff --git a/backend/internal/testutil/testutil.go b/backend/internal/testutil/testutil.go new file mode 100644 index 0000000..37432e4 --- 
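For reviewers of the Storage interface above, a minimal usage sketch follows. It is not part of this patch: the saveUpload helper and its wiring are illustrative assumptions about how a caller would drive Save and the returned FileInfo.

// Hypothetical caller of the Storage interface defined in storage.go above
// (sketch only; saveUpload does not exist in this patch).
package sketch

import (
	"context"
	"io"

	"tss-rocks-be/internal/storage"
)

// saveUpload stores an upload and returns the URL reported in FileInfo.
// The generated FileInfo.ID is what later Get/Delete/Exists calls expect.
func saveUpload(ctx context.Context, store storage.Storage, name, contentType string, r io.Reader) (string, error) {
	info, err := store.Save(ctx, name, contentType, r)
	if err != nil {
		return "", err
	}
	return info.URL, nil
}
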
/dev/null
+++ b/backend/internal/testutil/testutil.go
@@ -0,0 +1,70 @@
+package testutil
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/gin-gonic/gin"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"tss-rocks-be/ent"
+	"tss-rocks-be/ent/enttest"
+)
+
+// SetupTestRouter returns a new Gin engine for testing
+func SetupTestRouter() *gin.Engine {
+	gin.SetMode(gin.TestMode)
+	return gin.New()
+}
+
+// NewTestClient creates a new ent client for testing; it needs the running test's testing.TB, since enttest.Open panics on a nil TB
+func NewTestClient(t testing.TB) *ent.Client {
+	client := enttest.Open(t, "sqlite3", "file:ent?mode=memory&cache=shared&_fk=1")
+	return client
+}
+
+// MakeTestRequest performs a test HTTP request and returns the response
+func MakeTestRequest(t *testing.T, router *gin.Engine, method, path string, body interface{}) *httptest.ResponseRecorder {
+	var reqBody io.Reader
+	if body != nil {
+		jsonBytes, err := json.Marshal(body)
+		require.NoError(t, err)
+		reqBody = bytes.NewBuffer(jsonBytes)
+	}
+
+	req := httptest.NewRequest(method, path, reqBody)
+	if body != nil {
+		req.Header.Set("Content-Type", "application/json")
+	}
+
+	w := httptest.NewRecorder()
+	router.ServeHTTP(w, req)
+	return w
+}
+
+// AssertResponse asserts the HTTP response status code and body
+func AssertResponse(t *testing.T, w *httptest.ResponseRecorder, expectedStatus int, expectedBody interface{}) {
+	assert.Equal(t, expectedStatus, w.Code)
+
+	if expectedBody != nil {
+		var actualBody interface{}
+		err := json.Unmarshal(w.Body.Bytes(), &actualBody)
+		require.NoError(t, err)
+		assert.Equal(t, expectedBody, actualBody)
+	}
+}
+
+// AssertErrorResponse asserts an error response with a specific message
+func AssertErrorResponse(t *testing.T, w *httptest.ResponseRecorder, expectedStatus int, expectedMessage string) {
+	assert.Equal(t, expectedStatus, w.Code)
+
+	var response struct {
+		Error string `json:"error"`
+	}
+	err := json.Unmarshal(w.Body.Bytes(), &response)
+	require.NoError(t, err)
+	assert.Equal(t, expectedMessage, response.Error)
+}
diff --git a/backend/internal/types/config.go b/backend/internal/types/config.go
new file mode 100644
index 0000000..f2549df
--- /dev/null
+++ b/backend/internal/types/config.go
@@ -0,0 +1,41 @@
+package types
+
+// RateLimitConfig holds rate-limiting configuration
+type RateLimitConfig struct {
+	IPRate  int `yaml:"ip_rate"`  // per-IP request rate
+	IPBurst int `yaml:"ip_burst"` // per-IP burst size
+
+	RouteRates map[string]struct {
+		Rate  int `yaml:"rate"`  // per-route request rate
+		Burst int `yaml:"burst"` // per-route burst size
+	} `yaml:"route_rates"`
+}
+
+// AccessLogConfig holds access-log configuration
+type AccessLogConfig struct {
+	// enable console output
+	EnableConsole bool `yaml:"enable_console"`
+	// enable file logging
+	EnableFile bool `yaml:"enable_file"`
+	// log file path
+	FilePath string `yaml:"file_path"`
+	// log format (json or text)
+	Format string `yaml:"format"`
+	// log level
+	Level string `yaml:"level"`
+	// log rotation settings
+	Rotation struct {
+		MaxSize    int  `yaml:"max_size"`    // maximum size of each log file (MB)
+		MaxAge     int  `yaml:"max_age"`     // maximum days to keep old log files
+		MaxBackups int  `yaml:"max_backups"` // maximum number of old log files to keep
+		Compress   bool `yaml:"compress"`    // compress rotated log files
+		LocalTime  bool `yaml:"local_time"`  // use local time for rotation timestamps
+	} `yaml:"rotation"`
+}
+
+// UploadConfig holds file-upload configuration
+type UploadConfig struct {
+	MaxSize           int      `yaml:"max_size"`           // maximum file size (MB)
+	AllowedTypes      []string `yaml:"allowed_types"`      // allowed MIME types
+	AllowedExtensions []string `yaml:"allowed_extensions"` // allowed file extensions
+}
diff --git a/backend/internal/types/config_test.go b/backend/internal/types/config_test.go
new file mode 100644
index 
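To make the YAML shape these config types expect concrete, here is a small self-contained sketch; the route path and numbers are illustrative, and the struct is re-declared locally so the snippet runs on its own.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// rateLimitConfig mirrors types.RateLimitConfig above (local copy for a runnable sketch).
type rateLimitConfig struct {
	IPRate     int `yaml:"ip_rate"`
	IPBurst    int `yaml:"ip_burst"`
	RouteRates map[string]struct {
		Rate  int `yaml:"rate"`
		Burst int `yaml:"burst"`
	} `yaml:"route_rates"`
}

func main() {
	// Hypothetical fragment of the server config file.
	raw := []byte(`
ip_rate: 100
ip_burst: 200
route_rates:
  /api/posts:
    rate: 50
    burst: 100
`)
	var cfg rateLimitConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// Prints: 100 50
	fmt.Println(cfg.IPRate, cfg.RouteRates["/api/posts"].Rate)
}
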
0000000..298e318 --- /dev/null +++ b/backend/internal/types/config_test.go @@ -0,0 +1,116 @@ +package types + +import ( + "testing" +) + +func TestRateLimitConfig(t *testing.T) { + config := RateLimitConfig{ + IPRate: 100, + IPBurst: 200, + RouteRates: map[string]struct { + Rate int `yaml:"rate"` + Burst int `yaml:"burst"` + }{ + "/api/test": { + Rate: 50, + Burst: 100, + }, + }, + } + + if config.IPRate != 100 { + t.Errorf("Expected IPRate 100, got %d", config.IPRate) + } + if config.IPBurst != 200 { + t.Errorf("Expected IPBurst 200, got %d", config.IPBurst) + } + + route := config.RouteRates["/api/test"] + if route.Rate != 50 { + t.Errorf("Expected route rate 50, got %d", route.Rate) + } + if route.Burst != 100 { + t.Errorf("Expected route burst 100, got %d", route.Burst) + } +} + +func TestAccessLogConfig(t *testing.T) { + config := AccessLogConfig{ + EnableConsole: true, + EnableFile: true, + FilePath: "/var/log/app.log", + Format: "json", + Level: "info", + Rotation: struct { + MaxSize int `yaml:"max_size"` + MaxAge int `yaml:"max_age"` + MaxBackups int `yaml:"max_backups"` + Compress bool `yaml:"compress"` + LocalTime bool `yaml:"local_time"` + }{ + MaxSize: 100, + MaxAge: 7, + MaxBackups: 5, + Compress: true, + LocalTime: true, + }, + } + + if !config.EnableConsole { + t.Error("Expected EnableConsole to be true") + } + if !config.EnableFile { + t.Error("Expected EnableFile to be true") + } + if config.FilePath != "/var/log/app.log" { + t.Errorf("Expected FilePath '/var/log/app.log', got '%s'", config.FilePath) + } + if config.Format != "json" { + t.Errorf("Expected Format 'json', got '%s'", config.Format) + } + if config.Level != "info" { + t.Errorf("Expected Level 'info', got '%s'", config.Level) + } + + rotation := config.Rotation + if rotation.MaxSize != 100 { + t.Errorf("Expected MaxSize 100, got %d", rotation.MaxSize) + } + if rotation.MaxAge != 7 { + t.Errorf("Expected MaxAge 7, got %d", rotation.MaxAge) + } + if rotation.MaxBackups != 5 { + t.Errorf("Expected MaxBackups 5, got %d", rotation.MaxBackups) + } + if !rotation.Compress { + t.Error("Expected Compress to be true") + } + if !rotation.LocalTime { + t.Error("Expected LocalTime to be true") + } +} + +func TestUploadConfig(t *testing.T) { + config := UploadConfig{ + MaxSize: 10, + AllowedTypes: []string{"image/jpeg", "image/png"}, + AllowedExtensions: []string{".jpg", ".png"}, + } + + if config.MaxSize != 10 { + t.Errorf("Expected MaxSize 10, got %d", config.MaxSize) + } + if len(config.AllowedTypes) != 2 { + t.Errorf("Expected 2 AllowedTypes, got %d", len(config.AllowedTypes)) + } + if config.AllowedTypes[0] != "image/jpeg" { + t.Errorf("Expected AllowedTypes[0] 'image/jpeg', got '%s'", config.AllowedTypes[0]) + } + if len(config.AllowedExtensions) != 2 { + t.Errorf("Expected 2 AllowedExtensions, got %d", len(config.AllowedExtensions)) + } + if config.AllowedExtensions[0] != ".jpg" { + t.Errorf("Expected AllowedExtensions[0] '.jpg', got '%s'", config.AllowedExtensions[0]) + } +} diff --git a/backend/internal/types/file.go b/backend/internal/types/file.go new file mode 100644 index 0000000..2abf8a1 --- /dev/null +++ b/backend/internal/types/file.go @@ -0,0 +1,8 @@ +package types + +// FileInfo represents metadata about a file +type FileInfo struct { + Size int64 + Name string + ContentType string +} diff --git a/backend/internal/types/file_test.go b/backend/internal/types/file_test.go new file mode 100644 index 0000000..1335a21 --- /dev/null +++ b/backend/internal/types/file_test.go @@ -0,0 +1,21 @@ +package types + 
+import "testing" + +func TestFileInfo(t *testing.T) { + fileInfo := FileInfo{ + Size: 1024, + Name: "test.jpg", + ContentType: "image/jpeg", + } + + if fileInfo.Size != 1024 { + t.Errorf("Expected Size 1024, got %d", fileInfo.Size) + } + if fileInfo.Name != "test.jpg" { + t.Errorf("Expected Name 'test.jpg', got '%s'", fileInfo.Name) + } + if fileInfo.ContentType != "image/jpeg" { + t.Errorf("Expected ContentType 'image/jpeg', got '%s'", fileInfo.ContentType) + } +} diff --git a/backend/internal/types/types.go b/backend/internal/types/types.go new file mode 100644 index 0000000..197e22f --- /dev/null +++ b/backend/internal/types/types.go @@ -0,0 +1,43 @@ +package types + +import "errors" + +// Common errors +var ( + ErrNotFound = errors.New("not found") +) + +// Category represents a category in the system +type Category struct { + ID int `json:"id"` + Name string `json:"name"` + Slug string `json:"slug"` + Description *string `json:"description,omitempty"` +} + +// Post represents a blog post +type Post struct { + ID int `json:"id"` + Title string `json:"title"` + Slug string `json:"slug"` + ContentMarkdown string `json:"content_markdown"` + Summary string `json:"summary"` + MetaKeywords *string `json:"meta_keywords,omitempty"` + MetaDescription *string `json:"meta_description,omitempty"` +} + +// Contributor represents a contributor to the blog +type Contributor struct { + ID int `json:"id"` + Name string `json:"name"` + AvatarURL *string `json:"avatar_url,omitempty"` + Bio *string `json:"bio,omitempty"` +} + +// Daily represents a daily quote or message +type Daily struct { + ID string `json:"id"` + CategoryID int `json:"category_id"` + ImageURL string `json:"image_url"` + Quote string `json:"quote"` +} diff --git a/backend/internal/types/types_test.go b/backend/internal/types/types_test.go new file mode 100644 index 0000000..ff3461d --- /dev/null +++ b/backend/internal/types/types_test.go @@ -0,0 +1,77 @@ +package types + +import ( + "testing" +) + +func TestCategory(t *testing.T) { + description := "Test Description" + category := Category{ + ID: 1, + Name: "Test Category", + Slug: "test-category", + Description: &description, + } + + if category.ID != 1 { + t.Errorf("Expected ID 1, got %d", category.ID) + } + if category.Name != "Test Category" { + t.Errorf("Expected name 'Test Category', got '%s'", category.Name) + } + if category.Slug != "test-category" { + t.Errorf("Expected slug 'test-category', got '%s'", category.Slug) + } + if *category.Description != description { + t.Errorf("Expected description '%s', got '%s'", description, *category.Description) + } +} + +func TestPost(t *testing.T) { + metaKeywords := "test,blog" + metaDesc := "Test Description" + post := Post{ + ID: 1, + Title: "Test Post", + Slug: "test-post", + ContentMarkdown: "# Test Content", + Summary: "Test Summary", + MetaKeywords: &metaKeywords, + MetaDescription: &metaDesc, + } + + if post.ID != 1 { + t.Errorf("Expected ID 1, got %d", post.ID) + } + if post.Title != "Test Post" { + t.Errorf("Expected title 'Test Post', got '%s'", post.Title) + } + if post.Slug != "test-post" { + t.Errorf("Expected slug 'test-post', got '%s'", post.Slug) + } + if *post.MetaKeywords != metaKeywords { + t.Errorf("Expected meta keywords '%s', got '%s'", metaKeywords, *post.MetaKeywords) + } +} + +func TestDaily(t *testing.T) { + daily := Daily{ + ID: "2025-02-12", + CategoryID: 1, + ImageURL: "https://example.com/image.jpg", + Quote: "Test Quote", + } + + if daily.ID != "2025-02-12" { + t.Errorf("Expected ID '2025-02-12', got 
'%s'", daily.ID) + } + if daily.CategoryID != 1 { + t.Errorf("Expected CategoryID 1, got %d", daily.CategoryID) + } + if daily.ImageURL != "https://example.com/image.jpg" { + t.Errorf("Expected ImageURL 'https://example.com/image.jpg', got '%s'", daily.ImageURL) + } + if daily.Quote != "Test Quote" { + t.Errorf("Expected Quote 'Test Quote', got '%s'", daily.Quote) + } +} diff --git a/backend/pkg/config/config.go b/backend/pkg/config/config.go new file mode 100644 index 0000000..b80a1f2 --- /dev/null +++ b/backend/pkg/config/config.go @@ -0,0 +1,44 @@ +package config + +import ( + "os" + + "gopkg.in/yaml.v3" +) + +type Config struct { + Database struct { + Driver string `yaml:"driver"` + DSN string `yaml:"dsn"` + } `yaml:"database"` + + Server struct { + Port int `yaml:"port"` + Host string `yaml:"host"` + } `yaml:"server"` + + JWT struct { + Secret string `yaml:"secret"` + Expiration string `yaml:"expiration"` + } `yaml:"jwt"` + + Logging struct { + Level string `yaml:"level"` + Format string `yaml:"format"` + } `yaml:"logging"` +} + +// Load loads the configuration from the specified file +func Load(path string) (*Config, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var cfg Config + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, err + } + + return &cfg, nil +} diff --git a/backend/pkg/config/config_test.go b/backend/pkg/config/config_test.go new file mode 100644 index 0000000..a785dff --- /dev/null +++ b/backend/pkg/config/config_test.go @@ -0,0 +1,77 @@ +package config + +import ( + "os" + "path/filepath" + "testing" +) + +func TestLoad(t *testing.T) { + // Create a temporary test config file + testConfig := ` +database: + driver: postgres + dsn: postgres://user:pass@localhost:5432/db +server: + port: 8080 + host: localhost +jwt: + secret: test-secret + expiration: 24h +logging: + level: debug + format: console +` + tmpDir := t.TempDir() + configPath := filepath.Join(tmpDir, "config.yaml") + if err := os.WriteFile(configPath, []byte(testConfig), 0644); err != nil { + t.Fatalf("Failed to create test config file: %v", err) + } + + // Test successful config loading + cfg, err := Load(configPath) + if err != nil { + t.Fatalf("Failed to load config: %v", err) + } + + // Verify loaded values + tests := []struct { + name string + got interface{} + expected interface{} + }{ + {"database.driver", cfg.Database.Driver, "postgres"}, + {"database.dsn", cfg.Database.DSN, "postgres://user:pass@localhost:5432/db"}, + {"server.port", cfg.Server.Port, 8080}, + {"server.host", cfg.Server.Host, "localhost"}, + {"jwt.secret", cfg.JWT.Secret, "test-secret"}, + {"jwt.expiration", cfg.JWT.Expiration, "24h"}, + {"logging.level", cfg.Logging.Level, "debug"}, + {"logging.format", cfg.Logging.Format, "console"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.got != tt.expected { + t.Errorf("Config %s = %v, want %v", tt.name, tt.got, tt.expected) + } + }) + } + + // Test loading non-existent file + _, err = Load("non-existent.yaml") + if err == nil { + t.Error("Expected error when loading non-existent file, got nil") + } + + // Test loading invalid YAML + invalidPath := filepath.Join(tmpDir, "invalid.yaml") + if err := os.WriteFile(invalidPath, []byte("invalid: yaml: content"), 0644); err != nil { + t.Fatalf("Failed to create invalid config file: %v", err) + } + + _, err = Load(invalidPath) + if err == nil { + t.Error("Expected error when loading invalid YAML, got nil") + } +} diff --git a/backend/pkg/imageutil/processor.go 
b/backend/pkg/imageutil/processor.go new file mode 100644 index 0000000..d22be5c --- /dev/null +++ b/backend/pkg/imageutil/processor.go @@ -0,0 +1,59 @@ +package imageutil + +import ( + "bytes" + "fmt" + "image" + _ "image/gif" + _ "image/jpeg" + _ "image/png" + "io" + + "github.com/chai2010/webp" +) + +type ProcessOptions struct { + Lossless bool + Quality float32 // 0-100 + Compression int // 0-6 +} + +// DefaultOptions returns default processing options +func DefaultOptions() ProcessOptions { + return ProcessOptions{ + Lossless: true, + Quality: 90, + Compression: 4, + } +} + +// ProcessImage converts any supported image to WebP format +func ProcessImage(input io.Reader, opts ProcessOptions) ([]byte, error) { + // Decode the original image + img, _, err := image.Decode(input) + if err != nil { + return nil, fmt.Errorf("failed to decode image: %w", err) + } + + // Encode to WebP + var buf bytes.Buffer + if err := webp.Encode(&buf, img, &webp.Options{ + Lossless: opts.Lossless, + Quality: opts.Quality, + Exact: true, + }); err != nil { + return nil, fmt.Errorf("failed to encode to WebP: %w", err) + } + + return buf.Bytes(), nil +} + +// IsImageFormat checks if the given format is a supported image format +func IsImageFormat(contentType string) bool { + switch contentType { + case "image/jpeg", "image/png", "image/gif", "image/webp": + return true + default: + return false + } +} diff --git a/backend/pkg/imageutil/processor_test.go b/backend/pkg/imageutil/processor_test.go new file mode 100644 index 0000000..c5cb6d5 --- /dev/null +++ b/backend/pkg/imageutil/processor_test.go @@ -0,0 +1,100 @@ +package imageutil + +import ( + "bytes" + "image" + "image/color" + "image/png" + "testing" +) + +func TestIsImageFormat(t *testing.T) { + tests := []struct { + name string + contentType string + want bool + }{ + {"JPEG", "image/jpeg", true}, + {"PNG", "image/png", true}, + {"GIF", "image/gif", true}, + {"WebP", "image/webp", true}, + {"Invalid", "image/invalid", false}, + {"Empty", "", false}, + {"Text", "text/plain", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IsImageFormat(tt.contentType); got != tt.want { + t.Errorf("IsImageFormat(%q) = %v, want %v", tt.contentType, got, tt.want) + } + }) + } +} + +func TestDefaultOptions(t *testing.T) { + opts := DefaultOptions() + + if !opts.Lossless { + t.Error("DefaultOptions().Lossless = false, want true") + } + if opts.Quality != 90 { + t.Errorf("DefaultOptions().Quality = %v, want 90", opts.Quality) + } + if opts.Compression != 4 { + t.Errorf("DefaultOptions().Compression = %v, want 4", opts.Compression) + } +} + +func TestProcessImage(t *testing.T) { + // Create a test image + img := image.NewRGBA(image.Rect(0, 0, 100, 100)) + for y := 0; y < 100; y++ { + for x := 0; x < 100; x++ { + img.Set(x, y, color.RGBA{R: 255, G: 0, B: 0, A: 255}) + } + } + + var buf bytes.Buffer + if err := png.Encode(&buf, img); err != nil { + t.Fatalf("Failed to create test PNG: %v", err) + } + + tests := []struct { + name string + opts ProcessOptions + wantErr bool + }{ + { + name: "Default options", + opts: DefaultOptions(), + }, + { + name: "Custom quality", + opts: ProcessOptions{ + Lossless: false, + Quality: 75, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + reader := bytes.NewReader(buf.Bytes()) + result, err := ProcessImage(reader, tt.opts) + if (err != nil) != tt.wantErr { + t.Errorf("ProcessImage() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && len(result) 
== 0 { + t.Error("ProcessImage() returned empty result") + } + }) + } + + // Test with invalid input + _, err := ProcessImage(bytes.NewReader([]byte("invalid image data")), DefaultOptions()) + if err == nil { + t.Error("ProcessImage() with invalid input should return error") + } +} diff --git a/backend/pkg/logger/logger.go b/backend/pkg/logger/logger.go new file mode 100644 index 0000000..76e52b7 --- /dev/null +++ b/backend/pkg/logger/logger.go @@ -0,0 +1,51 @@ +package logger + +import ( + "os" + "strings" + "time" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "tss-rocks-be/internal/config" +) + +// Setup configures the logger based on the provided configuration +func Setup(cfg *config.Config) { + // Set log level + level := zerolog.InfoLevel + if cfg.Logging.Level != "" { + switch strings.ToLower(cfg.Logging.Level) { + case "debug": + level = zerolog.DebugLevel + case "info": + level = zerolog.InfoLevel + case "warn": + level = zerolog.WarnLevel + case "error": + level = zerolog.ErrorLevel + case "fatal": + level = zerolog.FatalLevel + case "panic": + level = zerolog.PanicLevel + } + } + zerolog.SetGlobalLevel(level) + + // Configure output format + if cfg.Logging.Format == "console" { + log.Logger = log.Output(zerolog.ConsoleWriter{ + Out: os.Stdout, + TimeFormat: time.RFC3339, + }) + } else { + // Use JSON format by default + zerolog.TimeFieldFormat = time.RFC3339 + log.Logger = zerolog.New(os.Stdout).With().Timestamp().Logger() + } +} + +// GetLogger returns the global logger instance +func GetLogger() *zerolog.Logger { + return &log.Logger +} diff --git a/backend/pkg/logger/logger_test.go b/backend/pkg/logger/logger_test.go new file mode 100644 index 0000000..319d851 --- /dev/null +++ b/backend/pkg/logger/logger_test.go @@ -0,0 +1,85 @@ +package logger + +import ( + "testing" + "tss-rocks-be/internal/config" + + "github.com/rs/zerolog" +) + +func TestSetup(t *testing.T) { + tests := []struct { + name string + config *config.Config + expectedLevel zerolog.Level + }{ + { + name: "Debug level", + config: &config.Config{ + Logging: struct { + Level string `yaml:"level"` + Format string `yaml:"format"` + }{ + Level: "debug", + Format: "json", + }, + }, + expectedLevel: zerolog.DebugLevel, + }, + { + name: "Info level", + config: &config.Config{ + Logging: struct { + Level string `yaml:"level"` + Format string `yaml:"format"` + }{ + Level: "info", + Format: "json", + }, + }, + expectedLevel: zerolog.InfoLevel, + }, + { + name: "Error level", + config: &config.Config{ + Logging: struct { + Level string `yaml:"level"` + Format string `yaml:"format"` + }{ + Level: "error", + Format: "json", + }, + }, + expectedLevel: zerolog.ErrorLevel, + }, + { + name: "Invalid level defaults to Info", + config: &config.Config{ + Logging: struct { + Level string `yaml:"level"` + Format string `yaml:"format"` + }{ + Level: "invalid", + Format: "json", + }, + }, + expectedLevel: zerolog.InfoLevel, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + Setup(tt.config) + if zerolog.GlobalLevel() != tt.expectedLevel { + t.Errorf("Setup() set level to %v, want %v", zerolog.GlobalLevel(), tt.expectedLevel) + } + }) + } +} + +func TestGetLogger(t *testing.T) { + logger := GetLogger() + if logger == nil { + t.Error("GetLogger() returned nil") + } +} diff --git a/backend/scripts/bundle-openapi.ps1 b/backend/scripts/bundle-openapi.ps1 new file mode 100644 index 0000000..a00acb6 --- /dev/null +++ b/backend/scripts/bundle-openapi.ps1 @@ -0,0 +1,29 @@ +# Bundle OpenAPI schema files 
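As a usage sketch for the logger package above (not part of this patch; the level and format literals are illustrative, and in the real server they would come from config.Load), wiring a config into the global zerolog logger looks like this:

package main

import (
	"tss-rocks-be/internal/config"
	"tss-rocks-be/pkg/logger"
)

func main() {
	// Populate only the Logging fields Setup reads.
	cfg := &config.Config{}
	cfg.Logging.Level = "debug"
	cfg.Logging.Format = "console"

	// Setup mutates the package-global zerolog logger; GetLogger returns it.
	logger.Setup(cfg)
	logger.GetLogger().Info().Str("component", "server").Msg("logger configured")
}
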
into a single file
+$ErrorActionPreference = "Stop"
+
+# Ensure we're in the project root
+$projectRoot = Split-Path -Parent $PSScriptRoot
+Set-Location $projectRoot
+
+# Create dist directory if it doesn't exist
+if (-not (Test-Path "api\dist")) {
+    New-Item -ItemType Directory -Force -Path "api\dist"
+}
+
+# Bundle the OpenAPI files
+swagger-cli bundle api/schemas/openapi.yaml --outfile api/dist/openapi.yaml --type yaml
+
+# Check if bundling was successful
+if ($LASTEXITCODE -ne 0) {
+    Write-Error "Failed to bundle OpenAPI schema"
+    exit 1
+}
+
+# Also create a JSON version for tools that prefer JSON
+swagger-cli bundle api/schemas/openapi.yaml --outfile api/dist/openapi.json --type json
+
+if ($LASTEXITCODE -ne 0) {
+    Write-Error "Failed to bundle OpenAPI schema to JSON"
+    exit 1
+}
diff --git a/backend/scripts/bundle-openapi.sh b/backend/scripts/bundle-openapi.sh
new file mode 100644
index 0000000..aa56b90
--- /dev/null
+++ b/backend/scripts/bundle-openapi.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+
+# Get the directory where the script is located
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
+
+# Change to project root
+cd "$PROJECT_ROOT"
+
+# Create dist directory if it doesn't exist
+mkdir -p api/dist
+
+# Bundle the OpenAPI files
+echo "Bundling OpenAPI schema to YAML..."
+swagger-cli bundle api/schemas/openapi.yaml --outfile api/dist/openapi.yaml --type yaml
+
+# Create JSON version
+echo "Creating JSON version..."
+swagger-cli bundle api/schemas/openapi.yaml --outfile api/dist/openapi.json --type json
+
+echo "OpenAPI schema bundling complete!"
diff --git a/frontend/.gitignore b/frontend/.gitignore
new file mode 100644
index 0000000..a547bf3
--- /dev/null
+++ b/frontend/.gitignore
@@ -0,0 +1,24 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
diff --git a/frontend/data/i18n/en.json b/frontend/data/i18n/en.json
new file mode 100644
index 0000000..5e8d59a
--- /dev/null
+++ b/frontend/data/i18n/en.json
@@ -0,0 +1,24 @@
+{
+  "categories": {
+    "man": "Man",
+    "machine": "Machine",
+    "earth": "Earth",
+    "space": "Space",
+    "futures": "Futures",
+    "exclusive": "Exclusive"
+  },
+  "nav": {
+    "home": "Home",
+    "daily": "Daily",
+    "about": "About",
+    "search": "Search"
+  },
+  "theme": {
+    "light": "Light Mode",
+    "dark": "Dark Mode",
+    "system": "System Mode"
+  },
+  "footer": {
+    "copyright": "TSS Rocks. All rights reserved."
+  }
+}
diff --git a/frontend/data/i18n/zh-Hans.json b/frontend/data/i18n/zh-Hans.json
new file mode 100644
index 0000000..b1e2848
--- /dev/null
+++ b/frontend/data/i18n/zh-Hans.json
@@ -0,0 +1,24 @@
+{
+  "categories": {
+    "man": "人类",
+    "machine": "机器",
+    "earth": "地球",
+    "space": "太空",
+    "futures": "未来",
+    "exclusive": "独家"
+  },
+  "nav": {
+    "home": "首页",
+    "daily": "每日",
+    "about": "关于",
+    "search": "搜索"
+  },
+  "theme": {
+    "light": "浅色模式",
+    "dark": "深色模式",
+    "system": "跟随系统"
+  },
+  "footer": {
+    "copyright": "TSS.Rocks. 
版权所有。" + } +} diff --git a/frontend/data/i18n/zh-Hant.json b/frontend/data/i18n/zh-Hant.json new file mode 100644 index 0000000..988497f --- /dev/null +++ b/frontend/data/i18n/zh-Hant.json @@ -0,0 +1,24 @@ +{ + "categories": { + "man": "人類", + "machine": "機器", + "earth": "地球", + "space": "太空", + "futures": "未來", + "exclusive": "獨家" + }, + "nav": { + "home": "首頁", + "daily": "每日", + "about": "關於", + "search": "搜尋" + }, + "theme": { + "light": "淺色模式", + "dark": "深色模式", + "system": "跟隨系統" + }, + "footer": { + "copyright": "TSS.Rocks. 版權所有。" + } +} diff --git a/frontend/eslint.config.js b/frontend/eslint.config.js new file mode 100644 index 0000000..82c2e20 --- /dev/null +++ b/frontend/eslint.config.js @@ -0,0 +1,28 @@ +import js from '@eslint/js'; +import globals from 'globals'; +import reactHooks from 'eslint-plugin-react-hooks'; +import reactRefresh from 'eslint-plugin-react-refresh'; +import tseslint from 'typescript-eslint'; + +export default tseslint.config( + { ignores: ['dist'] }, + { + extends: [js.configs.recommended, ...tseslint.configs.recommended], + files: ['**/*.{ts,tsx}'], + languageOptions: { + ecmaVersion: 2020, + globals: globals.browser, + }, + plugins: { + 'react-hooks': reactHooks, + 'react-refresh': reactRefresh, + }, + rules: { + ...reactHooks.configs.recommended.rules, + 'react-refresh/only-export-components': [ + 'warn', + { allowConstantExport: true }, + ], + }, + } +); diff --git a/frontend/index.html b/frontend/index.html new file mode 100644 index 0000000..18b3654 --- /dev/null +++ b/frontend/index.html @@ -0,0 +1,24 @@ + + + + + + TSS.Rocks + + + + + + + + + + + + + + +
+ + + diff --git a/frontend/package.json b/frontend/package.json new file mode 100644 index 0000000..0ad7daa --- /dev/null +++ b/frontend/package.json @@ -0,0 +1,44 @@ +{ + "name": "@tss-rocks/frontend", + "private": true, + "version": "0.1.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "lint": "eslint .", + "preview": "vite preview" + }, + "dependencies": { + "@tss-rocks/api": "workspace:*", + "@headlessui/react": "^2.2.0", + "@types/markdown-it": "^14.1.2", + "i18next": "^24.2.2", + "lucide-react": "^0.474.0", + "markdown-it": "^14.1.0", + "react": "^19.0.0", + "react-dom": "^19.0.0", + "react-i18next": "^15.4.0", + "react-icons": "^5.4.0", + "react-router-dom": "^7.1.5" + }, + "devDependencies": { + "@eslint/js": "^9.9.1", + "@tailwindcss/postcss": "^4.0.3", + "@tailwindcss/typography": "^0.5.16", + "@tailwindcss/vite": "^4.0.3", + "@types/react": "^19.0.8", + "@types/react-dom": "^19.0.3", + "@vitejs/plugin-react": "^4.3.1", + "autoprefixer": "^10.4.18", + "eslint": "^9.9.1", + "eslint-plugin-react-hooks": "^5.1.0-rc.0", + "eslint-plugin-react-refresh": "^0.4.11", + "globals": "^15.9.0", + "postcss": "^8.4.35", + "tailwindcss": "^4.0.3", + "typescript": "^5.5.3", + "typescript-eslint": "^8.3.0", + "vite": "^6.1.0" + } +} diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml new file mode 100644 index 0000000..35f9bcc --- /dev/null +++ b/frontend/pnpm-lock.yaml @@ -0,0 +1,2799 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + dependencies: + '@headlessui/react': + specifier: ^2.2.0 + version: 2.2.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@types/markdown-it': + specifier: ^14.1.2 + version: 14.1.2 + i18next: + specifier: ^24.2.2 + version: 24.2.2(typescript@5.7.3) + lucide-react: + specifier: ^0.474.0 + version: 0.474.0(react@19.0.0) + markdown-it: + specifier: ^14.1.0 + version: 14.1.0 + react: + specifier: ^19.0.0 + version: 19.0.0 + react-dom: + specifier: ^19.0.0 + version: 19.0.0(react@19.0.0) + react-i18next: + specifier: ^15.4.0 + version: 15.4.0(i18next@24.2.2(typescript@5.7.3))(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + react-icons: + specifier: ^5.4.0 + version: 5.4.0(react@19.0.0) + react-router-dom: + specifier: ^7.1.5 + version: 7.1.5(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + devDependencies: + '@eslint/js': + specifier: ^9.9.1 + version: 9.19.0 + '@tailwindcss/postcss': + specifier: ^4.0.3 + version: 4.0.3 + '@tailwindcss/typography': + specifier: ^0.5.16 + version: 0.5.16(tailwindcss@4.0.3) + '@tailwindcss/vite': + specifier: ^4.0.3 + version: 4.0.3(vite@6.1.0(jiti@2.4.2)(lightningcss@1.29.1)(yaml@2.7.0)) + '@types/react': + specifier: ^19.0.8 + version: 19.0.8 + '@types/react-dom': + specifier: ^19.0.3 + version: 19.0.3(@types/react@19.0.8) + '@vitejs/plugin-react': + specifier: ^4.3.1 + version: 4.3.4(vite@6.1.0(jiti@2.4.2)(lightningcss@1.29.1)(yaml@2.7.0)) + autoprefixer: + specifier: ^10.4.18 + version: 10.4.20(postcss@8.5.1) + eslint: + specifier: ^9.9.1 + version: 9.19.0(jiti@2.4.2) + eslint-plugin-react-hooks: + specifier: ^5.1.0-rc.0 + version: 5.1.0(eslint@9.19.0(jiti@2.4.2)) + eslint-plugin-react-refresh: + specifier: ^0.4.11 + version: 0.4.18(eslint@9.19.0(jiti@2.4.2)) + globals: + specifier: ^15.9.0 + version: 15.14.0 + postcss: + specifier: ^8.4.35 + version: 8.5.1 + tailwindcss: + specifier: ^4.0.3 + version: 4.0.3 + typescript: + specifier: ^5.5.3 + version: 5.7.3 + typescript-eslint: + specifier: ^8.3.0 + version: 
8.23.0(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) + vite: + specifier: ^6.1.0 + version: 6.1.0(jiti@2.4.2)(lightningcss@1.29.1)(yaml@2.7.0) + +packages: + + '@alloc/quick-lru@5.2.0': + resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==} + engines: {node: '>=10'} + + '@ampproject/remapping@2.3.0': + resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} + engines: {node: '>=6.0.0'} + + '@babel/code-frame@7.26.2': + resolution: {integrity: sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==} + engines: {node: '>=6.9.0'} + + '@babel/compat-data@7.26.5': + resolution: {integrity: sha512-XvcZi1KWf88RVbF9wn8MN6tYFloU5qX8KjuF3E1PVBmJ9eypXfs4GRiJwLuTZL0iSnJUKn1BFPa5BPZZJyFzPg==} + engines: {node: '>=6.9.0'} + + '@babel/core@7.26.7': + resolution: {integrity: sha512-SRijHmF0PSPgLIBYlWnG0hyeJLwXE2CgpsXaMOrtt2yp9/86ALw6oUlj9KYuZ0JN07T4eBMVIW4li/9S1j2BGA==} + engines: {node: '>=6.9.0'} + + '@babel/generator@7.26.5': + resolution: {integrity: sha512-2caSP6fN9I7HOe6nqhtft7V4g7/V/gfDsC3Ag4W7kEzzvRGKqiv0pu0HogPiZ3KaVSoNDhUws6IJjDjpfmYIXw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-compilation-targets@7.26.5': + resolution: {integrity: sha512-IXuyn5EkouFJscIDuFF5EsiSolseme1s0CZB+QxVugqJLYmKdxI1VfIBOst0SUu4rnk2Z7kqTwmoO1lp3HIfnA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-imports@7.25.9': + resolution: {integrity: sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-transforms@7.26.0': + resolution: {integrity: sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/helper-plugin-utils@7.26.5': + resolution: {integrity: sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg==} + engines: {node: '>=6.9.0'} + + '@babel/helper-string-parser@7.25.9': + resolution: {integrity: sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.25.9': + resolution: {integrity: sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-option@7.25.9': + resolution: {integrity: sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==} + engines: {node: '>=6.9.0'} + + '@babel/helpers@7.26.7': + resolution: {integrity: sha512-8NHiL98vsi0mbPQmYAGWwfcFaOy4j2HY49fXJCfuDcdE7fMIsH9a7GdaeXpIBsbT7307WU8KCMp5pUVDNL4f9A==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.26.7': + resolution: {integrity: sha512-kEvgGGgEjRUutvdVvZhbn/BxVt+5VSpwXz1j3WYXQbXDo8KzFOPNG2GQbdAiNq8g6wn1yKk7C/qrke03a84V+w==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/plugin-transform-react-jsx-self@7.25.9': + resolution: {integrity: sha512-y8quW6p0WHkEhmErnfe58r7x0A70uKphQm8Sp8cV7tjNQwK56sNVK0M73LK3WuYmsuyrftut4xAkjjgU0twaMg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-react-jsx-source@7.25.9': + resolution: {integrity: sha512-+iqjT8xmXhhYv4/uiYd8FNQsraMFZIfxVSqxxVSZP0WbbSAWvBXAul0m/zu+7Vv4O/3WtApy9pmaTMiumEZgfg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + 
+ '@babel/runtime@7.26.7': + resolution: {integrity: sha512-AOPI3D+a8dXnja+iwsUqGRjr1BbZIe771sXdapOtYI531gSqpi92vXivKcq2asu/DFpdl1ceFAKZyRzK2PCVcQ==} + engines: {node: '>=6.9.0'} + + '@babel/template@7.25.9': + resolution: {integrity: sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg==} + engines: {node: '>=6.9.0'} + + '@babel/traverse@7.26.7': + resolution: {integrity: sha512-1x1sgeyRLC3r5fQOM0/xtQKsYjyxmFjaOrLJNtZ81inNjyJHGIolTULPiSc/2qe1/qfpFLisLQYFnnZl7QoedA==} + engines: {node: '>=6.9.0'} + + '@babel/types@7.26.7': + resolution: {integrity: sha512-t8kDRGrKXyp6+tjUh7hw2RLyclsW4TRoRvRHtSyAX9Bb5ldlFh+90YAYY6awRXrlB4G5G2izNeGySpATlFzmOg==} + engines: {node: '>=6.9.0'} + + '@esbuild/aix-ppc64@0.24.2': + resolution: {integrity: sha512-thpVCb/rhxE/BnMLQ7GReQLLN8q9qbHmI55F4489/ByVg2aQaQ6kbcLb6FHkocZzQhxc4gx0sCk0tJkKBFzDhA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.24.2': + resolution: {integrity: sha512-cNLgeqCqV8WxfcTIOeL4OAtSmL8JjcN6m09XIgro1Wi7cF4t/THaWEa7eL5CMoMBdjoHOTh/vwTO/o2TRXIyzg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.24.2': + resolution: {integrity: sha512-tmwl4hJkCfNHwFB3nBa8z1Uy3ypZpxqxfTQOcHX+xRByyYgunVbZ9MzUUfb0RxaHIMnbHagwAxuTL+tnNM+1/Q==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.24.2': + resolution: {integrity: sha512-B6Q0YQDqMx9D7rvIcsXfmJfvUYLoP722bgfBlO5cGvNVb5V/+Y7nhBE3mHV9OpxBf4eAS2S68KZztiPaWq4XYw==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.24.2': + resolution: {integrity: sha512-kj3AnYWc+CekmZnS5IPu9D+HWtUI49hbnyqk0FLEJDbzCIQt7hg7ucF1SQAilhtYpIujfaHr6O0UHlzzSPdOeA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.24.2': + resolution: {integrity: sha512-WeSrmwwHaPkNR5H3yYfowhZcbriGqooyu3zI/3GGpF8AyUdsrrP0X6KumITGA9WOyiJavnGZUwPGvxvwfWPHIA==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.24.2': + resolution: {integrity: sha512-UN8HXjtJ0k/Mj6a9+5u6+2eZ2ERD7Edt1Q9IZiB5UZAIdPnVKDoG7mdTVGhHJIeEml60JteamR3qhsr1r8gXvg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.24.2': + resolution: {integrity: sha512-TvW7wE/89PYW+IevEJXZ5sF6gJRDY/14hyIGFXdIucxCsbRmLUcjseQu1SyTko+2idmCw94TgyaEZi9HUSOe3Q==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.24.2': + resolution: {integrity: sha512-7HnAD6074BW43YvvUmE/35Id9/NB7BeX5EoNkK9obndmZBUk8xmJJeU7DwmUeN7tkysslb2eSl6CTrYz6oEMQg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.24.2': + resolution: {integrity: sha512-n0WRM/gWIdU29J57hJyUdIsk0WarGd6To0s+Y+LwvlC55wt+GT/OgkwoXCXvIue1i1sSNWblHEig00GBWiJgfA==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.24.2': + resolution: {integrity: sha512-sfv0tGPQhcZOgTKO3oBE9xpHuUqguHvSo4jl+wjnKwFpapx+vUDcawbwPNuBIAYdRAvIDBfZVvXprIj3HA+Ugw==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.24.2': + resolution: {integrity: sha512-CN9AZr8kEndGooS35ntToZLTQLHEjtVB5n7dl8ZcTZMonJ7CCfStrYhrzF97eAecqVbVJ7APOEe18RPI4KLhwQ==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.24.2': + resolution: {integrity: sha512-iMkk7qr/wl3exJATwkISxI7kTcmHKE+BlymIAbHO8xanq/TjHaaVThFF6ipWzPHryoFsesNQJPE/3wFJw4+huw==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.24.2': + resolution: 
{integrity: sha512-shsVrgCZ57Vr2L8mm39kO5PPIb+843FStGt7sGGoqiiWYconSxwTiuswC1VJZLCjNiMLAMh34jg4VSEQb+iEbw==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.24.2': + resolution: {integrity: sha512-4eSFWnU9Hhd68fW16GD0TINewo1L6dRrB+oLNNbYyMUAeOD2yCK5KXGK1GH4qD/kT+bTEXjsyTCiJGHPZ3eM9Q==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.24.2': + resolution: {integrity: sha512-S0Bh0A53b0YHL2XEXC20bHLuGMOhFDO6GN4b3YjRLK//Ep3ql3erpNcPlEFed93hsQAjAQDNsvcK+hV90FubSw==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.24.2': + resolution: {integrity: sha512-8Qi4nQcCTbLnK9WoMjdC9NiTG6/E38RNICU6sUNqK0QFxCYgoARqVqxdFmWkdonVsvGqWhmm7MO0jyTqLqwj0Q==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-arm64@0.24.2': + resolution: {integrity: sha512-wuLK/VztRRpMt9zyHSazyCVdCXlpHkKm34WUyinD2lzK07FAHTq0KQvZZlXikNWkDGoT6x3TD51jKQ7gMVpopw==} + engines: {node: '>=18'} + cpu: [arm64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.24.2': + resolution: {integrity: sha512-VefFaQUc4FMmJuAxmIHgUmfNiLXY438XrL4GDNV1Y1H/RW3qow68xTwjZKfj/+Plp9NANmzbH5R40Meudu8mmw==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-arm64@0.24.2': + resolution: {integrity: sha512-YQbi46SBct6iKnszhSvdluqDmxCJA+Pu280Av9WICNwQmMxV7nLRHZfjQzwbPs3jeWnuAhE9Jy0NrnJ12Oz+0A==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.24.2': + resolution: {integrity: sha512-+iDS6zpNM6EnJyWv0bMGLWSWeXGN/HTaF/LXHXHwejGsVi+ooqDfMCCTerNFxEkM3wYVcExkeGXNqshc9iMaOA==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + + '@esbuild/sunos-x64@0.24.2': + resolution: {integrity: sha512-hTdsW27jcktEvpwNHJU4ZwWFGkz2zRJUz8pvddmXPtXDzVKTTINmlmga3ZzwcuMpUvLw7JkLy9QLKyGpD2Yxig==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.24.2': + resolution: {integrity: sha512-LihEQ2BBKVFLOC9ZItT9iFprsE9tqjDjnbulhHoFxYQtQfai7qfluVODIYxt1PgdoyQkz23+01rzwNwYfutxUQ==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.24.2': + resolution: {integrity: sha512-q+iGUwfs8tncmFC9pcnD5IvRHAzmbwQ3GPS5/ceCyHdjXubwQWI12MKWSNSMYLJMq23/IUCvJMS76PDqXe1fxA==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.24.2': + resolution: {integrity: sha512-7VTgWzgMGvup6aSqDPLiW5zHaxYJGTO4OokMjIlrCtf+VpEL+cXKtCvg723iguPYI5oaUNdS+/V7OU2gvXVWEg==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + + '@eslint-community/eslint-utils@4.4.1': + resolution: {integrity: sha512-s3O3waFUrMV8P/XaF/+ZTp1X9XBZW1a4B97ZnjQF2KYWaFD2A8KyFBsrsfSjEmjn3RGWAIuvlneuZm3CUK3jbA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + + '@eslint-community/regexpp@4.12.1': + resolution: {integrity: sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + + '@eslint/config-array@0.19.2': + resolution: {integrity: sha512-GNKqxfHG2ySmJOBSHg7LxeUx4xpuCoFjacmlCoYWEbaPXLwvfIjixRI12xCQZeULksQb23uiA8F40w5TojpV7w==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/core@0.10.0': + resolution: {integrity: sha512-gFHJ+xBOo4G3WRlR1e/3G8A6/KZAH6zcE/hkLRCZTi/B9avAG365QhFA8uOGzTMqgTghpn7/fSnscW++dpMSAw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/eslintrc@3.2.0': + resolution: {integrity: sha512-grOjVNN8P3hjJn/eIETF1wwd12DdnwFDoyceUJLYYdkpbwq3nLi+4fqrTAONx7XDALqlL220wC/RHSC/QTI/0w==} 
+ engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/js@9.19.0': + resolution: {integrity: sha512-rbq9/g38qjfqFLOVPvwjIvFFdNziEC5S65jmjPw5r6A//QH+W91akh9irMwjDN8zKUTak6W9EsAv4m/7Wnw0UQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/object-schema@2.1.6': + resolution: {integrity: sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/plugin-kit@0.2.5': + resolution: {integrity: sha512-lB05FkqEdUg2AA0xEbUz0SnkXT1LcCTa438W4IWTUh4hdOnVbQyOJ81OrDXsJk/LSiJHubgGEFoR5EHq1NsH1A==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@floating-ui/core@1.6.9': + resolution: {integrity: sha512-uMXCuQ3BItDUbAMhIXw7UPXRfAlOAvZzdK9BWpE60MCn+Svt3aLn9jsPTi/WNGlRUu2uI0v5S7JiIUsbsvh3fw==} + + '@floating-ui/dom@1.6.13': + resolution: {integrity: sha512-umqzocjDgNRGTuO7Q8CU32dkHkECqI8ZdMZ5Swb6QAM0t5rnlrN3lGo1hdpscRd3WS8T6DKYK4ephgIH9iRh3w==} + + '@floating-ui/react-dom@2.1.2': + resolution: {integrity: sha512-06okr5cgPzMNBy+Ycse2A6udMi4bqwW/zgBF/rwjcNqWkyr82Mcg8b0vjX8OJpZFy/FKjJmw6wV7t44kK6kW7A==} + peerDependencies: + react: '>=16.8.0' + react-dom: '>=16.8.0' + + '@floating-ui/react@0.26.28': + resolution: {integrity: sha512-yORQuuAtVpiRjpMhdc0wJj06b9JFjrYF4qp96j++v2NBpbi6SEGF7donUJ3TMieerQ6qVkAv1tgr7L4r5roTqw==} + peerDependencies: + react: '>=16.8.0' + react-dom: '>=16.8.0' + + '@floating-ui/utils@0.2.9': + resolution: {integrity: sha512-MDWhGtE+eHw5JW7lq4qhc5yRLS11ERl1c7Z6Xd0a58DozHES6EnNNwUWbMiG4J9Cgj053Bhk8zvlhFYKVhULwg==} + + '@headlessui/react@2.2.0': + resolution: {integrity: sha512-RzCEg+LXsuI7mHiSomsu/gBJSjpupm6A1qIZ5sWjd7JhARNlMiSA4kKfJpCKwU9tE+zMRterhhrP74PvfJrpXQ==} + engines: {node: '>=10'} + peerDependencies: + react: ^18 || ^19 || ^19.0.0-rc + react-dom: ^18 || ^19 || ^19.0.0-rc + + '@humanfs/core@0.19.1': + resolution: {integrity: sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==} + engines: {node: '>=18.18.0'} + + '@humanfs/node@0.16.6': + resolution: {integrity: sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw==} + engines: {node: '>=18.18.0'} + + '@humanwhocodes/module-importer@1.0.1': + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + + '@humanwhocodes/retry@0.3.1': + resolution: {integrity: sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA==} + engines: {node: '>=18.18'} + + '@humanwhocodes/retry@0.4.1': + resolution: {integrity: sha512-c7hNEllBlenFTHBky65mhq8WD2kbN9Q6gk0bTk8lSBvc554jpXSkST1iePudpt7+A/AQvuHs9EMqjHDXMY1lrA==} + engines: {node: '>=18.18'} + + '@jridgewell/gen-mapping@0.3.8': + resolution: {integrity: sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==} + engines: {node: '>=6.0.0'} + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + '@jridgewell/set-array@1.2.1': + resolution: {integrity: sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.0': + resolution: {integrity: sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==} + + '@jridgewell/trace-mapping@0.3.25': + 
resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} + + '@nodelib/fs.scandir@2.1.5': + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: '>= 8'} + + '@nodelib/fs.stat@2.0.5': + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + + '@nodelib/fs.walk@1.2.8': + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + + '@react-aria/focus@3.19.1': + resolution: {integrity: sha512-bix9Bu1Ue7RPcYmjwcjhB14BMu2qzfJ3tMQLqDc9pweJA66nOw8DThy3IfVr8Z7j2PHktOLf9kcbiZpydKHqzg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@react-aria/interactions@3.23.0': + resolution: {integrity: sha512-0qR1atBIWrb7FzQ+Tmr3s8uH5mQdyRH78n0krYaG8tng9+u1JlSi8DGRSaC9ezKyNB84m7vHT207xnHXGeJ3Fg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@react-aria/ssr@3.9.7': + resolution: {integrity: sha512-GQygZaGlmYjmYM+tiNBA5C6acmiDWF52Nqd40bBp0Znk4M4hP+LTmI0lpI1BuKMw45T8RIhrAsICIfKwZvi2Gg==} + engines: {node: '>= 12'} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@react-aria/utils@3.27.0': + resolution: {integrity: sha512-p681OtApnKOdbeN8ITfnnYqfdHS0z7GE+4l8EXlfLnr70Rp/9xicBO6d2rU+V/B3JujDw2gPWxYKEnEeh0CGCw==} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@react-stately/utils@3.10.5': + resolution: {integrity: sha512-iMQSGcpaecghDIh3mZEpZfoFH3ExBwTtuBEcvZ2XnGzCgQjeYXcMdIUwAfVQLXFTdHUHGF6Gu6/dFrYsCzySBQ==} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@react-types/shared@3.27.0': + resolution: {integrity: sha512-gvznmLhi6JPEf0bsq7SwRYTHAKKq/wcmKqFez9sRdbED+SPMUmK5omfZ6w3EwUFQHbYUa4zPBYedQ7Knv70RMw==} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@rollup/rollup-android-arm-eabi@4.34.3': + resolution: {integrity: sha512-8kq/NjMKkMTGKMPldWihncOl62kgnLYk7cW+/4NCUWfS70/wz4+gQ7rMxMMpZ3dIOP/xw7wKNzIuUnN/H2GfUg==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.34.3': + resolution: {integrity: sha512-1PqMHiuRochQ6++SDI7SaRDWJKr/NgAlezBi5nOne6Da6IWJo3hK0TdECBDwd92IUDPG4j/bZmWuwOnomNT8wA==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.34.3': + resolution: {integrity: sha512-fqbrykX4mGV3DlCDXhF4OaMGcchd2tmLYxVt3On5oOZWVDFfdEoYAV2alzNChl8OzNaeMAGqm1f7gk7eIw/uDg==} + cpu: [arm64] + os: [darwin] + + '@rollup/rollup-darwin-x64@4.34.3': + resolution: {integrity: sha512-8Wxrx/KRvMsTyLTbdrMXcVKfpW51cCNW8x7iQD72xSEbjvhCY3b+w83Bea3nQfysTMR7K28esc+ZFITThXm+1w==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-freebsd-arm64@4.34.3': + resolution: {integrity: sha512-lpBmV2qSiELh+ATQPTjQczt5hvbTLsE0c43Rx4bGxN2VpnAZWy77we7OO62LyOSZNY7CzjMoceRPc+Lt4e9J6A==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.34.3': + resolution: {integrity: sha512-sNPvBIXpgaYcI6mAeH13GZMXFrrw5mdZVI1M9YQPRG2LpjwL8DSxSIflZoh/B5NEuOi53kxsR/S2GKozK1vDXA==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.34.3': + resolution: {integrity: 
sha512-MW6N3AoC61OfE1VgnN5O1OW0gt8VTbhx9s/ZEPLBM11wEdHjeilPzOxVmmsrx5YmejpGPvez8QwGGvMU+pGxpw==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm-musleabihf@4.34.3': + resolution: {integrity: sha512-2SQkhr5xvatYq0/+H6qyW0zvrQz9LM4lxGkpWURLoQX5+yP8MsERh4uWmxFohOvwCP6l/+wgiHZ1qVwLDc7Qmw==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm64-gnu@4.34.3': + resolution: {integrity: sha512-R3JLYt8YoRwKI5shJsovLpcR6pwIMui/MGG/MmxZ1DYI3iRSKI4qcYrvYgDf4Ss2oCR3RL3F3dYK7uAGQgMIuQ==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-arm64-musl@4.34.3': + resolution: {integrity: sha512-4XQhG8v/t3S7Rxs7rmFUuM6j09hVrTArzONS3fUZ6oBRSN/ps9IPQjVhp62P0W3KhqJdQADo/MRlYRMdgxr/3w==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-loongarch64-gnu@4.34.3': + resolution: {integrity: sha512-QlW1jCUZ1LHUIYCAK2FciVw1ptHsxzApYVi05q7bz2A8oNE8QxQ85NhM4arLxkAlcnS42t4avJbSfzSQwbIaKg==} + cpu: [loong64] + os: [linux] + + '@rollup/rollup-linux-powerpc64le-gnu@4.34.3': + resolution: {integrity: sha512-kMbLToizVeCcN69+nnm20Dh0hrRIAjgaaL+Wh0gWZcNt8e542d2FUGtsyuNsHVNNF3gqTJrpzUGIdwMGLEUM7g==} + cpu: [ppc64] + os: [linux] + + '@rollup/rollup-linux-riscv64-gnu@4.34.3': + resolution: {integrity: sha512-YgD0DnZ3CHtvXRH8rzjVSxwI0kMTr0RQt3o1N92RwxGdx7YejzbBO0ELlSU48DP96u1gYYVWfUhDRyaGNqJqJg==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-s390x-gnu@4.34.3': + resolution: {integrity: sha512-dIOoOz8altjp6UjAi3U9EW99s8nta4gzi52FeI45GlPyrUH4QixUoBMH9VsVjt+9A2RiZBWyjYNHlJ/HmJOBCQ==} + cpu: [s390x] + os: [linux] + + '@rollup/rollup-linux-x64-gnu@4.34.3': + resolution: {integrity: sha512-lOyG3aF4FTKrhpzXfMmBXgeKUUXdAWmP2zSNf8HTAXPqZay6QYT26l64hVizBjq+hJx3pl0DTEyvPi9sTA6VGA==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-linux-x64-musl@4.34.3': + resolution: {integrity: sha512-usztyYLu2i+mYzzOjqHZTaRXbUOqw3P6laNUh1zcqxbPH1P2Tz/QdJJCQSnGxCtsRQeuU2bCyraGMtMumC46rw==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-win32-arm64-msvc@4.34.3': + resolution: {integrity: sha512-ojFOKaz/ZyalIrizdBq2vyc2f0kFbJahEznfZlxdB6pF9Do6++i1zS5Gy6QLf8D7/S57MHrmBLur6AeRYeQXSA==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.34.3': + resolution: {integrity: sha512-K/V97GMbNa+Da9mGcZqmSl+DlJmWfHXTuI9V8oB2evGsQUtszCl67+OxWjBKpeOnYwox9Jpmt/J6VhpeRCYqow==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-msvc@4.34.3': + resolution: {integrity: sha512-CUypcYP31Q8O04myV6NKGzk9GVXslO5EJNfmARNSzLF2A+5rmZUlDJ4et6eoJaZgBT9wrC2p4JZH04Vkic8HdQ==} + cpu: [x64] + os: [win32] + + '@swc/helpers@0.5.15': + resolution: {integrity: sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==} + + '@tailwindcss/node@4.0.3': + resolution: {integrity: sha512-QsVJokOl0pJ4AbJV33D2npvLcHGPWi5MOSZtrtE0GT3tSx+3D0JE2lokLA8yHS1x3oCY/3IyRyy7XX6tmzid7A==} + + '@tailwindcss/oxide-android-arm64@4.0.3': + resolution: {integrity: sha512-S8XOTQuMnpijZRlPm5HBzPJjZ28quB+40LSRHjRnQF6rRYKsvpr1qkY7dfwsetNdd+kMLOMDsvmuT8WnqqETvg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [android] + + '@tailwindcss/oxide-darwin-arm64@4.0.3': + resolution: {integrity: sha512-smrY2DpzhXvgDhZtQlYAl8+vxJ04lv2/64C1eiRxvsRT2nkw/q+zA1/eAYKvUHat6cIuwqDku3QucmrUT6pCeg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + + '@tailwindcss/oxide-darwin-x64@4.0.3': + resolution: {integrity: sha512-NTz8x/LcGUjpZAWUxz0ZuzHao90Wj9spoQgomwB+/hgceh5gcJDfvaBYqxLFpKzVglpnbDSq1Fg0p0zI4oa5Pg==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + + '@tailwindcss/oxide-freebsd-x64@4.0.3': + resolution: {integrity: 
sha512-yQc9Q0JCOp3kkAV8gKgDctXO60IkQhHpqGB+KgOccDtD5UmN6Q5+gd+lcsDyQ7N8dRuK1fAud51xQpZJgKfm7g==} + engines: {node: '>= 10'} + cpu: [x64] + os: [freebsd] + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.0.3': + resolution: {integrity: sha512-e1ivVMLSnxTOU1O3npnxN16FEyWM/g3SuH2pP6udxXwa0/SnSAijRwcAYRpqIlhVKujr158S8UeHxQjC4fGl4w==} + engines: {node: '>= 10'} + cpu: [arm] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-gnu@4.0.3': + resolution: {integrity: sha512-PLrToqQqX6sdJ9DmMi8IxZWWrfjc9pdi9AEEPTrtMts3Jm9HBi1WqEeF1VwZZ2aW9TXloE5OwA35zuuq1Bhb/Q==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-musl@4.0.3': + resolution: {integrity: sha512-YlzRxx7N1ampfgSKzEDw0iwDkJXUInR4cgNEqmR4TzHkU2Vhg59CGPJrTI7dxOBofD8+O35R13Nk9Ytyv0JUFg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-gnu@4.0.3': + resolution: {integrity: sha512-Xfc3z/li6XkuD7Hs+Uk6pjyCXnfnd9zuQTKOyDTZJ544xc2yoMKUkuDw6Et9wb31MzU2/c0CIUpTDa71lL9KHw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-musl@4.0.3': + resolution: {integrity: sha512-ugKVqKzwa/cjmqSQG17aS9DYrEcQ/a5NITcgmOr3JLW4Iz64C37eoDlkC8tIepD3S/Td/ywKAolTQ8fKbjEL4g==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@tailwindcss/oxide-win32-arm64-msvc@4.0.3': + resolution: {integrity: sha512-qHPDMl+UUwsk1RMJMgAXvhraWqUUT+LR/tkXix5RA39UGxtTrHwsLIN1AhNxI5i2RFXAXfmFXDqZCdyQ4dWmAQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + + '@tailwindcss/oxide-win32-x64-msvc@4.0.3': + resolution: {integrity: sha512-+ujwN4phBGyOsPyLgGgeCyUm4Mul+gqWVCIGuSXWgrx9xVUnf6LVXrw0BDBc9Aq1S2qMyOTX4OkCGbZeoIo8Qw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + + '@tailwindcss/oxide@4.0.3': + resolution: {integrity: sha512-FFcp3VNvRjjmFA39ORM27g2mbflMQljhvM7gxBAujHxUy4LXlKa6yMF9wbHdTbPqTONiCyyOYxccvJyVyI/XBg==} + engines: {node: '>= 10'} + + '@tailwindcss/postcss@4.0.3': + resolution: {integrity: sha512-qUyxuhuI2eTgRJ+qfCQRAr69Cw7BdSz+PoNFUNoRuhPjikNC8+sxK+Mi/chaXAXewjv/zbf6if6z6ItVLh+e9Q==} + + '@tailwindcss/typography@0.5.16': + resolution: {integrity: sha512-0wDLwCVF5V3x3b1SGXPCDcdsbDHMBe+lkFzBRaHeLvNi+nrrnZ1lA18u+OTWO8iSWU2GxUOCvlXtDuqftc1oiA==} + peerDependencies: + tailwindcss: '>=3.0.0 || insiders || >=4.0.0-alpha.20 || >=4.0.0-beta.1' + + '@tailwindcss/vite@4.0.3': + resolution: {integrity: sha512-Qj6rSO+EvXnNDymloKZ11D54JJTnDrkRWJBzNHENDxjt0HtrCZJbSLIrcJ/WdaoU4othrel/oFqHpO/doxIS/Q==} + peerDependencies: + vite: ^5.2.0 || ^6 + + '@tanstack/react-virtual@3.12.0': + resolution: {integrity: sha512-6krceiPN07kpxXmU6m8AY7EL0X1gHLu8m3nJdh4phvktzVNxkQfBmSwnRUpoUjGQO1PAn8wSAhYaL8hY1cS1vw==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + '@tanstack/virtual-core@3.12.0': + resolution: {integrity: sha512-7mDINtua3v/pOnn6WUmuT9dPXYSO7WidFej7JzoAfqEOcbbpt/iZ1WPqd+eg+FnrL9nUJK8radqj4iAU51Zchg==} + + '@types/babel__core@7.20.5': + resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} + + '@types/babel__generator@7.6.8': + resolution: {integrity: sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==} + + '@types/babel__template@7.4.4': + resolution: {integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==} + + '@types/babel__traverse@7.20.6': + resolution: {integrity: 
sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg==} + + '@types/cookie@0.6.0': + resolution: {integrity: sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==} + + '@types/estree@1.0.6': + resolution: {integrity: sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==} + + '@types/json-schema@7.0.15': + resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} + + '@types/linkify-it@5.0.0': + resolution: {integrity: sha512-sVDA58zAw4eWAffKOaQH5/5j3XeayukzDk+ewSsnv3p4yJEZHCCzMDiZM8e0OUrRvmpGZ85jf4yDHkHsgBNr9Q==} + + '@types/markdown-it@14.1.2': + resolution: {integrity: sha512-promo4eFwuiW+TfGxhi+0x3czqTYJkG8qB17ZUJiVF10Xm7NLVRSLUsfRTU/6h1e24VvRnXCx+hG7li58lkzog==} + + '@types/mdurl@2.0.0': + resolution: {integrity: sha512-RGdgjQUZba5p6QEFAVx2OGb8rQDL/cPRG7GiedRzMcJ1tYnUANBncjbSB1NRGwbvjcPeikRABz2nshyPk1bhWg==} + + '@types/react-dom@19.0.3': + resolution: {integrity: sha512-0Knk+HJiMP/qOZgMyNFamlIjw9OFCsyC2ZbigmEEyXXixgre6IQpm/4V+r3qH4GC1JPvRJKInw+on2rV6YZLeA==} + peerDependencies: + '@types/react': ^19.0.0 + + '@types/react@19.0.8': + resolution: {integrity: sha512-9P/o1IGdfmQxrujGbIMDyYaaCykhLKc0NGCtYcECNUr9UAaDe4gwvV9bR6tvd5Br1SG0j+PBpbKr2UYY8CwqSw==} + + '@typescript-eslint/eslint-plugin@8.23.0': + resolution: {integrity: sha512-vBz65tJgRrA1Q5gWlRfvoH+w943dq9K1p1yDBY2pc+a1nbBLZp7fB9+Hk8DaALUbzjqlMfgaqlVPT1REJdkt/w==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + '@typescript-eslint/parser': ^8.0.0 || ^8.0.0-alpha.0 + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <5.8.0' + + '@typescript-eslint/parser@8.23.0': + resolution: {integrity: sha512-h2lUByouOXFAlMec2mILeELUbME5SZRN/7R9Cw2RD2lRQQY08MWMM+PmVVKKJNK1aIwqTo9t/0CvOxwPbRIE2Q==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <5.8.0' + + '@typescript-eslint/scope-manager@8.23.0': + resolution: {integrity: sha512-OGqo7+dXHqI7Hfm+WqkZjKjsiRtFUQHPdGMXzk5mYXhJUedO7e/Y7i8AK3MyLMgZR93TX4bIzYrfyVjLC+0VSw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@typescript-eslint/type-utils@8.23.0': + resolution: {integrity: sha512-iIuLdYpQWZKbiH+RkCGc6iu+VwscP5rCtQ1lyQ7TYuKLrcZoeJVpcLiG8DliXVkUxirW/PWlmS+d6yD51L9jvA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <5.8.0' + + '@typescript-eslint/types@8.23.0': + resolution: {integrity: sha512-1sK4ILJbCmZOTt9k4vkoulT6/y5CHJ1qUYxqpF1K/DBAd8+ZUL4LlSCxOssuH5m4rUaaN0uS0HlVPvd45zjduQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@typescript-eslint/typescript-estree@8.23.0': + resolution: {integrity: sha512-LcqzfipsB8RTvH8FX24W4UUFk1bl+0yTOf9ZA08XngFwMg4Kj8A+9hwz8Cr/ZS4KwHrmo9PJiLZkOt49vPnuvQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <5.8.0' + + '@typescript-eslint/utils@8.23.0': + resolution: {integrity: sha512-uB/+PSo6Exu02b5ZEiVtmY6RVYO7YU5xqgzTIVZwTHvvK3HsL8tZZHFaTLFtRG3CsV4A5mhOv+NZx5BlhXPyIA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <5.8.0' + + '@typescript-eslint/visitor-keys@8.23.0': + resolution: {integrity: sha512-oWWhcWDLwDfu++BGTZcmXWqpwtkwb5o7fxUIGksMQQDSdPW9prsSnfIOZMlsj4vBOSrcnjIUZMiIjODgGosFhQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + 
'@vitejs/plugin-react@4.3.4': + resolution: {integrity: sha512-SCCPBJtYLdE8PX/7ZQAs1QAZ8Jqwih+0VBLum1EGqmCCQal+MIUqLCzj3ZUy8ufbC0cAM4LRlSTm7IQJwWT4ug==} + engines: {node: ^14.18.0 || >=16.0.0} + peerDependencies: + vite: ^4.2.0 || ^5.0.0 || ^6.0.0 + + acorn-jsx@5.3.2: + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + acorn@8.14.0: + resolution: {integrity: sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==} + engines: {node: '>=0.4.0'} + hasBin: true + + ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + autoprefixer@10.4.20: + resolution: {integrity: sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==} + engines: {node: ^10 || ^12 || >=14} + hasBin: true + peerDependencies: + postcss: ^8.1.0 + + balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + + brace-expansion@2.0.1: + resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} + + braces@3.0.3: + resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} + engines: {node: '>=8'} + + browserslist@4.24.4: + resolution: {integrity: sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + + callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + + caniuse-lite@1.0.30001697: + resolution: {integrity: sha512-GwNPlWJin8E+d7Gxq96jxM6w0w+VFeyyXRsjU58emtkYqnbwHqXm5uT2uCmO0RQE9htWknOP4xtBlLmM/gWxvQ==} + + chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + + clsx@2.1.1: + resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==} + engines: {node: '>=6'} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + + convert-source-map@2.0.0: + resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} + + cookie@1.0.2: + resolution: {integrity: 
sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA==} + engines: {node: '>=18'} + + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + + cssesc@3.0.0: + resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} + engines: {node: '>=4'} + hasBin: true + + csstype@3.1.3: + resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==} + + debug@4.4.0: + resolution: {integrity: sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + + detect-libc@1.0.3: + resolution: {integrity: sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==} + engines: {node: '>=0.10'} + hasBin: true + + electron-to-chromium@1.5.92: + resolution: {integrity: sha512-BeHgmNobs05N1HMmMZ7YIuHfYBGlq/UmvlsTgg+fsbFs9xVMj+xJHFg19GN04+9Q+r8Xnh9LXqaYIyEWElnNgQ==} + + enhanced-resolve@5.18.1: + resolution: {integrity: sha512-ZSW3ma5GkcQBIpwZTSRAI8N71Uuwgs93IezB7mf7R60tC8ZbJideoDNKjHn2O9KIlx6rkGTTEk1xUCK2E1Y2Yg==} + engines: {node: '>=10.13.0'} + + entities@4.5.0: + resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} + engines: {node: '>=0.12'} + + esbuild@0.24.2: + resolution: {integrity: sha512-+9egpBW8I3CD5XPe0n6BfT5fxLzxrlDzqydF3aviG+9ni1lDC/OvMHcxqEFV0+LANZG5R1bFMWfUrjVsdwxJvA==} + engines: {node: '>=18'} + hasBin: true + + escalade@3.2.0: + resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} + engines: {node: '>=6'} + + escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + + eslint-plugin-react-hooks@5.1.0: + resolution: {integrity: sha512-mpJRtPgHN2tNAvZ35AMfqeB3Xqeo273QxrHJsbBEPWODRM4r0yB6jfoROqKEYrOn27UtRPpcpHc2UqyBSuUNTw==} + engines: {node: '>=10'} + peerDependencies: + eslint: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0 + + eslint-plugin-react-refresh@0.4.18: + resolution: {integrity: sha512-IRGEoFn3OKalm3hjfolEWGqoF/jPqeEYFp+C8B0WMzwGwBMvlRDQd06kghDhF0C61uJ6WfSDhEZE/sAQjduKgw==} + peerDependencies: + eslint: '>=8.40' + + eslint-scope@8.2.0: + resolution: {integrity: sha512-PHlWUfG6lvPc3yvP5A4PNyBL1W8fkDUccmI21JUu/+GKZBoH/W5u6usENXUrWFRsyoW5ACUjFGgAFQp5gUlb/A==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint-visitor-keys@4.2.0: + resolution: {integrity: sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + eslint@9.19.0: + resolution: {integrity: sha512-ug92j0LepKlbbEv6hD911THhoRHmbdXt2gX+VDABAW/Ir7D3nqKdv5Pf5vtlyY6HQMTEP2skXY43ueqTCWssEA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + 
hasBin: true + peerDependencies: + jiti: '*' + peerDependenciesMeta: + jiti: + optional: true + + espree@10.3.0: + resolution: {integrity: sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + esquery@1.6.0: + resolution: {integrity: sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==} + engines: {node: '>=0.10'} + + esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + + estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + + esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + + fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + + fast-glob@3.3.3: + resolution: {integrity: sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==} + engines: {node: '>=8.6.0'} + + fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + + fastq@1.19.0: + resolution: {integrity: sha512-7SFSRCNjBQIZH/xZR3iy5iQYR8aGBE0h3VG6/cwlbrpdciNYBMotQav8c1XI3HjHH+NikUpP53nPdlZSdWmFzA==} + + file-entry-cache@8.0.0: + resolution: {integrity: sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==} + engines: {node: '>=16.0.0'} + + fill-range@7.1.1: + resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} + engines: {node: '>=8'} + + find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + + flat-cache@4.0.1: + resolution: {integrity: sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==} + engines: {node: '>=16'} + + flatted@3.3.2: + resolution: {integrity: sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA==} + + fraction.js@4.3.7: + resolution: {integrity: sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + gensync@1.0.0-beta.2: + resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} + engines: {node: '>=6.9.0'} + + glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + + glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + + globals@11.12.0: + resolution: {integrity: 
sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} + engines: {node: '>=4'} + + globals@14.0.0: + resolution: {integrity: sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==} + engines: {node: '>=18'} + + globals@15.14.0: + resolution: {integrity: sha512-OkToC372DtlQeje9/zHIo5CT8lRP/FUgEOKBEhU4e0abL7J7CD24fD9ohiLN5hagG/kWCYj4K5oaxxtj2Z0Dig==} + engines: {node: '>=18'} + + graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + + graphemer@1.4.0: + resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + html-parse-stringify@3.0.1: + resolution: {integrity: sha512-KknJ50kTInJ7qIScF3jeaFRpMpE8/lfiTdzf/twXyPBLAGrLRTmkz3AdTnKeh40X8k9L2fdYwEp/42WGXIRGcg==} + + i18next@24.2.2: + resolution: {integrity: sha512-NE6i86lBCKRYZa5TaUDkU5S4HFgLIEJRLr3Whf2psgaxBleQ2LC1YW1Vc+SCgkAW7VEzndT6al6+CzegSUHcTQ==} + peerDependencies: + typescript: ^5 + peerDependenciesMeta: + typescript: + optional: true + + ignore@5.3.2: + resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} + engines: {node: '>= 4'} + + import-fresh@3.3.1: + resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==} + engines: {node: '>=6'} + + imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + + is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + jiti@2.4.2: + resolution: {integrity: sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==} + hasBin: true + + js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + + js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true + + jsesc@3.1.0: + resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==} + engines: {node: '>=6'} + hasBin: true + + json-buffer@3.0.1: + resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + + json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + + json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: 
sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + + json5@2.2.3: + resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==} + engines: {node: '>=6'} + hasBin: true + + keyv@4.5.4: + resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + + levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + + lightningcss-darwin-arm64@1.29.1: + resolution: {integrity: sha512-HtR5XJ5A0lvCqYAoSv2QdZZyoHNttBpa5EP9aNuzBQeKGfbyH5+UipLWvVzpP4Uml5ej4BYs5I9Lco9u1fECqw==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [darwin] + + lightningcss-darwin-x64@1.29.1: + resolution: {integrity: sha512-k33G9IzKUpHy/J/3+9MCO4e+PzaFblsgBjSGlpAaFikeBFm8B/CkO3cKU9oI4g+fjS2KlkLM/Bza9K/aw8wsNA==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [darwin] + + lightningcss-freebsd-x64@1.29.1: + resolution: {integrity: sha512-0SUW22fv/8kln2LnIdOCmSuXnxgxVC276W5KLTwoehiO0hxkacBxjHOL5EtHD8BAXg2BvuhsJPmVMasvby3LiQ==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [freebsd] + + lightningcss-linux-arm-gnueabihf@1.29.1: + resolution: {integrity: sha512-sD32pFvlR0kDlqsOZmYqH/68SqUMPNj+0pucGxToXZi4XZgZmqeX/NkxNKCPsswAXU3UeYgDSpGhu05eAufjDg==} + engines: {node: '>= 12.0.0'} + cpu: [arm] + os: [linux] + + lightningcss-linux-arm64-gnu@1.29.1: + resolution: {integrity: sha512-0+vClRIZ6mmJl/dxGuRsE197o1HDEeeRk6nzycSy2GofC2JsY4ifCRnvUWf/CUBQmlrvMzt6SMQNMSEu22csWQ==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [linux] + + lightningcss-linux-arm64-musl@1.29.1: + resolution: {integrity: sha512-UKMFrG4rL/uHNgelBsDwJcBqVpzNJbzsKkbI3Ja5fg00sgQnHw/VrzUTEc4jhZ+AN2BvQYz/tkHu4vt1kLuJyw==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [linux] + + lightningcss-linux-x64-gnu@1.29.1: + resolution: {integrity: sha512-u1S+xdODy/eEtjADqirA774y3jLcm8RPtYztwReEXoZKdzgsHYPl0s5V52Tst+GKzqjebkULT86XMSxejzfISw==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [linux] + + lightningcss-linux-x64-musl@1.29.1: + resolution: {integrity: sha512-L0Tx0DtaNUTzXv0lbGCLB/c/qEADanHbu4QdcNOXLIe1i8i22rZRpbT3gpWYsCh9aSL9zFujY/WmEXIatWvXbw==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [linux] + + lightningcss-win32-arm64-msvc@1.29.1: + resolution: {integrity: sha512-QoOVnkIEFfbW4xPi+dpdft/zAKmgLgsRHfJalEPYuJDOWf7cLQzYg0DEh8/sn737FaeMJxHZRc1oBreiwZCjog==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [win32] + + lightningcss-win32-x64-msvc@1.29.1: + resolution: {integrity: sha512-NygcbThNBe4JElP+olyTI/doBNGJvLs3bFCRPdvuCcxZCcCZ71B858IHpdm7L1btZex0FvCmM17FK98Y9MRy1Q==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [win32] + + lightningcss@1.29.1: + resolution: {integrity: sha512-FmGoeD4S05ewj+AkhTY+D+myDvXI6eL27FjHIjoyUkO/uw7WZD1fBVs0QxeYWa7E17CUHJaYX/RUGISCtcrG4Q==} + engines: {node: '>= 12.0.0'} + + linkify-it@5.0.0: + resolution: {integrity: sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==} + + locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + + lodash.castarray@4.4.0: + resolution: {integrity: sha512-aVx8ztPv7/2ULbArGJ2Y42bG1mEQ5mGjpdvrbJcJFU3TbYybe+QlLS4pst9zV52ymy2in1KpFPiZnAOATxD4+Q==} + + lodash.isplainobject@4.0.6: + resolution: {integrity: 
sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==} + + lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + + lru-cache@5.1.1: + resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} + + lucide-react@0.474.0: + resolution: {integrity: sha512-CmghgHkh0OJNmxGKWc0qfPJCYHASPMVSyGY8fj3xgk4v84ItqDg64JNKFZn5hC6E0vHi6gxnbCgwhyVB09wQtA==} + peerDependencies: + react: ^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + markdown-it@14.1.0: + resolution: {integrity: sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==} + hasBin: true + + mdurl@2.0.0: + resolution: {integrity: sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==} + + merge2@1.4.1: + resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} + + micromatch@4.0.8: + resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} + engines: {node: '>=8.6'} + + minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + + minimatch@9.0.5: + resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} + engines: {node: '>=16 || 14 >=14.17'} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + nanoid@3.3.8: + resolution: {integrity: sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + node-releases@2.0.19: + resolution: {integrity: sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==} + + normalize-range@0.1.2: + resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==} + engines: {node: '>=0.10.0'} + + optionator@0.9.4: + resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} + engines: {node: '>= 0.8.0'} + + p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + + parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + picocolors@1.1.1: + resolution: {integrity: 
sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + postcss-selector-parser@6.0.10: + resolution: {integrity: sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==} + engines: {node: '>=4'} + + postcss-value-parser@4.2.0: + resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==} + + postcss@8.5.1: + resolution: {integrity: sha512-6oz2beyjc5VMn/KV1pPw8fliQkhBXrVn1Z3TVyqZxU8kZpzEKhBdmCFqI6ZbmGtamQvQGuU1sgPTk8ZrXDD7jQ==} + engines: {node: ^10 || ^12 || >=14} + + prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + + punycode.js@2.3.1: + resolution: {integrity: sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==} + engines: {node: '>=6'} + + punycode@2.3.1: + resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} + engines: {node: '>=6'} + + queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + + react-dom@19.0.0: + resolution: {integrity: sha512-4GV5sHFG0e/0AD4X+ySy6UJd3jVl1iNsNHdpad0qhABJ11twS3TTBnseqsKurKcsNqCEFeGL3uLpVChpIO3QfQ==} + peerDependencies: + react: ^19.0.0 + + react-i18next@15.4.0: + resolution: {integrity: sha512-Py6UkX3zV08RTvL6ZANRoBh9sL/ne6rQq79XlkHEdd82cZr2H9usbWpUNVadJntIZP2pu3M2rL1CN+5rQYfYFw==} + peerDependencies: + i18next: '>= 23.2.3' + react: '>= 16.8.0' + react-dom: '*' + react-native: '*' + peerDependenciesMeta: + react-dom: + optional: true + react-native: + optional: true + + react-icons@5.4.0: + resolution: {integrity: sha512-7eltJxgVt7X64oHh6wSWNwwbKTCtMfK35hcjvJS0yxEAhPM8oUKdS3+kqaW1vicIltw+kR2unHaa12S9pPALoQ==} + peerDependencies: + react: '*' + + react-refresh@0.14.2: + resolution: {integrity: sha512-jCvmsr+1IUSMUyzOkRcvnVbX3ZYC6g9TDrDbFuFmRDq7PD4yaGbLKNQL6k2jnArV8hjYxh7hVhAZB6s9HDGpZA==} + engines: {node: '>=0.10.0'} + + react-router-dom@7.1.5: + resolution: {integrity: sha512-/4f9+up0Qv92D3bB8iN5P1s3oHAepSGa9h5k6tpTFlixTTskJZwKGhJ6vRJ277tLD1zuaZTt95hyGWV1Z37csQ==} + engines: {node: '>=20.0.0'} + peerDependencies: + react: '>=18' + react-dom: '>=18' + + react-router@7.1.5: + resolution: {integrity: sha512-8BUF+hZEU4/z/JD201yK6S+UYhsf58bzYIDq2NS1iGpwxSXDu7F+DeGSkIXMFBuHZB21FSiCzEcUb18cQNdRkA==} + engines: {node: '>=20.0.0'} + peerDependencies: + react: '>=18' + react-dom: '>=18' + peerDependenciesMeta: + react-dom: + optional: true + + react@19.0.0: + resolution: {integrity: sha512-V8AVnmPIICiWpGfm6GLzCR/W5FXLchHop40W4nXBmdlEceh16rCN8O8LNWm5bh5XUX91fh7KpA+W0TgMKmgTpQ==} + engines: {node: '>=0.10.0'} + + regenerator-runtime@0.14.1: + resolution: {integrity: sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==} + + resolve-from@4.0.0: + resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + + reusify@1.0.4: + resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + + 
rollup@4.34.3: + resolution: {integrity: sha512-ORCtU0UBJyiAIn9m0llUXJXAswG/68pZptCrqxHG7//Z2DDzAUeyyY5hqf4XrsGlUxscMr9GkQ2QI7KTLqeyPw==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + + run-parallel@1.2.0: + resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + + scheduler@0.25.0: + resolution: {integrity: sha512-xFVuu11jh+xcO7JOAGJNOXld8/TcEHK/4CituBUeUb5hqxJLj9YuemAEuvm9gQ/+pgXYfbQuqAkiYu+u7YEsNA==} + + semver@6.3.1: + resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==} + hasBin: true + + semver@7.7.1: + resolution: {integrity: sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==} + engines: {node: '>=10'} + hasBin: true + + set-cookie-parser@2.7.1: + resolution: {integrity: sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==} + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + tabbable@6.2.0: + resolution: {integrity: sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==} + + tailwindcss@4.0.3: + resolution: {integrity: sha512-ImmZF0Lon5RrQpsEAKGxRvHwCvMgSC4XVlFRqmbzTEDb/3wvin9zfEZrMwgsa3yqBbPqahYcVI6lulM2S7IZAA==} + + tapable@2.2.1: + resolution: {integrity: sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==} + engines: {node: '>=6'} + + to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + + ts-api-utils@2.0.1: + resolution: {integrity: sha512-dnlgjFSVetynI8nzgJ+qF62efpglpWRk8isUEWZGWlJYySCTD6aKvbUDu+zbPeDakk3bg5H4XpitHukgfL1m9w==} + engines: {node: '>=18.12'} + peerDependencies: + typescript: '>=4.8.4' + + tslib@2.8.1: + resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} + + turbo-stream@2.4.0: + resolution: {integrity: sha512-FHncC10WpBd2eOmGwpmQsWLDoK4cqsA/UT/GqNoaKOQnT8uzhtCbg3EoUDMvqpOSAI0S26mr0rkjzbOO6S3v1g==} + + type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + + typescript-eslint@8.23.0: + resolution: {integrity: sha512-/LBRo3HrXr5LxmrdYSOCvoAMm7p2jNizNfbIpCgvG4HMsnoprRUOce/+8VJ9BDYWW68rqIENE/haVLWPeFZBVQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <5.8.0' + + typescript@5.7.3: + resolution: {integrity: 
sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw==} + engines: {node: '>=14.17'} + hasBin: true + + uc.micro@2.1.0: + resolution: {integrity: sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==} + + update-browserslist-db@1.1.2: + resolution: {integrity: sha512-PPypAm5qvlD7XMZC3BujecnaOxwhrtoFR+Dqkk5Aa/6DssiH0ibKoketaj9w8LP7Bont1rYeoV5plxD7RTEPRg==} + hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + + uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + vite@6.1.0: + resolution: {integrity: sha512-RjjMipCKVoR4hVfPY6GQTgveinjNuyLw+qruksLDvA5ktI1150VmcMBKmQaEWJhg/j6Uaf6dNCNA0AfdzUb/hQ==} + engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} + hasBin: true + peerDependencies: + '@types/node': ^18.0.0 || ^20.0.0 || >=22.0.0 + jiti: '>=1.21.0' + less: '*' + lightningcss: ^1.21.0 + sass: '*' + sass-embedded: '*' + stylus: '*' + sugarss: '*' + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + peerDependenciesMeta: + '@types/node': + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + tsx: + optional: true + yaml: + optional: true + + void-elements@3.1.0: + resolution: {integrity: sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==} + engines: {node: '>=0.10.0'} + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + word-wrap@1.2.5: + resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} + engines: {node: '>=0.10.0'} + + yallist@3.1.1: + resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} + + yaml@2.7.0: + resolution: {integrity: sha512-+hSoy/QHluxmC9kCIJyL/uyFmLmc+e5CFR5Wa+bpIhIj85LVb9ZH2nVnqrHoSvKogwODv0ClqZkmiSSaIH5LTA==} + engines: {node: '>= 14'} + hasBin: true + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + +snapshots: + + '@alloc/quick-lru@5.2.0': {} + + '@ampproject/remapping@2.3.0': + dependencies: + '@jridgewell/gen-mapping': 0.3.8 + '@jridgewell/trace-mapping': 0.3.25 + + '@babel/code-frame@7.26.2': + dependencies: + '@babel/helper-validator-identifier': 7.25.9 + js-tokens: 4.0.0 + picocolors: 1.1.1 + + '@babel/compat-data@7.26.5': {} + + '@babel/core@7.26.7': + dependencies: + '@ampproject/remapping': 2.3.0 + '@babel/code-frame': 7.26.2 + '@babel/generator': 7.26.5 + '@babel/helper-compilation-targets': 7.26.5 + '@babel/helper-module-transforms': 7.26.0(@babel/core@7.26.7) + '@babel/helpers': 7.26.7 + '@babel/parser': 7.26.7 + '@babel/template': 7.25.9 + '@babel/traverse': 7.26.7 + '@babel/types': 7.26.7 + convert-source-map: 2.0.0 + debug: 4.4.0 + gensync: 1.0.0-beta.2 + json5: 2.2.3 + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + + '@babel/generator@7.26.5': + dependencies: + '@babel/parser': 7.26.7 + '@babel/types': 7.26.7 + 
'@jridgewell/gen-mapping': 0.3.8 + '@jridgewell/trace-mapping': 0.3.25 + jsesc: 3.1.0 + + '@babel/helper-compilation-targets@7.26.5': + dependencies: + '@babel/compat-data': 7.26.5 + '@babel/helper-validator-option': 7.25.9 + browserslist: 4.24.4 + lru-cache: 5.1.1 + semver: 6.3.1 + + '@babel/helper-module-imports@7.25.9': + dependencies: + '@babel/traverse': 7.26.7 + '@babel/types': 7.26.7 + transitivePeerDependencies: + - supports-color + + '@babel/helper-module-transforms@7.26.0(@babel/core@7.26.7)': + dependencies: + '@babel/core': 7.26.7 + '@babel/helper-module-imports': 7.25.9 + '@babel/helper-validator-identifier': 7.25.9 + '@babel/traverse': 7.26.7 + transitivePeerDependencies: + - supports-color + + '@babel/helper-plugin-utils@7.26.5': {} + + '@babel/helper-string-parser@7.25.9': {} + + '@babel/helper-validator-identifier@7.25.9': {} + + '@babel/helper-validator-option@7.25.9': {} + + '@babel/helpers@7.26.7': + dependencies: + '@babel/template': 7.25.9 + '@babel/types': 7.26.7 + + '@babel/parser@7.26.7': + dependencies: + '@babel/types': 7.26.7 + + '@babel/plugin-transform-react-jsx-self@7.25.9(@babel/core@7.26.7)': + dependencies: + '@babel/core': 7.26.7 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-transform-react-jsx-source@7.25.9(@babel/core@7.26.7)': + dependencies: + '@babel/core': 7.26.7 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/runtime@7.26.7': + dependencies: + regenerator-runtime: 0.14.1 + + '@babel/template@7.25.9': + dependencies: + '@babel/code-frame': 7.26.2 + '@babel/parser': 7.26.7 + '@babel/types': 7.26.7 + + '@babel/traverse@7.26.7': + dependencies: + '@babel/code-frame': 7.26.2 + '@babel/generator': 7.26.5 + '@babel/parser': 7.26.7 + '@babel/template': 7.25.9 + '@babel/types': 7.26.7 + debug: 4.4.0 + globals: 11.12.0 + transitivePeerDependencies: + - supports-color + + '@babel/types@7.26.7': + dependencies: + '@babel/helper-string-parser': 7.25.9 + '@babel/helper-validator-identifier': 7.25.9 + + '@esbuild/aix-ppc64@0.24.2': + optional: true + + '@esbuild/android-arm64@0.24.2': + optional: true + + '@esbuild/android-arm@0.24.2': + optional: true + + '@esbuild/android-x64@0.24.2': + optional: true + + '@esbuild/darwin-arm64@0.24.2': + optional: true + + '@esbuild/darwin-x64@0.24.2': + optional: true + + '@esbuild/freebsd-arm64@0.24.2': + optional: true + + '@esbuild/freebsd-x64@0.24.2': + optional: true + + '@esbuild/linux-arm64@0.24.2': + optional: true + + '@esbuild/linux-arm@0.24.2': + optional: true + + '@esbuild/linux-ia32@0.24.2': + optional: true + + '@esbuild/linux-loong64@0.24.2': + optional: true + + '@esbuild/linux-mips64el@0.24.2': + optional: true + + '@esbuild/linux-ppc64@0.24.2': + optional: true + + '@esbuild/linux-riscv64@0.24.2': + optional: true + + '@esbuild/linux-s390x@0.24.2': + optional: true + + '@esbuild/linux-x64@0.24.2': + optional: true + + '@esbuild/netbsd-arm64@0.24.2': + optional: true + + '@esbuild/netbsd-x64@0.24.2': + optional: true + + '@esbuild/openbsd-arm64@0.24.2': + optional: true + + '@esbuild/openbsd-x64@0.24.2': + optional: true + + '@esbuild/sunos-x64@0.24.2': + optional: true + + '@esbuild/win32-arm64@0.24.2': + optional: true + + '@esbuild/win32-ia32@0.24.2': + optional: true + + '@esbuild/win32-x64@0.24.2': + optional: true + + '@eslint-community/eslint-utils@4.4.1(eslint@9.19.0(jiti@2.4.2))': + dependencies: + eslint: 9.19.0(jiti@2.4.2) + eslint-visitor-keys: 3.4.3 + + '@eslint-community/regexpp@4.12.1': {} + + '@eslint/config-array@0.19.2': + dependencies: + '@eslint/object-schema': 2.1.6 + 
debug: 4.4.0 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + + '@eslint/core@0.10.0': + dependencies: + '@types/json-schema': 7.0.15 + + '@eslint/eslintrc@3.2.0': + dependencies: + ajv: 6.12.6 + debug: 4.4.0 + espree: 10.3.0 + globals: 14.0.0 + ignore: 5.3.2 + import-fresh: 3.3.1 + js-yaml: 4.1.0 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + + '@eslint/js@9.19.0': {} + + '@eslint/object-schema@2.1.6': {} + + '@eslint/plugin-kit@0.2.5': + dependencies: + '@eslint/core': 0.10.0 + levn: 0.4.1 + + '@floating-ui/core@1.6.9': + dependencies: + '@floating-ui/utils': 0.2.9 + + '@floating-ui/dom@1.6.13': + dependencies: + '@floating-ui/core': 1.6.9 + '@floating-ui/utils': 0.2.9 + + '@floating-ui/react-dom@2.1.2(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@floating-ui/dom': 1.6.13 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + + '@floating-ui/react@0.26.28(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@floating-ui/react-dom': 2.1.2(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@floating-ui/utils': 0.2.9 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + tabbable: 6.2.0 + + '@floating-ui/utils@0.2.9': {} + + '@headlessui/react@2.2.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@floating-ui/react': 0.26.28(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@react-aria/focus': 3.19.1(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@react-aria/interactions': 3.23.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@tanstack/react-virtual': 3.12.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + + '@humanfs/core@0.19.1': {} + + '@humanfs/node@0.16.6': + dependencies: + '@humanfs/core': 0.19.1 + '@humanwhocodes/retry': 0.3.1 + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/retry@0.3.1': {} + + '@humanwhocodes/retry@0.4.1': {} + + '@jridgewell/gen-mapping@0.3.8': + dependencies: + '@jridgewell/set-array': 1.2.1 + '@jridgewell/sourcemap-codec': 1.5.0 + '@jridgewell/trace-mapping': 0.3.25 + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/set-array@1.2.1': {} + + '@jridgewell/sourcemap-codec@1.5.0': {} + + '@jridgewell/trace-mapping@0.3.25': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.0 + + '@nodelib/fs.scandir@2.1.5': + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.2.0 + + '@nodelib/fs.stat@2.0.5': {} + + '@nodelib/fs.walk@1.2.8': + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.19.0 + + '@react-aria/focus@3.19.1(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@react-aria/interactions': 3.23.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@react-aria/utils': 3.27.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@react-types/shared': 3.27.0(react@19.0.0) + '@swc/helpers': 0.5.15 + clsx: 2.1.1 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + + '@react-aria/interactions@3.23.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@react-aria/ssr': 3.9.7(react@19.0.0) + '@react-aria/utils': 3.27.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@react-types/shared': 3.27.0(react@19.0.0) + '@swc/helpers': 0.5.15 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + + '@react-aria/ssr@3.9.7(react@19.0.0)': + dependencies: + '@swc/helpers': 0.5.15 + react: 19.0.0 + + '@react-aria/utils@3.27.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@react-aria/ssr': 
3.9.7(react@19.0.0) + '@react-stately/utils': 3.10.5(react@19.0.0) + '@react-types/shared': 3.27.0(react@19.0.0) + '@swc/helpers': 0.5.15 + clsx: 2.1.1 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + + '@react-stately/utils@3.10.5(react@19.0.0)': + dependencies: + '@swc/helpers': 0.5.15 + react: 19.0.0 + + '@react-types/shared@3.27.0(react@19.0.0)': + dependencies: + react: 19.0.0 + + '@rollup/rollup-android-arm-eabi@4.34.3': + optional: true + + '@rollup/rollup-android-arm64@4.34.3': + optional: true + + '@rollup/rollup-darwin-arm64@4.34.3': + optional: true + + '@rollup/rollup-darwin-x64@4.34.3': + optional: true + + '@rollup/rollup-freebsd-arm64@4.34.3': + optional: true + + '@rollup/rollup-freebsd-x64@4.34.3': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.34.3': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.34.3': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.34.3': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.34.3': + optional: true + + '@rollup/rollup-linux-loongarch64-gnu@4.34.3': + optional: true + + '@rollup/rollup-linux-powerpc64le-gnu@4.34.3': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.34.3': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.34.3': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.34.3': + optional: true + + '@rollup/rollup-linux-x64-musl@4.34.3': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.34.3': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.34.3': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.34.3': + optional: true + + '@swc/helpers@0.5.15': + dependencies: + tslib: 2.8.1 + + '@tailwindcss/node@4.0.3': + dependencies: + enhanced-resolve: 5.18.1 + jiti: 2.4.2 + tailwindcss: 4.0.3 + + '@tailwindcss/oxide-android-arm64@4.0.3': + optional: true + + '@tailwindcss/oxide-darwin-arm64@4.0.3': + optional: true + + '@tailwindcss/oxide-darwin-x64@4.0.3': + optional: true + + '@tailwindcss/oxide-freebsd-x64@4.0.3': + optional: true + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.0.3': + optional: true + + '@tailwindcss/oxide-linux-arm64-gnu@4.0.3': + optional: true + + '@tailwindcss/oxide-linux-arm64-musl@4.0.3': + optional: true + + '@tailwindcss/oxide-linux-x64-gnu@4.0.3': + optional: true + + '@tailwindcss/oxide-linux-x64-musl@4.0.3': + optional: true + + '@tailwindcss/oxide-win32-arm64-msvc@4.0.3': + optional: true + + '@tailwindcss/oxide-win32-x64-msvc@4.0.3': + optional: true + + '@tailwindcss/oxide@4.0.3': + optionalDependencies: + '@tailwindcss/oxide-android-arm64': 4.0.3 + '@tailwindcss/oxide-darwin-arm64': 4.0.3 + '@tailwindcss/oxide-darwin-x64': 4.0.3 + '@tailwindcss/oxide-freebsd-x64': 4.0.3 + '@tailwindcss/oxide-linux-arm-gnueabihf': 4.0.3 + '@tailwindcss/oxide-linux-arm64-gnu': 4.0.3 + '@tailwindcss/oxide-linux-arm64-musl': 4.0.3 + '@tailwindcss/oxide-linux-x64-gnu': 4.0.3 + '@tailwindcss/oxide-linux-x64-musl': 4.0.3 + '@tailwindcss/oxide-win32-arm64-msvc': 4.0.3 + '@tailwindcss/oxide-win32-x64-msvc': 4.0.3 + + '@tailwindcss/postcss@4.0.3': + dependencies: + '@alloc/quick-lru': 5.2.0 + '@tailwindcss/node': 4.0.3 + '@tailwindcss/oxide': 4.0.3 + lightningcss: 1.29.1 + postcss: 8.5.1 + tailwindcss: 4.0.3 + + '@tailwindcss/typography@0.5.16(tailwindcss@4.0.3)': + dependencies: + lodash.castarray: 4.4.0 + lodash.isplainobject: 4.0.6 + lodash.merge: 4.6.2 + postcss-selector-parser: 6.0.10 + tailwindcss: 4.0.3 + + '@tailwindcss/vite@4.0.3(vite@6.1.0(jiti@2.4.2)(lightningcss@1.29.1)(yaml@2.7.0))': + dependencies: + '@tailwindcss/node': 4.0.3 + '@tailwindcss/oxide': 
4.0.3 + lightningcss: 1.29.1 + tailwindcss: 4.0.3 + vite: 6.1.0(jiti@2.4.2)(lightningcss@1.29.1)(yaml@2.7.0) + + '@tanstack/react-virtual@3.12.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@tanstack/virtual-core': 3.12.0 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + + '@tanstack/virtual-core@3.12.0': {} + + '@types/babel__core@7.20.5': + dependencies: + '@babel/parser': 7.26.7 + '@babel/types': 7.26.7 + '@types/babel__generator': 7.6.8 + '@types/babel__template': 7.4.4 + '@types/babel__traverse': 7.20.6 + + '@types/babel__generator@7.6.8': + dependencies: + '@babel/types': 7.26.7 + + '@types/babel__template@7.4.4': + dependencies: + '@babel/parser': 7.26.7 + '@babel/types': 7.26.7 + + '@types/babel__traverse@7.20.6': + dependencies: + '@babel/types': 7.26.7 + + '@types/cookie@0.6.0': {} + + '@types/estree@1.0.6': {} + + '@types/json-schema@7.0.15': {} + + '@types/linkify-it@5.0.0': {} + + '@types/markdown-it@14.1.2': + dependencies: + '@types/linkify-it': 5.0.0 + '@types/mdurl': 2.0.0 + + '@types/mdurl@2.0.0': {} + + '@types/react-dom@19.0.3(@types/react@19.0.8)': + dependencies: + '@types/react': 19.0.8 + + '@types/react@19.0.8': + dependencies: + csstype: 3.1.3 + + '@typescript-eslint/eslint-plugin@8.23.0(@typescript-eslint/parser@8.23.0(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3))(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3)': + dependencies: + '@eslint-community/regexpp': 4.12.1 + '@typescript-eslint/parser': 8.23.0(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/scope-manager': 8.23.0 + '@typescript-eslint/type-utils': 8.23.0(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/utils': 8.23.0(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/visitor-keys': 8.23.0 + eslint: 9.19.0(jiti@2.4.2) + graphemer: 1.4.0 + ignore: 5.3.2 + natural-compare: 1.4.0 + ts-api-utils: 2.0.1(typescript@5.7.3) + typescript: 5.7.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/parser@8.23.0(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3)': + dependencies: + '@typescript-eslint/scope-manager': 8.23.0 + '@typescript-eslint/types': 8.23.0 + '@typescript-eslint/typescript-estree': 8.23.0(typescript@5.7.3) + '@typescript-eslint/visitor-keys': 8.23.0 + debug: 4.4.0 + eslint: 9.19.0(jiti@2.4.2) + typescript: 5.7.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/scope-manager@8.23.0': + dependencies: + '@typescript-eslint/types': 8.23.0 + '@typescript-eslint/visitor-keys': 8.23.0 + + '@typescript-eslint/type-utils@8.23.0(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3)': + dependencies: + '@typescript-eslint/typescript-estree': 8.23.0(typescript@5.7.3) + '@typescript-eslint/utils': 8.23.0(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3) + debug: 4.4.0 + eslint: 9.19.0(jiti@2.4.2) + ts-api-utils: 2.0.1(typescript@5.7.3) + typescript: 5.7.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/types@8.23.0': {} + + '@typescript-eslint/typescript-estree@8.23.0(typescript@5.7.3)': + dependencies: + '@typescript-eslint/types': 8.23.0 + '@typescript-eslint/visitor-keys': 8.23.0 + debug: 4.4.0 + fast-glob: 3.3.3 + is-glob: 4.0.3 + minimatch: 9.0.5 + semver: 7.7.1 + ts-api-utils: 2.0.1(typescript@5.7.3) + typescript: 5.7.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@8.23.0(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3)': + dependencies: + '@eslint-community/eslint-utils': 4.4.1(eslint@9.19.0(jiti@2.4.2)) + '@typescript-eslint/scope-manager': 8.23.0 + 
'@typescript-eslint/types': 8.23.0 + '@typescript-eslint/typescript-estree': 8.23.0(typescript@5.7.3) + eslint: 9.19.0(jiti@2.4.2) + typescript: 5.7.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/visitor-keys@8.23.0': + dependencies: + '@typescript-eslint/types': 8.23.0 + eslint-visitor-keys: 4.2.0 + + '@vitejs/plugin-react@4.3.4(vite@6.1.0(jiti@2.4.2)(lightningcss@1.29.1)(yaml@2.7.0))': + dependencies: + '@babel/core': 7.26.7 + '@babel/plugin-transform-react-jsx-self': 7.25.9(@babel/core@7.26.7) + '@babel/plugin-transform-react-jsx-source': 7.25.9(@babel/core@7.26.7) + '@types/babel__core': 7.20.5 + react-refresh: 0.14.2 + vite: 6.1.0(jiti@2.4.2)(lightningcss@1.29.1)(yaml@2.7.0) + transitivePeerDependencies: + - supports-color + + acorn-jsx@5.3.2(acorn@8.14.0): + dependencies: + acorn: 8.14.0 + + acorn@8.14.0: {} + + ajv@6.12.6: + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + argparse@2.0.1: {} + + autoprefixer@10.4.20(postcss@8.5.1): + dependencies: + browserslist: 4.24.4 + caniuse-lite: 1.0.30001697 + fraction.js: 4.3.7 + normalize-range: 0.1.2 + picocolors: 1.1.1 + postcss: 8.5.1 + postcss-value-parser: 4.2.0 + + balanced-match@1.0.2: {} + + brace-expansion@1.1.11: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + brace-expansion@2.0.1: + dependencies: + balanced-match: 1.0.2 + + braces@3.0.3: + dependencies: + fill-range: 7.1.1 + + browserslist@4.24.4: + dependencies: + caniuse-lite: 1.0.30001697 + electron-to-chromium: 1.5.92 + node-releases: 2.0.19 + update-browserslist-db: 1.1.2(browserslist@4.24.4) + + callsites@3.1.0: {} + + caniuse-lite@1.0.30001697: {} + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + clsx@2.1.1: {} + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + + concat-map@0.0.1: {} + + convert-source-map@2.0.0: {} + + cookie@1.0.2: {} + + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + cssesc@3.0.0: {} + + csstype@3.1.3: {} + + debug@4.4.0: + dependencies: + ms: 2.1.3 + + deep-is@0.1.4: {} + + detect-libc@1.0.3: {} + + electron-to-chromium@1.5.92: {} + + enhanced-resolve@5.18.1: + dependencies: + graceful-fs: 4.2.11 + tapable: 2.2.1 + + entities@4.5.0: {} + + esbuild@0.24.2: + optionalDependencies: + '@esbuild/aix-ppc64': 0.24.2 + '@esbuild/android-arm': 0.24.2 + '@esbuild/android-arm64': 0.24.2 + '@esbuild/android-x64': 0.24.2 + '@esbuild/darwin-arm64': 0.24.2 + '@esbuild/darwin-x64': 0.24.2 + '@esbuild/freebsd-arm64': 0.24.2 + '@esbuild/freebsd-x64': 0.24.2 + '@esbuild/linux-arm': 0.24.2 + '@esbuild/linux-arm64': 0.24.2 + '@esbuild/linux-ia32': 0.24.2 + '@esbuild/linux-loong64': 0.24.2 + '@esbuild/linux-mips64el': 0.24.2 + '@esbuild/linux-ppc64': 0.24.2 + '@esbuild/linux-riscv64': 0.24.2 + '@esbuild/linux-s390x': 0.24.2 + '@esbuild/linux-x64': 0.24.2 + '@esbuild/netbsd-arm64': 0.24.2 + '@esbuild/netbsd-x64': 0.24.2 + '@esbuild/openbsd-arm64': 0.24.2 + '@esbuild/openbsd-x64': 0.24.2 + '@esbuild/sunos-x64': 0.24.2 + '@esbuild/win32-arm64': 0.24.2 + '@esbuild/win32-ia32': 0.24.2 + '@esbuild/win32-x64': 0.24.2 + + escalade@3.2.0: {} + + escape-string-regexp@4.0.0: {} + + eslint-plugin-react-hooks@5.1.0(eslint@9.19.0(jiti@2.4.2)): + dependencies: + eslint: 9.19.0(jiti@2.4.2) + + eslint-plugin-react-refresh@0.4.18(eslint@9.19.0(jiti@2.4.2)): + dependencies: + eslint: 
9.19.0(jiti@2.4.2)
+
+  eslint-scope@8.2.0:
+    dependencies:
+      esrecurse: 4.3.0
+      estraverse: 5.3.0
+
+  eslint-visitor-keys@3.4.3: {}
+
+  eslint-visitor-keys@4.2.0: {}
+
+  eslint@9.19.0(jiti@2.4.2):
+    dependencies:
+      '@eslint-community/eslint-utils': 4.4.1(eslint@9.19.0(jiti@2.4.2))
+      '@eslint-community/regexpp': 4.12.1
+      '@eslint/config-array': 0.19.2
+      '@eslint/core': 0.10.0
+      '@eslint/eslintrc': 3.2.0
+      '@eslint/js': 9.19.0
+      '@eslint/plugin-kit': 0.2.5
+      '@humanfs/node': 0.16.6
+      '@humanwhocodes/module-importer': 1.0.1
+      '@humanwhocodes/retry': 0.4.1
+      '@types/estree': 1.0.6
+      '@types/json-schema': 7.0.15
+      ajv: 6.12.6
+      chalk: 4.1.2
+      cross-spawn: 7.0.6
+      debug: 4.4.0
+      escape-string-regexp: 4.0.0
+      eslint-scope: 8.2.0
+      eslint-visitor-keys: 4.2.0
+      espree: 10.3.0
+      esquery: 1.6.0
+      esutils: 2.0.3
+      fast-deep-equal: 3.1.3
+      file-entry-cache: 8.0.0
+      find-up: 5.0.0
+      glob-parent: 6.0.2
+      ignore: 5.3.2
+      imurmurhash: 0.1.4
+      is-glob: 4.0.3
+      json-stable-stringify-without-jsonify: 1.0.1
+      lodash.merge: 4.6.2
+      minimatch: 3.1.2
+      natural-compare: 1.4.0
+      optionator: 0.9.4
+    optionalDependencies:
+      jiti: 2.4.2
+    transitivePeerDependencies:
+      - supports-color
+
+  espree@10.3.0:
+    dependencies:
+      acorn: 8.14.0
+      acorn-jsx: 5.3.2(acorn@8.14.0)
+      eslint-visitor-keys: 4.2.0
+
+  esquery@1.6.0:
+    dependencies:
+      estraverse: 5.3.0
+
+  esrecurse@4.3.0:
+    dependencies:
+      estraverse: 5.3.0
+
+  estraverse@5.3.0: {}
+
+  esutils@2.0.3: {}
+
+  fast-deep-equal@3.1.3: {}
+
+  fast-glob@3.3.3:
+    dependencies:
+      '@nodelib/fs.stat': 2.0.5
+      '@nodelib/fs.walk': 1.2.8
+      glob-parent: 5.1.2
+      merge2: 1.4.1
+      micromatch: 4.0.8
+
+  fast-json-stable-stringify@2.1.0: {}
+
+  fast-levenshtein@2.0.6: {}
+
+  fastq@1.19.0:
+    dependencies:
+      reusify: 1.0.4
+
+  file-entry-cache@8.0.0:
+    dependencies:
+      flat-cache: 4.0.1
+
+  fill-range@7.1.1:
+    dependencies:
+      to-regex-range: 5.0.1
+
+  find-up@5.0.0:
+    dependencies:
+      locate-path: 6.0.0
+      path-exists: 4.0.0
+
+  flat-cache@4.0.1:
+    dependencies:
+      flatted: 3.3.2
+      keyv: 4.5.4
+
+  flatted@3.3.2: {}
+
+  fraction.js@4.3.7: {}
+
+  fsevents@2.3.3:
+    optional: true
+
+  gensync@1.0.0-beta.2: {}
+
+  glob-parent@5.1.2:
+    dependencies:
+      is-glob: 4.0.3
+
+  glob-parent@6.0.2:
+    dependencies:
+      is-glob: 4.0.3
+
+  globals@11.12.0: {}
+
+  globals@14.0.0: {}
+
+  globals@15.14.0: {}
+
+  graceful-fs@4.2.11: {}
+
+  graphemer@1.4.0: {}
+
+  has-flag@4.0.0: {}
+
+  html-parse-stringify@3.0.1:
+    dependencies:
+      void-elements: 3.1.0
+
+  i18next@24.2.2(typescript@5.7.3):
+    dependencies:
+      '@babel/runtime': 7.26.7
+    optionalDependencies:
+      typescript: 5.7.3
+
+  ignore@5.3.2: {}
+
+  import-fresh@3.3.1:
+    dependencies:
+      parent-module: 1.0.1
+      resolve-from: 4.0.0
+
+  imurmurhash@0.1.4: {}
+
+  is-extglob@2.1.1: {}
+
+  is-glob@4.0.3:
+    dependencies:
+      is-extglob: 2.1.1
+
+  is-number@7.0.0: {}
+
+  isexe@2.0.0: {}
+
+  jiti@2.4.2: {}
+
+  js-tokens@4.0.0: {}
+
+  js-yaml@4.1.0:
+    dependencies:
+      argparse: 2.0.1
+
+  jsesc@3.1.0: {}
+
+  json-buffer@3.0.1: {}
+
+  json-schema-traverse@0.4.1: {}
+
+  json-stable-stringify-without-jsonify@1.0.1: {}
+
+  json5@2.2.3: {}
+
+  keyv@4.5.4:
+    dependencies:
+      json-buffer: 3.0.1
+
+  levn@0.4.1:
+    dependencies:
+      prelude-ls: 1.2.1
+      type-check: 0.4.0
+
+  lightningcss-darwin-arm64@1.29.1:
+    optional: true
+
+  lightningcss-darwin-x64@1.29.1:
+    optional: true
+
+  lightningcss-freebsd-x64@1.29.1:
+    optional: true
+
+  lightningcss-linux-arm-gnueabihf@1.29.1:
+    optional: true
+
+  lightningcss-linux-arm64-gnu@1.29.1:
+    optional: true
+
+  lightningcss-linux-arm64-musl@1.29.1:
+    optional: true
+
+  lightningcss-linux-x64-gnu@1.29.1:
+    optional: true
+
+  lightningcss-linux-x64-musl@1.29.1:
+    optional: true
+
+  lightningcss-win32-arm64-msvc@1.29.1:
+    optional: true
+
+  lightningcss-win32-x64-msvc@1.29.1:
+    optional: true
+
+  lightningcss@1.29.1:
+    dependencies:
+      detect-libc: 1.0.3
+    optionalDependencies:
+      lightningcss-darwin-arm64: 1.29.1
+      lightningcss-darwin-x64: 1.29.1
+      lightningcss-freebsd-x64: 1.29.1
+      lightningcss-linux-arm-gnueabihf: 1.29.1
+      lightningcss-linux-arm64-gnu: 1.29.1
+      lightningcss-linux-arm64-musl: 1.29.1
+      lightningcss-linux-x64-gnu: 1.29.1
+      lightningcss-linux-x64-musl: 1.29.1
+      lightningcss-win32-arm64-msvc: 1.29.1
+      lightningcss-win32-x64-msvc: 1.29.1
+
+  linkify-it@5.0.0:
+    dependencies:
+      uc.micro: 2.1.0
+
+  locate-path@6.0.0:
+    dependencies:
+      p-locate: 5.0.0
+
+  lodash.castarray@4.4.0: {}
+
+  lodash.isplainobject@4.0.6: {}
+
+  lodash.merge@4.6.2: {}
+
+  lru-cache@5.1.1:
+    dependencies:
+      yallist: 3.1.1
+
+  lucide-react@0.474.0(react@19.0.0):
+    dependencies:
+      react: 19.0.0
+
+  markdown-it@14.1.0:
+    dependencies:
+      argparse: 2.0.1
+      entities: 4.5.0
+      linkify-it: 5.0.0
+      mdurl: 2.0.0
+      punycode.js: 2.3.1
+      uc.micro: 2.1.0
+
+  mdurl@2.0.0: {}
+
+  merge2@1.4.1: {}
+
+  micromatch@4.0.8:
+    dependencies:
+      braces: 3.0.3
+      picomatch: 2.3.1
+
+  minimatch@3.1.2:
+    dependencies:
+      brace-expansion: 1.1.11
+
+  minimatch@9.0.5:
+    dependencies:
+      brace-expansion: 2.0.1
+
+  ms@2.1.3: {}
+
+  nanoid@3.3.8: {}
+
+  natural-compare@1.4.0: {}
+
+  node-releases@2.0.19: {}
+
+  normalize-range@0.1.2: {}
+
+  optionator@0.9.4:
+    dependencies:
+      deep-is: 0.1.4
+      fast-levenshtein: 2.0.6
+      levn: 0.4.1
+      prelude-ls: 1.2.1
+      type-check: 0.4.0
+      word-wrap: 1.2.5
+
+  p-limit@3.1.0:
+    dependencies:
+      yocto-queue: 0.1.0
+
+  p-locate@5.0.0:
+    dependencies:
+      p-limit: 3.1.0
+
+  parent-module@1.0.1:
+    dependencies:
+      callsites: 3.1.0
+
+  path-exists@4.0.0: {}
+
+  path-key@3.1.1: {}
+
+  picocolors@1.1.1: {}
+
+  picomatch@2.3.1: {}
+
+  postcss-selector-parser@6.0.10:
+    dependencies:
+      cssesc: 3.0.0
+      util-deprecate: 1.0.2
+
+  postcss-value-parser@4.2.0: {}
+
+  postcss@8.5.1:
+    dependencies:
+      nanoid: 3.3.8
+      picocolors: 1.1.1
+      source-map-js: 1.2.1
+
+  prelude-ls@1.2.1: {}
+
+  punycode.js@2.3.1: {}
+
+  punycode@2.3.1: {}
+
+  queue-microtask@1.2.3: {}
+
+  react-dom@19.0.0(react@19.0.0):
+    dependencies:
+      react: 19.0.0
+      scheduler: 0.25.0
+
+  react-i18next@15.4.0(i18next@24.2.2(typescript@5.7.3))(react-dom@19.0.0(react@19.0.0))(react@19.0.0):
+    dependencies:
+      '@babel/runtime': 7.26.7
+      html-parse-stringify: 3.0.1
+      i18next: 24.2.2(typescript@5.7.3)
+      react: 19.0.0
+    optionalDependencies:
+      react-dom: 19.0.0(react@19.0.0)
+
+  react-icons@5.4.0(react@19.0.0):
+    dependencies:
+      react: 19.0.0
+
+  react-refresh@0.14.2: {}
+
+  react-router-dom@7.1.5(react-dom@19.0.0(react@19.0.0))(react@19.0.0):
+    dependencies:
+      react: 19.0.0
+      react-dom: 19.0.0(react@19.0.0)
+      react-router: 7.1.5(react-dom@19.0.0(react@19.0.0))(react@19.0.0)
+
+  react-router@7.1.5(react-dom@19.0.0(react@19.0.0))(react@19.0.0):
+    dependencies:
+      '@types/cookie': 0.6.0
+      cookie: 1.0.2
+      react: 19.0.0
+      set-cookie-parser: 2.7.1
+      turbo-stream: 2.4.0
+    optionalDependencies:
+      react-dom: 19.0.0(react@19.0.0)
+
+  react@19.0.0: {}
+
+  regenerator-runtime@0.14.1: {}
+
+  resolve-from@4.0.0: {}
+
+  reusify@1.0.4: {}
+
+  rollup@4.34.3:
+    dependencies:
+      '@types/estree': 1.0.6
+    optionalDependencies:
+      '@rollup/rollup-android-arm-eabi': 4.34.3
+      '@rollup/rollup-android-arm64': 4.34.3
+      '@rollup/rollup-darwin-arm64': 4.34.3
+      '@rollup/rollup-darwin-x64': 4.34.3
+      '@rollup/rollup-freebsd-arm64': 4.34.3
+      '@rollup/rollup-freebsd-x64': 4.34.3
+      '@rollup/rollup-linux-arm-gnueabihf': 4.34.3
+      '@rollup/rollup-linux-arm-musleabihf': 4.34.3
+      '@rollup/rollup-linux-arm64-gnu': 4.34.3
+      '@rollup/rollup-linux-arm64-musl': 4.34.3
+      '@rollup/rollup-linux-loongarch64-gnu': 4.34.3
+      '@rollup/rollup-linux-powerpc64le-gnu': 4.34.3
+      '@rollup/rollup-linux-riscv64-gnu': 4.34.3
+      '@rollup/rollup-linux-s390x-gnu': 4.34.3
+      '@rollup/rollup-linux-x64-gnu': 4.34.3
+      '@rollup/rollup-linux-x64-musl': 4.34.3
+      '@rollup/rollup-win32-arm64-msvc': 4.34.3
+      '@rollup/rollup-win32-ia32-msvc': 4.34.3
+      '@rollup/rollup-win32-x64-msvc': 4.34.3
+      fsevents: 2.3.3
+
+  run-parallel@1.2.0:
+    dependencies:
+      queue-microtask: 1.2.3
+
+  scheduler@0.25.0: {}
+
+  semver@6.3.1: {}
+
+  semver@7.7.1: {}
+
+  set-cookie-parser@2.7.1: {}
+
+  shebang-command@2.0.0:
+    dependencies:
+      shebang-regex: 3.0.0
+
+  shebang-regex@3.0.0: {}
+
+  source-map-js@1.2.1: {}
+
+  strip-json-comments@3.1.1: {}
+
+  supports-color@7.2.0:
+    dependencies:
+      has-flag: 4.0.0
+
+  tabbable@6.2.0: {}
+
+  tailwindcss@4.0.3: {}
+
+  tapable@2.2.1: {}
+
+  to-regex-range@5.0.1:
+    dependencies:
+      is-number: 7.0.0
+
+  ts-api-utils@2.0.1(typescript@5.7.3):
+    dependencies:
+      typescript: 5.7.3
+
+  tslib@2.8.1: {}
+
+  turbo-stream@2.4.0: {}
+
+  type-check@0.4.0:
+    dependencies:
+      prelude-ls: 1.2.1
+
+  typescript-eslint@8.23.0(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3):
+    dependencies:
+      '@typescript-eslint/eslint-plugin': 8.23.0(@typescript-eslint/parser@8.23.0(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3))(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3)
+      '@typescript-eslint/parser': 8.23.0(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3)
+      '@typescript-eslint/utils': 8.23.0(eslint@9.19.0(jiti@2.4.2))(typescript@5.7.3)
+      eslint: 9.19.0(jiti@2.4.2)
+      typescript: 5.7.3
+    transitivePeerDependencies:
+      - supports-color
+
+  typescript@5.7.3: {}
+
+  uc.micro@2.1.0: {}
+
+  update-browserslist-db@1.1.2(browserslist@4.24.4):
+    dependencies:
+      browserslist: 4.24.4
+      escalade: 3.2.0
+      picocolors: 1.1.1
+
+  uri-js@4.4.1:
+    dependencies:
+      punycode: 2.3.1
+
+  util-deprecate@1.0.2: {}
+
+  vite@6.1.0(jiti@2.4.2)(lightningcss@1.29.1)(yaml@2.7.0):
+    dependencies:
+      esbuild: 0.24.2
+      postcss: 8.5.1
+      rollup: 4.34.3
+    optionalDependencies:
+      fsevents: 2.3.3
+      jiti: 2.4.2
+      lightningcss: 1.29.1
+      yaml: 2.7.0
+
+  void-elements@3.1.0: {}
+
+  which@2.0.2:
+    dependencies:
+      isexe: 2.0.0
+
+  word-wrap@1.2.5: {}
+
+  yallist@3.1.1: {}
+
+  yaml@2.7.0:
+    optional: true
+
+  yocto-queue@0.1.0: {}
diff --git a/frontend/postcss.config.js b/frontend/postcss.config.js
new file mode 100644
index 0000000..f69c5d4
--- /dev/null
+++ b/frontend/postcss.config.js
@@ -0,0 +1,6 @@
+export default {
+  plugins: {
+    "@tailwindcss/postcss": {},
+    autoprefixer: {},
+  },
+};
diff --git a/frontend/public/apple-touch-icon.png b/frontend/public/apple-touch-icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..5bcb799bcfa8a96b8929d18b91166b401bf055f4
GIT binary patch (literal 7807; binary data omitted)
diff --git a/frontend/public/favicon-96x96.png b/frontend/public/favicon-96x96.png
new file mode 100644
index 0000000000000000000000000000000000000000..ff061c93e8e46025dfe9122f8e6683dc5c09f97a
GIT binary patch (literal 4406; binary data omitted)

diff --git a/frontend/public/favicon.svg b/frontend/public/favicon.svg
new file mode 100644
index 0000000..b2417ef
--- /dev/null
+++ b/frontend/public/favicon.svg
@@ -0,0 +1,3 @@
+
\ No newline at end of file

diff --git a/frontend/public/logo.avif b/frontend/public/logo.avif
new file mode 100644
index 0000000000000000000000000000000000000000..527b8a4dd5be3f37487c9b1303dcd226007f204a
GIT binary patch (literal 10435; binary data omitted)

diff --git a/frontend/public/web-app-manifest-512x512.png b/frontend/public/web-app-manifest-512x512.png
new file mode 100644
index 0000000000000000000000000000000000000000..0e384b4525675c0194d5a93560e4018fae5ba236
GIT binary patch (literal 41804; binary data omitted)
z1&iWVF=MUDQ!nL{8e4UKTVc=S6=OF}J|#sEZQJ9<&tTfkXx9QTMX3-}Si{kj-D%u) zCQz&f&Klyg>~xT8$$+5inSF#kWOM1u*MpwSt<9&iLfEi$r4xn@Y9(JDojYBeoo#SK z1L@tw#pd_BB#KVfzzjut_B}g!9pc#3qkv2`*}EJWgH$$mGQBQ<;Y6Tl zNl}Dwg5Zq>WBTx%OKnlC`=6=O&JcJ;x#VS~eNZ^1`{8L9cH1tDE^^Q{M*Km*%SPKw z2+8%1ufyW70oSps^PmQwlT=cR(1UBl!tFve5fTa~m3jE`BN>FZOehwTxbhN< z^Je(U8!xr_B3w@%y~7hTJI*+4Hj|RLFdQxB41;Che!Ah3^}pws&4+uzX8*>dt~J8=)%<&T`%~3+BlV_P!Ezenv(bGj z2^6OwSed);9?kJAU!mjm<<8n$m}iE&A_@f=cXg%JcxI?Ngs+W(feR^Sg6t-8{1P_U znB%38Qor&cpED#71hk-3v4+Yy)2mp5qxHSW>m%{1M`|JP)E7cRVkkFd8z)XnBt`VcAvFGG_14)g}515uV{?9{Ky+*TaWyrSw+AoC>Eu@?X<2S zh^bS5EtcxnE5<6#LXQf?M9G@Zhuey}t(hc?HR?aFyX<>vGkRi;;i6w#aI-bIi8-^)zBGFGxB;U#}2i)u_5Q;w2m7>{wdURIp z&V}HimqEUl)y~>Gfcud=mtBZy-uHZdj5CA3d#U#qr<=q`AXDeA9F`xB%S(DVSAFm9 zU}FDF>{y%wn==M;^ru{k*0DpuwZ7wRqo=4Aa+yYgMOGUK=X)VJq8xhh0elxIr=0MH zSB`6oQ+R;XUyEjA$-V-L8)|ezl-rRCv_|wRmHFd_yW*t9PnxC2QFG2}# z3ek$0F?UJ=TZsHZa1VwWghwb{bp!G)zU=a+!Vl^wTitHX)?KRCzs>wK3IAK!L0Qyp zik9Y19-0#!Rxq;)i#aQ5Yy?k_snQt}-Z&I!?8);(2sqxy)9>Dm*B?x=p!7d}Pb^TE zo?0nbd0Q+e0mm=E8PupM?FUOLAeAm#1*?h4zldp6<2uyaaf@L`cM+e8=$WAQ8Cp;2*BSo#>8#B5rSpAj16&b8;6`ty^ zz0_q}@#O2n2?H(W&{|Krs2UL`k)<_FZVh%k=UnEMX=E7*$5)N7b>K6(U>hK95GU!lW}yV{!1$P3 zORZfPQYH2aJn$kbNulAz2NKqHH)BkcDVDJFGI@}WB@v;CjY3P9nzs+_<|#C;BmvxT z;3IFqOI`{1+KM`GUi`NmjhJ`#vaDwX5t?7)@E!5q`n2- z=0IDUg9@I^wLb`k;B4sAxr3xHWb@Rd)*W=O9)hBX1)Nsj#aAaEmX;S7;#!{gg+r=* zF(})K&}O!}R-MkWwGsM0)7en6PiuJr@k`QG%-uMj!Da4YHZdJU(BevX_&>gdF9$ zFAT!`*ek1tg)XB7>s@d*?7(<5?)SQQ!fjT@q}*+*{G{ue#C+)qBAUrTFf|*549@zx zoPEd4Kuv|2Pw?_ovF9WIzpcm(Ry_+Xo$EYrZtRcQ9iG%xv-tLc=#w>G=b(><`<$F| zB&Bz)*uu_Bg5bJ7LEz&#wl2Q&7g4JqmM~yD-t~R^o|mj@i7MsYU;9CY8T(Uj%8d$j zkF-wA@%2WTiM`>7Fw;pJPCO>V=~a+4Dw7GztY`N7wzno@zPmAF$mopdc9XpQK$MnA z{#ox43|_i%z>8Gx2VDokQzrd;O&_}I7mU39l)Js`m*UNNM1s#P%OF2SaSH3FG>uvF z>FOKUa#E5x;5#B#6!;?M-c*4x**+$Xp;2!Y%aB+ znf<{uw-EXbRN!QB(TPOo38Lwietx=L453^}%ry=I6BXQ2%d=g=maXv$nQVsJ)#sA& zwzyrLW4U*}53TEhpoFVt6l0DgfXsR7#!AIxzjeSIsBGdqOgL@{$tUbBzi~f$;K4Ft z6iHK9!fzarUEct~ZFId^>C5@|CCL(FMKR9#$9NJBPy~Rq1!Qt3#VKjKBIX#i;PATs z0KV)sF$bHMJ#Beb??l8p|$vA>TL;Aum=Oc4Hwi;`b)F8!y8~FBGb@ak9o``Lj0QY`# zi$=@(67L{tg#4dD7AmcGFuy(@jck*TqFW^=sGBt-n+Ze z=~f@!)agGnxeHcKLj z25$tqL*S^H5~^_X!R@(ehsG6fULkcP;yjgMP-`^AS7En$x0HnI>| zzPJlNqic;ksw%Y(WnI#92>E5`IvjyGVW^O-sL++qb<~Lb4q~B$87uB=vv31pc}ncz z*MajO_x_R`>ALkh2f0{+{e;+4IegR%$U*UX(xw&GUpgxv93LkIf zrA~FQ3t`KyEloCqSr)TNdm29lne zQTsz3S_!keE;)Mmo4~s(Okf?&=7(RUMn)X$`{DQ^L{YUht;$kDVC=GC=!b=MtX3>L z6C4~6ezsQXP^;ZCS42>a7zp58;9<;(WkCpIlR*yQKF3JlJH2z{MYRZPwc&_nkS?Zi z2=Z9Pcm7X_td|hP#nl*rvGG_%BUm^ur1ANU$LdxDjmsSBZ133H^>lf#eRD|hjp1n~ zmm5#8n(^K)d3G#t6lS&DpYgIQ63eIt6cu)V5~ z8Ok0aId*sa(<>QWI|%A(L8z2_zp+@TGS;ueQw@H9H4uEPb26nA+8T1XL_WsDV$pN& z9O7uTLI^18`pYKM-(d=6#DN#h&pi9ye1x*qDfBoo<^sMt-M0KErgkLIi}rC){| zBrH5wH6Wi;x~{fu>fJ!#_j8Jmp~&QDqgvIEn?F~!)YqMXOIEJ`di)w*{0l5FX3_Oi z)&n?BK^1UVowUY=G|`3eP<5HeMI*?MR{T2C>N)JsGXW^Hy&LUOZk|ep+-qneOGMkncIbJ^jojJsz<+Fh6%_ zmcely8qiwuBQFXsi<0v8o@&rog6xi1U9$nDUj= zvrV}i>YlYDJd-?aSWflQlcz2$=3M|*SN+|y(g6v|Gh`sX@)et3!tBgv(=V@CoQVK4 z==d1#bCw0K26(0GKQ#YkO1CXi>YF@u)mc6(hJ}|pz^nC%KknaMZsYZTYo_c*V&<$J zNT+*p&vX87dRJ*gC*?iL4zrs>%5-qVcFOQjSM4uMeZ<3h=9xSL*!VTB_4uQRp5cWUs<|)EH%ery+U4iJZUxPqkUrh0OcH+CpI0-STZW!4?*#V6- z$%_USeeL;WEBhUjE;|4;#0mL3m<1yj5W}MPr>t@~j>_Ay^{qynVhA1`Fl!rPS>`nh zz;F|7yo)uSSRU||3LGK>j&Pfe#dM%NiK^|zEcW<;oXcd@NisXS^+SY7Dh%UAP|V4&HNieFvGC1)CLWD7Z@(A zxwOMSzMyU^085wIv`}$FOjU-c~nPrL3DQZDhsr|E)PK ziQmadcG~1m7j80);***e*;zupng~!^S#A(yf$X+z>i?}-!2?Pcfz1QSkMR8>EN^-!2V-1T^9L6M(Mq0 zz{g`imK~qseS3MRbT!0kl4z55)f2O!M=JL<3t2cg0joUdMXqN_zgN6N9-4PL#nVw$ 
z#CAvHxN(dOAvm~Y=+Dq8Sn!Qapp*E5p9L(?Ze8xwL>7-xcp!O`nu)V4wa|kx+3(KY UFYibc&4YlSsi77Au|6T}UpK60ga7~l literal 0 HcmV?d00001 diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx new file mode 100644 index 0000000..e7885dd --- /dev/null +++ b/frontend/src/App.tsx @@ -0,0 +1,38 @@ +import React from 'react'; +import { BrowserRouter, Routes, Route } from 'react-router-dom'; +import { Header } from './components/layout/Header'; +import { Suspense, lazy } from 'react'; +import Footer from './components/Footer'; + +// Lazy load pages +const Home = lazy(() => import('./pages/Home')); +const Daily = lazy(() => import('./pages/Daily')); +const Article = lazy(() => import('./pages/Article')); + +function App() { + return ( + +
+      <div>
+        <Header />
+        {/* Header divider line */}
+        <div />
+        <main>
+          <Suspense fallback={
+            <div>Loading...</div>
+          }>
+            <Routes>
+              <Route path="/" element={<Home />} />
+              <Route path="/daily" element={<Daily />} />
+              <Route path="/article/:articleId" element={<Article />} />
+            </Routes>
+          </Suspense>
+        </main>
+        <Footer />
+      </div>
+    </BrowserRouter>
+  );
+}
+
+export default App;
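A note on the routing above: each page is split into its own chunk via React.lazy, and the Suspense fallback renders while a chunk loads. A further page would follow the same pattern; a minimal sketch (the About page, its file, and its path are assumptions, not part of this patch):

    // Hypothetical example only -- About, './pages/About', and '/about' are not in this patch.
    const About = lazy(() => import('./pages/About'));

    // ...and inside <Routes>:
    // <Route path="/about" element={<About />} />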
diff --git a/frontend/src/components/Footer.tsx b/frontend/src/components/Footer.tsx
new file mode 100644
index 0000000..fceb628
--- /dev/null
+++ b/frontend/src/components/Footer.tsx
@@ -0,0 +1,93 @@
+import { useTranslation } from 'react-i18next';
+import { FaTwitter, FaDiscord, FaWeibo } from 'react-icons/fa';
+import { MdVerified, MdHome } from 'react-icons/md';
+import { AiFillBilibili } from 'react-icons/ai';
+import { SiNeteasecloudmusic, SiTencentqq, SiYoutube, SiForgejo, SiBluesky } from 'react-icons/si';
+import { PiFediverseLogo } from 'react-icons/pi';
+
+// Define all social media links
+const socialLinks = [
+  { icon: MdVerified, href: '#', label: 'Keyoxide', order: 1 },
+  { icon: SiYoutube, href: '#', label: 'YouTube', order: 2 },
+  { icon: PiFediverseLogo, href: '#', label: 'Fediverse', order: 3 },
+  { icon: SiBluesky, href: '#', label: 'Bluesky', order: 4 },
+  { icon: FaDiscord, href: '#', label: 'Discord', order: 5 },
+  { icon: SiForgejo, href: '#', label: 'Forgejo', order: 6 },
+  { icon: FaTwitter, href: '#', label: 'Twitter', order: 7 },
+  // Reserved extra icons, hidden for now
+  { icon: AiFillBilibili, href: '#', label: 'Bilibili', hidden: true },
+  { icon: SiNeteasecloudmusic, href: '#', label: 'NetEaseMusic', hidden: true },
+  { icon: SiTencentqq, href: '#', label: 'QQGroup', hidden: true },
+  { icon: FaWeibo, href: '#', label: 'Weibo', hidden: true },
+  { icon: MdHome, href: '#', label: 'Home', hidden: true }
+];
+
+const Footer = () => {
+  const { t } = useTranslation();
+  const currentYear = new Date().getFullYear();
+
+  return (
+    <footer>
+      <div>
+        {/* Footer divider line */}
+        <div />
+
+        {/* Desktop: space-between layout */}
+        <div>
+          {/* Copyright */}
+          <div>
+            © {currentYear} {t('footer.copyright')}
+          </div>
+
+          {/* Social Links */}
+          <div>
+            {socialLinks
+              .filter(link => !link.hidden)
+              .sort((a, b) => a.order - b.order)
+              .map(({ icon: Icon, href, label }) => (
+                <a key={label} href={href} aria-label={label}>
+                  <Icon />
+                </a>
+              ))}
+          </div>
+        </div>
+
+        {/* Mobile: centered layout */}
+        <div>
+          {/* Copyright */}
+          <div>
+            © {currentYear} {t('footer.copyright')}
+          </div>
+
+          {/* Social Links */}
+          <div>
+            {socialLinks
+              .filter(link => !link.hidden)
+              .sort((a, b) => a.order - b.order)
+              .map(({ icon: Icon, href, label }) => (
+                <a key={label} href={href} aria-label={label}>
+                  <Icon />
+                </a>
+              ))}
+          </div>
+        </div>
+      </div>
+    </footer>
+  );
+};
+
+export default Footer;
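A note on the socialLinks typing: because some entries carry `order` and others carry `hidden`, TypeScript infers a union in which each property is optional on the other members, so `a.order - b.order` can be flagged under strict null checks. A sketch of an explicit entry type (the `SocialLink` name is an assumption, not in this patch):

    import type { IconType } from 'react-icons';

    // Hypothetical explicit type for socialLinks entries.
    interface SocialLink {
      icon: IconType;
      href: string;
      label: string;
      order?: number;   // present on visible links
      hidden?: boolean; // present on reserved links
    }

    // With the explicit type, the sort needs a fallback for the optional field:
    // .sort((a, b) => (a.order ?? 0) - (b.order ?? 0))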
diff --git a/frontend/src/components/layout/Header.tsx b/frontend/src/components/layout/Header.tsx
new file mode 100644
index 0000000..ece24f6
--- /dev/null
+++ b/frontend/src/components/layout/Header.tsx
@@ -0,0 +1,239 @@
+import { useState, useRef, useEffect } from 'react';
+import { Link } from 'react-router-dom';
+import { useTranslation } from 'react-i18next';
+import { FiSun, FiMoon, FiSearch, FiGlobe, FiMonitor } from 'react-icons/fi';
+import { Menu } from '@headlessui/react';
+import { useTheme } from '../../hooks/useTheme';
+
+const LANGUAGES = [
+  { code: 'en', nativeName: 'English' },
+  { code: 'zh-Hans', nativeName: '简体中文' },
+  { code: 'zh-Hant', nativeName: '繁體中文' },
+];
+
+export function Header() {
+  const { t, i18n } = useTranslation();
+  const { theme, setTheme } = useTheme();
+  const [isSearchOpen, setIsSearchOpen] = useState(false);
+  const searchRef = useRef<HTMLDivElement>(null);
+
+  useEffect(() => {
+    const handleClickOutside = (event: MouseEvent) => {
+      if (searchRef.current && !searchRef.current.contains(event.target as Node)) {
+        setIsSearchOpen(false);
+      }
+    };
+
+    if (isSearchOpen) {
+      document.addEventListener('mousedown', handleClickOutside);
+    }
+
+    return () => {
+      document.removeEventListener('mousedown', handleClickOutside);
+    };
+  }, [isSearchOpen]);
+
+  const getThemeIcon = (themeType: string) => {
+    switch (themeType) {
+      case 'light':
+        return <FiSun />;
+      case 'dark':
+        return <FiMoon />;
+      case 'system':
+        return <FiMonitor />;
+      default:
+        return <FiSun />;
+    }
+  };
+
+  return (
+    <header>
+      <div>
+        <div>
+          {/* Logo */}
+          <Link to="/">
+            TSS News
+          </Link>
+        </div>
+
+        {/* Site navigation */}
+        <Menu as="nav">
+          <Menu.Button>{/* … */}</Menu.Button>
+          <Menu.Items>
+            <Menu.Item>
+              {({ active }) => (
+                <Link to="/">{t('nav.home')}</Link>
+              )}
+            </Menu.Item>
+            <Menu.Item>
+              {({ active }) => (
+                <Link to="/daily">{t('nav.daily')}</Link>
+              )}
+            </Menu.Item>
+            {Object.entries(t('categories', { returnObjects: true })).map(([key, value]) => (
+              <Menu.Item key={key}>
+                {({ active }) => (
+                  <Link to={`/category/${key}`}>{value as string}</Link>
+                )}
+              </Menu.Item>
+            ))}
+            <Menu.Item>
+              {({ active }) => (
+                <Link to="/about">{t('nav.about')}</Link>
+              )}
+            </Menu.Item>
+          </Menu.Items>
+        </Menu>
+
+        <div>
+          {/* Search toggle */}
+          <button onClick={() => setIsSearchOpen(true)}>
+            <FiSearch />
+          </button>
+
+          {/* Theme switcher */}
+          <Menu as="div">
+            <Menu.Button>
+              {getThemeIcon(theme)}
+            </Menu.Button>
+            <Menu.Items>
+              {['light', 'dark', 'system'].map((themeType) => (
+                <Menu.Item key={themeType}>
+                  {({ active }) => (
+                    <button onClick={() => setTheme(themeType as 'light' | 'dark' | 'system')}>
+                      {getThemeIcon(themeType)}
+                    </button>
+                  )}
+                </Menu.Item>
+              ))}
+            </Menu.Items>
+          </Menu>
+
+          {/* Language switcher */}
+          <Menu as="div">
+            <Menu.Button>
+              <FiGlobe />
+            </Menu.Button>
+            <Menu.Items>
+              {LANGUAGES.map((lang) => (
+                <Menu.Item key={lang.code}>
+                  {({ active }) => (
+                    <button onClick={() => i18n.changeLanguage(lang.code)}>
+                      {lang.nativeName}
+                    </button>
+                  )}
+                </Menu.Item>
+              ))}
+            </Menu.Items>
+          </Menu>
+        </div>
+      </div>
+
+      {/* Search overlay */}
+      {isSearchOpen && (
+        <div>
+          <div ref={searchRef}>
+            <FiSearch />
+            <input type="text" />
+          </div>
+        </div>
+      )}
+    </header>
+  );
+}
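The click-outside handling in Header above is self-contained, but the same pattern could be factored into a reusable hook. A sketch under that assumption (useClickOutside is hypothetical, not part of this patch):

    import { useEffect, type RefObject } from 'react';

    // Hypothetical reusable form of the search panel's click-outside effect.
    function useClickOutside(
      ref: RefObject<HTMLElement | null>,
      onOutside: () => void,
      active: boolean
    ) {
      useEffect(() => {
        if (!active) return;
        const handle = (event: MouseEvent) => {
          if (ref.current && !ref.current.contains(event.target as Node)) {
            onOutside();
          }
        };
        document.addEventListener('mousedown', handle);
        return () => document.removeEventListener('mousedown', handle);
      }, [ref, onOutside, active]);
    }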
diff --git a/frontend/src/hooks/useTheme.ts b/frontend/src/hooks/useTheme.ts
new file mode 100644
index 0000000..ce2910b
--- /dev/null
+++ b/frontend/src/hooks/useTheme.ts
@@ -0,0 +1,35 @@
+import { useState, useEffect } from 'react';
+
+type Theme = 'light' | 'dark' | 'system';
+
+export function useTheme() {
+  const [theme, setTheme] = useState(
+    () => (localStorage.getItem('theme') as Theme) || 'system'
+  );
+
+  const getSystemTheme = () =>
+    window.matchMedia('(prefers-color-scheme: dark)').matches ? 'dark' : 'light';
+
+  useEffect(() => {
+    const root = window.document.documentElement;
+    const systemTheme = getSystemTheme();
+
+    root.classList.remove('light', 'dark');
+    root.classList.add(theme === 'system' ? systemTheme : theme);
+
+    localStorage.setItem('theme', theme);
+
+    if (theme === 'system') {
+      const mediaQuery = window.matchMedia('(prefers-color-scheme: dark)');
+      const handleChange = (e: MediaQueryListEvent) => {
+        root.classList.remove('light', 'dark');
+        root.classList.add(e.matches ? 'dark' : 'light');
+      };
+
+      mediaQuery.addEventListener('change', handleChange);
+      return () => mediaQuery.removeEventListener('change', handleChange);
+    }
+  }, [theme]);
+
+  return { theme, setTheme };
+}
diff --git a/frontend/src/i18n.ts b/frontend/src/i18n.ts
new file mode 100644
index 0000000..1dc336a
--- /dev/null
+++ b/frontend/src/i18n.ts
@@ -0,0 +1,23 @@
+import i18n from 'i18next';
+import { initReactI18next } from 'react-i18next';
+
+import en from '../data/i18n/en.json';
+import zhHans from '../data/i18n/zh-Hans.json';
+import zhHant from '../data/i18n/zh-Hant.json';
+
+i18n
+  .use(initReactI18next)
+  .init({
+    resources: {
+      en: { translation: en },
+      'zh-Hans': { translation: zhHans },
+      'zh-Hant': { translation: zhHant },
+    },
+    lng: 'zh-Hans',
+    fallbackLng: 'en',
+    interpolation: {
+      escapeValue: false,
+    },
+  });
+
+export default i18n;
diff --git a/frontend/src/index.css b/frontend/src/index.css
new file mode 100644
index 0000000..0229963
--- /dev/null
+++ b/frontend/src/index.css
@@ -0,0 +1,3 @@
+@import "tailwindcss";
+
+@custom-variant dark (&:where(.dark, .dark *));
\ No newline at end of file
diff --git a/frontend/src/main.tsx b/frontend/src/main.tsx
new file mode 100644
index 0000000..cc4e722
--- /dev/null
+++ b/frontend/src/main.tsx
@@ -0,0 +1,11 @@
+import { StrictMode } from 'react';
+import { createRoot } from 'react-dom/client';
+import App from './App.tsx';
+import './index.css';
+import './i18n';
+
+createRoot(document.getElementById('root')!).render(
+  <StrictMode>
+    <App />
+  </StrictMode>
+);
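Components anywhere in the tree can read and set the theme through the useTheme hook added above. A usage sketch (ThemeStatus is hypothetical and not part of this patch; the import path assumes a component directly under src/):

    import { useTheme } from './hooks/useTheme';

    // Hypothetical consumer: toggles between explicit light and dark.
    function ThemeStatus() {
      const { theme, setTheme } = useTheme();
      return (
        <button onClick={() => setTheme(theme === 'dark' ? 'light' : 'dark')}>
          Current theme: {theme}
        </button>
      );
    }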
diff --git a/frontend/src/pages/Article.tsx b/frontend/src/pages/Article.tsx
new file mode 100644
index 0000000..ecb089c
--- /dev/null
+++ b/frontend/src/pages/Article.tsx
@@ -0,0 +1,30 @@
+import { useParams } from 'react-router-dom';
+import { useTranslation } from 'react-i18next';
+import { useState, useEffect } from 'react';
+
+export default function Article() {
+  const { articleId } = useParams();
+  const { i18n } = useTranslation();
+  const [article, setArticle] = useState<{
+    content: string;
+    metadata: any;
+  } | null>(null);
+
+  useEffect(() => {
+    // In a real application, we would fetch the article content here
+    // based on the articleId and current language
+    console.log(`Fetching article ${articleId} in ${i18n.language}`);
+  }, [articleId, i18n.language]);
+
+  if (!article) {
+    return <div>Loading...</div>;
+  }
+
+  return (
+    <article>
+      <div dangerouslySetInnerHTML={{ __html: article.content }} />
+    </article>
+  );
+}
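The effect above only logs; a sketch of what the real fetch could look like as a drop-in replacement inside the same component (the endpoint and response shape are assumptions, not defined by this patch):

    useEffect(() => {
      const controller = new AbortController();
      // Hypothetical endpoint; the actual backend route is not specified here.
      fetch(`/api/v1/posts/${articleId}?lang=${i18n.language}`, { signal: controller.signal })
        .then((res) => res.json())
        .then(setArticle)
        .catch((err) => {
          if (err instanceof Error && err.name !== 'AbortError') console.error(err);
        });
      // Abort the in-flight request when articleId or language changes.
      return () => controller.abort();
    }, [articleId, i18n.language]);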
diff --git a/frontend/src/pages/Daily.tsx b/frontend/src/pages/Daily.tsx
new file mode 100644
index 0000000..f2272d7
--- /dev/null
+++ b/frontend/src/pages/Daily.tsx
@@ -0,0 +1,15 @@
+import { useTranslation } from 'react-i18next';
+
+export default function Daily() {
+  const { t } = useTranslation();
+
+  return (
+    <div>
+      <h1>{t('nav.daily')}</h1>
+      <div>
+        {/* Daily content will be rendered here */}
+        <p>Coming soon...</p>
+      </div>
+    </div>
+  );
+}
diff --git a/frontend/src/pages/Home.tsx b/frontend/src/pages/Home.tsx
new file mode 100644
index 0000000..24df208
--- /dev/null
+++ b/frontend/src/pages/Home.tsx
@@ -0,0 +1,17 @@
+import { useTranslation } from 'react-i18next';
+import { Link } from 'react-router-dom';
+
+export default function Home() {
+  const { t } = useTranslation();
+
+  return (
+    <div>
+      <div>
+        <h2>
+          Latest Articles
+        </h2>
+        <div>
+          {/* Article cards will be rendered here */}
+        </div>
+      </div>
+    </div>
+  );
+}
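When the card area above is filled in, the imported Link would tie each card to the article route. A sketch (PostSummary, the card markup, and the `/article/` path are assumptions):

    // Hypothetical summary shape and card for the list above.
    interface PostSummary {
      slug: string;
      title: string;
      excerpt: string;
    }

    function ArticleCard({ post }: { post: PostSummary }) {
      return (
        <Link to={`/article/${post.slug}`}>
          <h3>{post.title}</h3>
          <p>{post.excerpt}</p>
        </Link>
      );
    }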
diff --git a/frontend/src/vite-env.d.ts b/frontend/src/vite-env.d.ts
new file mode 100644
index 0000000..11f02fe
--- /dev/null
+++ b/frontend/src/vite-env.d.ts
@@ -0,0 +1 @@
+/// <reference types="vite/client" />
diff --git a/frontend/tailwind.config.js b/frontend/tailwind.config.js
new file mode 100644
index 0000000..5fb7967
--- /dev/null
+++ b/frontend/tailwind.config.js
@@ -0,0 +1,14 @@
+/** @type {import('tailwindcss').Config} */
+export default {
+  content: [
+    "./index.html",
+    "./src/**/*.{js,ts,jsx,tsx}",
+  ],
+  darkMode: 'class',
+  theme: {
+    extend: {},
+  },
+  plugins: [
+    require('@tailwindcss/typography'),
+  ],
+}
diff --git a/frontend/tsconfig.app.json b/frontend/tsconfig.app.json
new file mode 100644
index 0000000..f0a2350
--- /dev/null
+++ b/frontend/tsconfig.app.json
@@ -0,0 +1,24 @@
+{
+  "compilerOptions": {
+    "target": "ES2020",
+    "useDefineForClassFields": true,
+    "lib": ["ES2020", "DOM", "DOM.Iterable"],
+    "module": "ESNext",
+    "skipLibCheck": true,
+
+    /* Bundler mode */
+    "moduleResolution": "bundler",
+    "allowImportingTsExtensions": true,
+    "isolatedModules": true,
+    "moduleDetection": "force",
+    "noEmit": true,
+    "jsx": "react-jsx",
+
+    /* Linting */
+    "strict": true,
+    "noUnusedLocals": true,
+    "noUnusedParameters": true,
+    "noFallthroughCasesInSwitch": true
+  },
+  "include": ["src"]
+}
diff --git a/frontend/tsconfig.json b/frontend/tsconfig.json
new file mode 100644
index 0000000..1ffef60
--- /dev/null
+++ b/frontend/tsconfig.json
@@ -0,0 +1,7 @@
+{
+  "files": [],
+  "references": [
+    { "path": "./tsconfig.app.json" },
+    { "path": "./tsconfig.node.json" }
+  ]
+}
diff --git a/frontend/tsconfig.node.json b/frontend/tsconfig.node.json
new file mode 100644
index 0000000..0d3d714
--- /dev/null
+++ b/frontend/tsconfig.node.json
@@ -0,0 +1,22 @@
+{
+  "compilerOptions": {
+    "target": "ES2022",
+    "lib": ["ES2023"],
+    "module": "ESNext",
+    "skipLibCheck": true,
+
+    /* Bundler mode */
+    "moduleResolution": "bundler",
+    "allowImportingTsExtensions": true,
+    "isolatedModules": true,
+    "moduleDetection": "force",
+    "noEmit": true,
+
+    /* Linting */
+    "strict": true,
+    "noUnusedLocals": true,
+    "noUnusedParameters": true,
+    "noFallthroughCasesInSwitch": true
+  },
+  "include": ["vite.config.ts"]
+}
diff --git a/frontend/vite.config.ts b/frontend/vite.config.ts
new file mode 100644
index 0000000..fc56097
--- /dev/null
+++ b/frontend/vite.config.ts
@@ -0,0 +1,14 @@
+import { defineConfig } from 'vite';
+import react from '@vitejs/plugin-react';
+import tailwindcss from "@tailwindcss/vite";
+
+// https://vitejs.dev/config/
+export default defineConfig({
+  plugins: [
+    react(),
+    tailwindcss()
+  ],
+  optimizeDeps: {
+    exclude: ['lucide-react'],
+  },
+});
diff --git a/package.json b/package.json
new file mode 100644
index 0000000..87cf918
--- /dev/null
+++ b/package.json
@@ -0,0 +1,13 @@
+{
+  "name": "tss-rocks",
+  "private": true,
+  "workspaces": [
+    "api",
+    "frontend"
+  ],
+  "scripts": {
+    "api:build": "pnpm --filter @tss-rocks/api build",
+    "dev": "pnpm --filter @tss-rocks/frontend dev",
+    "build": "pnpm api:build && pnpm --filter @tss-rocks/frontend build"
+  }
+}
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
new file mode 100644
index 0000000..cce9a98
--- /dev/null
+++ b/pnpm-lock.yaml
@@ -0,0 +1,4214 @@
+lockfileVersion: '9.0'
+
+settings:
+  autoInstallPeers: true
+  excludeLinksFromLockfile: false
+
+importers:
+
+  .: {}
+
+  api:
+    devDependencies:
+      '@redocly/cli':
+        specifier: ^1.8.0
+        version: 1.30.0
+
+  frontend:
+    dependencies:
+      '@headlessui/react':
+        specifier: ^2.2.0
+        version:
2.2.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@tss-rocks/api': + specifier: workspace:* + version: link:../api + '@types/markdown-it': + specifier: ^14.1.2 + version: 14.1.2 + i18next: + specifier: ^24.2.2 + version: 24.2.2(typescript@5.7.3) + lucide-react: + specifier: ^0.474.0 + version: 0.474.0(react@19.0.0) + markdown-it: + specifier: ^14.1.0 + version: 14.1.0 + react: + specifier: ^19.0.0 + version: 19.0.0 + react-dom: + specifier: ^19.0.0 + version: 19.0.0(react@19.0.0) + react-i18next: + specifier: ^15.4.0 + version: 15.4.1(i18next@24.2.2(typescript@5.7.3))(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + react-icons: + specifier: ^5.4.0 + version: 5.5.0(react@19.0.0) + react-router-dom: + specifier: ^7.1.5 + version: 7.2.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + devDependencies: + '@eslint/js': + specifier: ^9.9.1 + version: 9.20.0 + '@tailwindcss/postcss': + specifier: ^4.0.3 + version: 4.0.7 + '@tailwindcss/typography': + specifier: ^0.5.16 + version: 0.5.16(tailwindcss@4.0.7) + '@tailwindcss/vite': + specifier: ^4.0.3 + version: 4.0.7(vite@6.1.1(@types/node@22.13.4)(jiti@2.4.2)(lightningcss@1.29.1)) + '@types/react': + specifier: ^19.0.8 + version: 19.0.10 + '@types/react-dom': + specifier: ^19.0.3 + version: 19.0.4(@types/react@19.0.10) + '@vitejs/plugin-react': + specifier: ^4.3.1 + version: 4.3.4(vite@6.1.1(@types/node@22.13.4)(jiti@2.4.2)(lightningcss@1.29.1)) + autoprefixer: + specifier: ^10.4.18 + version: 10.4.20(postcss@8.5.3) + eslint: + specifier: ^9.9.1 + version: 9.20.1(jiti@2.4.2) + eslint-plugin-react-hooks: + specifier: ^5.1.0-rc.0 + version: 5.1.0(eslint@9.20.1(jiti@2.4.2)) + eslint-plugin-react-refresh: + specifier: ^0.4.11 + version: 0.4.19(eslint@9.20.1(jiti@2.4.2)) + globals: + specifier: ^15.9.0 + version: 15.15.0 + postcss: + specifier: ^8.4.35 + version: 8.5.3 + tailwindcss: + specifier: ^4.0.3 + version: 4.0.7 + typescript: + specifier: ^5.5.3 + version: 5.7.3 + typescript-eslint: + specifier: ^8.3.0 + version: 8.24.1(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3) + vite: + specifier: ^6.1.0 + version: 6.1.1(@types/node@22.13.4)(jiti@2.4.2)(lightningcss@1.29.1) + +packages: + + '@alloc/quick-lru@5.2.0': + resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==} + engines: {node: '>=10'} + + '@ampproject/remapping@2.3.0': + resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} + engines: {node: '>=6.0.0'} + + '@babel/code-frame@7.26.2': + resolution: {integrity: sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==} + engines: {node: '>=6.9.0'} + + '@babel/compat-data@7.26.8': + resolution: {integrity: sha512-oH5UPLMWR3L2wEFLnFJ1TZXqHufiTKAiLfqw5zkhS4dKXLJ10yVztfil/twG8EDTA4F/tvVNw9nOl4ZMslB8rQ==} + engines: {node: '>=6.9.0'} + + '@babel/core@7.26.9': + resolution: {integrity: sha512-lWBYIrF7qK5+GjY5Uy+/hEgp8OJWOD/rpy74GplYRhEauvbHDeFB8t5hPOZxCZ0Oxf4Cc36tK51/l3ymJysrKw==} + engines: {node: '>=6.9.0'} + + '@babel/generator@7.26.9': + resolution: {integrity: sha512-kEWdzjOAUMW4hAyrzJ0ZaTOu9OmpyDIQicIh0zg0EEcEkYXZb2TjtBhnHi2ViX7PKwZqF4xwqfAm299/QMP3lg==} + engines: {node: '>=6.9.0'} + + '@babel/helper-compilation-targets@7.26.5': + resolution: {integrity: sha512-IXuyn5EkouFJscIDuFF5EsiSolseme1s0CZB+QxVugqJLYmKdxI1VfIBOst0SUu4rnk2Z7kqTwmoO1lp3HIfnA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-imports@7.25.9': + resolution: {integrity: 
sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-transforms@7.26.0': + resolution: {integrity: sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/helper-plugin-utils@7.26.5': + resolution: {integrity: sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg==} + engines: {node: '>=6.9.0'} + + '@babel/helper-string-parser@7.25.9': + resolution: {integrity: sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.25.9': + resolution: {integrity: sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-option@7.25.9': + resolution: {integrity: sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==} + engines: {node: '>=6.9.0'} + + '@babel/helpers@7.26.9': + resolution: {integrity: sha512-Mz/4+y8udxBKdmzt/UjPACs4G3j5SshJJEFFKxlCGPydG4JAHXxjWjAwjd09tf6oINvl1VfMJo+nB7H2YKQ0dA==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.26.9': + resolution: {integrity: sha512-81NWa1njQblgZbQHxWHpxxCzNsa3ZwvFqpUg7P+NNUU6f3UU2jBEg4OlF/J6rl8+PQGh1q6/zWScd001YwcA5A==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/plugin-transform-react-jsx-self@7.25.9': + resolution: {integrity: sha512-y8quW6p0WHkEhmErnfe58r7x0A70uKphQm8Sp8cV7tjNQwK56sNVK0M73LK3WuYmsuyrftut4xAkjjgU0twaMg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-react-jsx-source@7.25.9': + resolution: {integrity: sha512-+iqjT8xmXhhYv4/uiYd8FNQsraMFZIfxVSqxxVSZP0WbbSAWvBXAul0m/zu+7Vv4O/3WtApy9pmaTMiumEZgfg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/runtime@7.26.9': + resolution: {integrity: sha512-aA63XwOkcl4xxQa3HjPMqOP6LiK0ZDv3mUPYEFXkpHbaFjtGggE1A61FjFzJnB+p7/oy2gA8E+rcBNl/zC1tMg==} + engines: {node: '>=6.9.0'} + + '@babel/template@7.26.9': + resolution: {integrity: sha512-qyRplbeIpNZhmzOysF/wFMuP9sctmh2cFzRAZOn1YapxBsE1i9bJIY586R/WBLfLcmcBlM8ROBiQURnnNy+zfA==} + engines: {node: '>=6.9.0'} + + '@babel/traverse@7.26.9': + resolution: {integrity: sha512-ZYW7L+pL8ahU5fXmNbPF+iZFHCv5scFak7MZ9bwaRPLUhHh7QQEMjZUg0HevihoqCM5iSYHN61EyCoZvqC+bxg==} + engines: {node: '>=6.9.0'} + + '@babel/types@7.26.9': + resolution: {integrity: sha512-Y3IR1cRnOxOCDvMmNiym7XpXQ93iGDDPHx+Zj+NM+rg0fBaShfQLkg+hKPaZCEvg5N/LeCo4+Rj/i3FuJsIQaw==} + engines: {node: '>=6.9.0'} + + '@emotion/is-prop-valid@1.2.2': + resolution: {integrity: sha512-uNsoYd37AFmaCdXlg6EYD1KaPOaRWRByMCYzbKUX4+hhMfrxdVSelShywL4JVaAeM/eHUOSprYBQls+/neX3pw==} + + '@emotion/memoize@0.8.1': + resolution: {integrity: sha512-W2P2c/VRW1/1tLox0mVUalvnWXxavmv/Oum2aPsRcoDJuob75FC3Y8FbpfLwUegRcxINtGUMPq0tFCvYNTBXNA==} + + '@emotion/unitless@0.8.1': + resolution: {integrity: sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ==} + + '@esbuild/aix-ppc64@0.24.2': + resolution: {integrity: sha512-thpVCb/rhxE/BnMLQ7GReQLLN8q9qbHmI55F4489/ByVg2aQaQ6kbcLb6FHkocZzQhxc4gx0sCk0tJkKBFzDhA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.24.2': + resolution: {integrity: 
sha512-cNLgeqCqV8WxfcTIOeL4OAtSmL8JjcN6m09XIgro1Wi7cF4t/THaWEa7eL5CMoMBdjoHOTh/vwTO/o2TRXIyzg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.24.2': + resolution: {integrity: sha512-tmwl4hJkCfNHwFB3nBa8z1Uy3ypZpxqxfTQOcHX+xRByyYgunVbZ9MzUUfb0RxaHIMnbHagwAxuTL+tnNM+1/Q==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.24.2': + resolution: {integrity: sha512-B6Q0YQDqMx9D7rvIcsXfmJfvUYLoP722bgfBlO5cGvNVb5V/+Y7nhBE3mHV9OpxBf4eAS2S68KZztiPaWq4XYw==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.24.2': + resolution: {integrity: sha512-kj3AnYWc+CekmZnS5IPu9D+HWtUI49hbnyqk0FLEJDbzCIQt7hg7ucF1SQAilhtYpIujfaHr6O0UHlzzSPdOeA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.24.2': + resolution: {integrity: sha512-WeSrmwwHaPkNR5H3yYfowhZcbriGqooyu3zI/3GGpF8AyUdsrrP0X6KumITGA9WOyiJavnGZUwPGvxvwfWPHIA==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.24.2': + resolution: {integrity: sha512-UN8HXjtJ0k/Mj6a9+5u6+2eZ2ERD7Edt1Q9IZiB5UZAIdPnVKDoG7mdTVGhHJIeEml60JteamR3qhsr1r8gXvg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.24.2': + resolution: {integrity: sha512-TvW7wE/89PYW+IevEJXZ5sF6gJRDY/14hyIGFXdIucxCsbRmLUcjseQu1SyTko+2idmCw94TgyaEZi9HUSOe3Q==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.24.2': + resolution: {integrity: sha512-7HnAD6074BW43YvvUmE/35Id9/NB7BeX5EoNkK9obndmZBUk8xmJJeU7DwmUeN7tkysslb2eSl6CTrYz6oEMQg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.24.2': + resolution: {integrity: sha512-n0WRM/gWIdU29J57hJyUdIsk0WarGd6To0s+Y+LwvlC55wt+GT/OgkwoXCXvIue1i1sSNWblHEig00GBWiJgfA==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.24.2': + resolution: {integrity: sha512-sfv0tGPQhcZOgTKO3oBE9xpHuUqguHvSo4jl+wjnKwFpapx+vUDcawbwPNuBIAYdRAvIDBfZVvXprIj3HA+Ugw==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.24.2': + resolution: {integrity: sha512-CN9AZr8kEndGooS35ntToZLTQLHEjtVB5n7dl8ZcTZMonJ7CCfStrYhrzF97eAecqVbVJ7APOEe18RPI4KLhwQ==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.24.2': + resolution: {integrity: sha512-iMkk7qr/wl3exJATwkISxI7kTcmHKE+BlymIAbHO8xanq/TjHaaVThFF6ipWzPHryoFsesNQJPE/3wFJw4+huw==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.24.2': + resolution: {integrity: sha512-shsVrgCZ57Vr2L8mm39kO5PPIb+843FStGt7sGGoqiiWYconSxwTiuswC1VJZLCjNiMLAMh34jg4VSEQb+iEbw==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.24.2': + resolution: {integrity: sha512-4eSFWnU9Hhd68fW16GD0TINewo1L6dRrB+oLNNbYyMUAeOD2yCK5KXGK1GH4qD/kT+bTEXjsyTCiJGHPZ3eM9Q==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.24.2': + resolution: {integrity: sha512-S0Bh0A53b0YHL2XEXC20bHLuGMOhFDO6GN4b3YjRLK//Ep3ql3erpNcPlEFed93hsQAjAQDNsvcK+hV90FubSw==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.24.2': + resolution: {integrity: sha512-8Qi4nQcCTbLnK9WoMjdC9NiTG6/E38RNICU6sUNqK0QFxCYgoARqVqxdFmWkdonVsvGqWhmm7MO0jyTqLqwj0Q==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-arm64@0.24.2': + resolution: {integrity: sha512-wuLK/VztRRpMt9zyHSazyCVdCXlpHkKm34WUyinD2lzK07FAHTq0KQvZZlXikNWkDGoT6x3TD51jKQ7gMVpopw==} + engines: {node: '>=18'} + cpu: 
[arm64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.24.2': + resolution: {integrity: sha512-VefFaQUc4FMmJuAxmIHgUmfNiLXY438XrL4GDNV1Y1H/RW3qow68xTwjZKfj/+Plp9NANmzbH5R40Meudu8mmw==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-arm64@0.24.2': + resolution: {integrity: sha512-YQbi46SBct6iKnszhSvdluqDmxCJA+Pu280Av9WICNwQmMxV7nLRHZfjQzwbPs3jeWnuAhE9Jy0NrnJ12Oz+0A==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.24.2': + resolution: {integrity: sha512-+iDS6zpNM6EnJyWv0bMGLWSWeXGN/HTaF/LXHXHwejGsVi+ooqDfMCCTerNFxEkM3wYVcExkeGXNqshc9iMaOA==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + + '@esbuild/sunos-x64@0.24.2': + resolution: {integrity: sha512-hTdsW27jcktEvpwNHJU4ZwWFGkz2zRJUz8pvddmXPtXDzVKTTINmlmga3ZzwcuMpUvLw7JkLy9QLKyGpD2Yxig==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.24.2': + resolution: {integrity: sha512-LihEQ2BBKVFLOC9ZItT9iFprsE9tqjDjnbulhHoFxYQtQfai7qfluVODIYxt1PgdoyQkz23+01rzwNwYfutxUQ==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.24.2': + resolution: {integrity: sha512-q+iGUwfs8tncmFC9pcnD5IvRHAzmbwQ3GPS5/ceCyHdjXubwQWI12MKWSNSMYLJMq23/IUCvJMS76PDqXe1fxA==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.24.2': + resolution: {integrity: sha512-7VTgWzgMGvup6aSqDPLiW5zHaxYJGTO4OokMjIlrCtf+VpEL+cXKtCvg723iguPYI5oaUNdS+/V7OU2gvXVWEg==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + + '@eslint-community/eslint-utils@4.4.1': + resolution: {integrity: sha512-s3O3waFUrMV8P/XaF/+ZTp1X9XBZW1a4B97ZnjQF2KYWaFD2A8KyFBsrsfSjEmjn3RGWAIuvlneuZm3CUK3jbA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + + '@eslint-community/regexpp@4.12.1': + resolution: {integrity: sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + + '@eslint/config-array@0.19.2': + resolution: {integrity: sha512-GNKqxfHG2ySmJOBSHg7LxeUx4xpuCoFjacmlCoYWEbaPXLwvfIjixRI12xCQZeULksQb23uiA8F40w5TojpV7w==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/core@0.11.0': + resolution: {integrity: sha512-DWUB2pksgNEb6Bz2fggIy1wh6fGgZP4Xyy/Mt0QZPiloKKXerbqq9D3SBQTlCRYOrcRPu4vuz+CGjwdfqxnoWA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/eslintrc@3.2.0': + resolution: {integrity: sha512-grOjVNN8P3hjJn/eIETF1wwd12DdnwFDoyceUJLYYdkpbwq3nLi+4fqrTAONx7XDALqlL220wC/RHSC/QTI/0w==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/js@9.20.0': + resolution: {integrity: sha512-iZA07H9io9Wn836aVTytRaNqh00Sad+EamwOVJT12GTLw1VGMFV/4JaME+JjLtr9fiGaoWgYnS54wrfWsSs4oQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/object-schema@2.1.6': + resolution: {integrity: sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@eslint/plugin-kit@0.2.6': + resolution: {integrity: sha512-+0TjwR1eAUdZtvv/ir1mGX+v0tUoR3VEPB8Up0LLJC+whRW0GgBBtpbOkg/a/U4Dxa6l5a3l9AJ1aWIQVyoWJA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@exodus/schemasafe@1.3.0': + resolution: {integrity: sha512-5Aap/GaRupgNx/feGBwLLTVv8OQFfv3pq2lPRzPg9R+IOBnDgghTGW7l7EuVXOvg5cc/xSAlRW8rBrjIC3Nvqw==} + + '@floating-ui/core@1.6.9': + resolution: {integrity: sha512-uMXCuQ3BItDUbAMhIXw7UPXRfAlOAvZzdK9BWpE60MCn+Svt3aLn9jsPTi/WNGlRUu2uI0v5S7JiIUsbsvh3fw==} + + 
'@floating-ui/dom@1.6.13': + resolution: {integrity: sha512-umqzocjDgNRGTuO7Q8CU32dkHkECqI8ZdMZ5Swb6QAM0t5rnlrN3lGo1hdpscRd3WS8T6DKYK4ephgIH9iRh3w==} + + '@floating-ui/react-dom@2.1.2': + resolution: {integrity: sha512-06okr5cgPzMNBy+Ycse2A6udMi4bqwW/zgBF/rwjcNqWkyr82Mcg8b0vjX8OJpZFy/FKjJmw6wV7t44kK6kW7A==} + peerDependencies: + react: '>=16.8.0' + react-dom: '>=16.8.0' + + '@floating-ui/react@0.26.28': + resolution: {integrity: sha512-yORQuuAtVpiRjpMhdc0wJj06b9JFjrYF4qp96j++v2NBpbi6SEGF7donUJ3TMieerQ6qVkAv1tgr7L4r5roTqw==} + peerDependencies: + react: '>=16.8.0' + react-dom: '>=16.8.0' + + '@floating-ui/utils@0.2.9': + resolution: {integrity: sha512-MDWhGtE+eHw5JW7lq4qhc5yRLS11ERl1c7Z6Xd0a58DozHES6EnNNwUWbMiG4J9Cgj053Bhk8zvlhFYKVhULwg==} + + '@headlessui/react@2.2.0': + resolution: {integrity: sha512-RzCEg+LXsuI7mHiSomsu/gBJSjpupm6A1qIZ5sWjd7JhARNlMiSA4kKfJpCKwU9tE+zMRterhhrP74PvfJrpXQ==} + engines: {node: '>=10'} + peerDependencies: + react: ^18 || ^19 || ^19.0.0-rc + react-dom: ^18 || ^19 || ^19.0.0-rc + + '@humanfs/core@0.19.1': + resolution: {integrity: sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==} + engines: {node: '>=18.18.0'} + + '@humanfs/node@0.16.6': + resolution: {integrity: sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw==} + engines: {node: '>=18.18.0'} + + '@humanwhocodes/module-importer@1.0.1': + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + + '@humanwhocodes/retry@0.3.1': + resolution: {integrity: sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA==} + engines: {node: '>=18.18'} + + '@humanwhocodes/retry@0.4.2': + resolution: {integrity: sha512-xeO57FpIu4p1Ri3Jq/EXq4ClRm86dVF2z/+kvFnyqVYRavTZmaFaUBbWCOuuTh0o/g7DSsk6kc2vrS4Vl5oPOQ==} + engines: {node: '>=18.18'} + + '@jridgewell/gen-mapping@0.3.8': + resolution: {integrity: sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==} + engines: {node: '>=6.0.0'} + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + '@jridgewell/set-array@1.2.1': + resolution: {integrity: sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.0': + resolution: {integrity: sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==} + + '@jridgewell/trace-mapping@0.3.25': + resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} + + '@nodelib/fs.scandir@2.1.5': + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: '>= 8'} + + '@nodelib/fs.stat@2.0.5': + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + + '@nodelib/fs.walk@1.2.8': + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + + '@opentelemetry/api-logs@0.53.0': + resolution: {integrity: sha512-8HArjKx+RaAI8uEIgcORbZIPklyh1YLjPSBus8hjRmvLi6DeFzgOcdZ7KwPabKj8mXF8dX0hyfAyGfycz0DbFw==} + 
engines: {node: '>=14'} + + '@opentelemetry/api@1.9.0': + resolution: {integrity: sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==} + engines: {node: '>=8.0.0'} + + '@opentelemetry/context-async-hooks@1.26.0': + resolution: {integrity: sha512-HedpXXYzzbaoutw6DFLWLDket2FwLkLpil4hGCZ1xYEIMTcivdfwEOISgdbLEWyG3HW52gTq2V9mOVJrONgiwg==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/core@1.26.0': + resolution: {integrity: sha512-1iKxXXE8415Cdv0yjG3G6hQnB5eVEsJce3QaawX8SjDn0mAS0ZM8fAbZZJD4ajvhC15cePvosSCut404KrIIvQ==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/exporter-trace-otlp-http@0.53.0': + resolution: {integrity: sha512-m7F5ZTq+V9mKGWYpX8EnZ7NjoqAU7VemQ1E2HAG+W/u0wpY1x0OmbxAXfGKFHCspdJk8UKlwPGrpcB8nay3P8A==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': ^1.0.0 + + '@opentelemetry/otlp-exporter-base@0.53.0': + resolution: {integrity: sha512-UCWPreGQEhD6FjBaeDuXhiMf6kkBODF0ZQzrk/tuQcaVDJ+dDQ/xhJp192H9yWnKxVpEjFrSSLnpqmX4VwX+eA==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': ^1.0.0 + + '@opentelemetry/otlp-transformer@0.53.0': + resolution: {integrity: sha512-rM0sDA9HD8dluwuBxLetUmoqGJKSAbWenwD65KY9iZhUxdBHRLrIdrABfNDP7aiTjcgK8XFyTn5fhDz7N+W6DA==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/propagator-b3@1.26.0': + resolution: {integrity: sha512-vvVkQLQ/lGGyEy9GT8uFnI047pajSOVnZI2poJqVGD3nJ+B9sFGdlHNnQKophE3lHfnIH0pw2ubrCTjZCgIj+Q==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/propagator-jaeger@1.26.0': + resolution: {integrity: sha512-DelFGkCdaxA1C/QA0Xilszfr0t4YbGd3DjxiCDPh34lfnFr+VkkrjV9S8ZTJvAzfdKERXhfOxIKBoGPJwoSz7Q==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/resources@1.26.0': + resolution: {integrity: sha512-CPNYchBE7MBecCSVy0HKpUISEeJOniWqcHaAHpmasZ3j9o6V3AyBzhRc90jdmemq0HOxDr6ylhUbDhBqqPpeNw==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/sdk-logs@0.53.0': + resolution: {integrity: sha512-dhSisnEgIj/vJZXZV6f6KcTnyLDx/VuQ6l3ejuZpMpPlh9S1qMHiZU9NMmOkVkwwHkMy3G6mEBwdP23vUZVr4g==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.4.0 <1.10.0' + + '@opentelemetry/sdk-metrics@1.26.0': + resolution: {integrity: sha512-0SvDXmou/JjzSDOjUmetAAvcKQW6ZrvosU0rkbDGpXvvZN+pQF6JbK/Kd4hNdK4q/22yeruqvukXEJyySTzyTQ==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-trace-base@1.26.0': + resolution: {integrity: sha512-olWQldtvbK4v22ymrKLbIcBi9L2SpMO84sCPY54IVsJhP9fRsxJT194C/AVaAuJzLE30EdhhM1VmvVYR7az+cw==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/sdk-trace-node@1.26.0': + resolution: {integrity: sha512-Fj5IVKrj0yeUwlewCRwzOVcr5avTuNnMHWf7GPc1t6WaT78J6CJyF3saZ/0RkZfdeNO8IcBl/bNcWMVZBMRW8Q==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/semantic-conventions@1.27.0': + resolution: {integrity: sha512-sAay1RrB+ONOem0OZanAR1ZI/k7yDpnOQSQmTMuGImUQb2y8EbSaCJ94FQluM74xoU03vlb2d2U90hZluL6nQg==} + engines: {node: '>=14'} + + '@protobufjs/aspromise@1.1.2': + resolution: {integrity: 
sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==} + + '@protobufjs/base64@1.1.2': + resolution: {integrity: sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==} + + '@protobufjs/codegen@2.0.4': + resolution: {integrity: sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==} + + '@protobufjs/eventemitter@1.1.0': + resolution: {integrity: sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==} + + '@protobufjs/fetch@1.1.0': + resolution: {integrity: sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==} + + '@protobufjs/float@1.0.2': + resolution: {integrity: sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==} + + '@protobufjs/inquire@1.1.0': + resolution: {integrity: sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==} + + '@protobufjs/path@1.1.2': + resolution: {integrity: sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==} + + '@protobufjs/pool@1.1.0': + resolution: {integrity: sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==} + + '@protobufjs/utf8@1.1.0': + resolution: {integrity: sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==} + + '@react-aria/focus@3.19.1': + resolution: {integrity: sha512-bix9Bu1Ue7RPcYmjwcjhB14BMu2qzfJ3tMQLqDc9pweJA66nOw8DThy3IfVr8Z7j2PHktOLf9kcbiZpydKHqzg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@react-aria/interactions@3.23.0': + resolution: {integrity: sha512-0qR1atBIWrb7FzQ+Tmr3s8uH5mQdyRH78n0krYaG8tng9+u1JlSi8DGRSaC9ezKyNB84m7vHT207xnHXGeJ3Fg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@react-aria/ssr@3.9.7': + resolution: {integrity: sha512-GQygZaGlmYjmYM+tiNBA5C6acmiDWF52Nqd40bBp0Znk4M4hP+LTmI0lpI1BuKMw45T8RIhrAsICIfKwZvi2Gg==} + engines: {node: '>= 12'} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@react-aria/utils@3.27.0': + resolution: {integrity: sha512-p681OtApnKOdbeN8ITfnnYqfdHS0z7GE+4l8EXlfLnr70Rp/9xicBO6d2rU+V/B3JujDw2gPWxYKEnEeh0CGCw==} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@react-stately/utils@3.10.5': + resolution: {integrity: sha512-iMQSGcpaecghDIh3mZEpZfoFH3ExBwTtuBEcvZ2XnGzCgQjeYXcMdIUwAfVQLXFTdHUHGF6Gu6/dFrYsCzySBQ==} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@react-types/shared@3.27.0': + resolution: {integrity: sha512-gvznmLhi6JPEf0bsq7SwRYTHAKKq/wcmKqFez9sRdbED+SPMUmK5omfZ6w3EwUFQHbYUa4zPBYedQ7Knv70RMw==} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@redocly/ajv@8.11.2': + resolution: {integrity: sha512-io1JpnwtIcvojV7QKDUSIuMN/ikdOUd1ReEnUnMKGfDVridQZ31J0MmIuqwuRjWDZfmvr+Q0MqCcfHM2gTivOg==} + + '@redocly/cli@1.30.0': + resolution: {integrity: sha512-YJPEUVe4DbE4uem8oiozHeEwNqGeY5xgWdEseBnjynPQEGLv70t3IPjrD5X36U2ocHh1QZspN24dvhgkLcn0/Q==} + engines: {node: '>=18.17.0', npm: '>=9.5.0'} + hasBin: true + + '@redocly/config@0.20.3': + resolution: {integrity: 
sha512-Nyyv1Bj7GgYwj/l46O0nkH1GTKWbO3Ixe7KFcn021aZipkZd+z8Vlu1BwkhqtVgivcKaClaExtWU/lDHkjBzag==} + + '@redocly/openapi-core@1.30.0': + resolution: {integrity: sha512-ZZc+FXKoQXJ9cOR7qRKHxOfKOsGCj2wSodklKdtM2FofzyjzvIwn1rksD5+9iJxvHuORPOPv3ppAHcM+iMr/Ag==} + engines: {node: '>=18.17.0', npm: '>=9.5.0'} + + '@rollup/rollup-android-arm-eabi@4.34.8': + resolution: {integrity: sha512-q217OSE8DTp8AFHuNHXo0Y86e1wtlfVrXiAlwkIvGRQv9zbc6mE3sjIVfwI8sYUyNxwOg0j/Vm1RKM04JcWLJw==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.34.8': + resolution: {integrity: sha512-Gigjz7mNWaOL9wCggvoK3jEIUUbGul656opstjaUSGC3eT0BM7PofdAJaBfPFWWkXNVAXbaQtC99OCg4sJv70Q==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.34.8': + resolution: {integrity: sha512-02rVdZ5tgdUNRxIUrFdcMBZQoaPMrxtwSb+/hOfBdqkatYHR3lZ2A2EGyHq2sGOd0Owk80oV3snlDASC24He3Q==} + cpu: [arm64] + os: [darwin] + + '@rollup/rollup-darwin-x64@4.34.8': + resolution: {integrity: sha512-qIP/elwR/tq/dYRx3lgwK31jkZvMiD6qUtOycLhTzCvrjbZ3LjQnEM9rNhSGpbLXVJYQ3rq39A6Re0h9tU2ynw==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-freebsd-arm64@4.34.8': + resolution: {integrity: sha512-IQNVXL9iY6NniYbTaOKdrlVP3XIqazBgJOVkddzJlqnCpRi/yAeSOa8PLcECFSQochzqApIOE1GHNu3pCz+BDA==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.34.8': + resolution: {integrity: sha512-TYXcHghgnCqYFiE3FT5QwXtOZqDj5GmaFNTNt3jNC+vh22dc/ukG2cG+pi75QO4kACohZzidsq7yKTKwq/Jq7Q==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.34.8': + resolution: {integrity: sha512-A4iphFGNkWRd+5m3VIGuqHnG3MVnqKe7Al57u9mwgbyZ2/xF9Jio72MaY7xxh+Y87VAHmGQr73qoKL9HPbXj1g==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm-musleabihf@4.34.8': + resolution: {integrity: sha512-S0lqKLfTm5u+QTxlFiAnb2J/2dgQqRy/XvziPtDd1rKZFXHTyYLoVL58M/XFwDI01AQCDIevGLbQrMAtdyanpA==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm64-gnu@4.34.8': + resolution: {integrity: sha512-jpz9YOuPiSkL4G4pqKrus0pn9aYwpImGkosRKwNi+sJSkz+WU3anZe6hi73StLOQdfXYXC7hUfsQlTnjMd3s1A==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-arm64-musl@4.34.8': + resolution: {integrity: sha512-KdSfaROOUJXgTVxJNAZ3KwkRc5nggDk+06P6lgi1HLv1hskgvxHUKZ4xtwHkVYJ1Rep4GNo+uEfycCRRxht7+Q==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-loongarch64-gnu@4.34.8': + resolution: {integrity: sha512-NyF4gcxwkMFRjgXBM6g2lkT58OWztZvw5KkV2K0qqSnUEqCVcqdh2jN4gQrTn/YUpAcNKyFHfoOZEer9nwo6uQ==} + cpu: [loong64] + os: [linux] + + '@rollup/rollup-linux-powerpc64le-gnu@4.34.8': + resolution: {integrity: sha512-LMJc999GkhGvktHU85zNTDImZVUCJ1z/MbAJTnviiWmmjyckP5aQsHtcujMjpNdMZPT2rQEDBlJfubhs3jsMfw==} + cpu: [ppc64] + os: [linux] + + '@rollup/rollup-linux-riscv64-gnu@4.34.8': + resolution: {integrity: sha512-xAQCAHPj8nJq1PI3z8CIZzXuXCstquz7cIOL73HHdXiRcKk8Ywwqtx2wrIy23EcTn4aZ2fLJNBB8d0tQENPCmw==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-s390x-gnu@4.34.8': + resolution: {integrity: sha512-DdePVk1NDEuc3fOe3dPPTb+rjMtuFw89gw6gVWxQFAuEqqSdDKnrwzZHrUYdac7A7dXl9Q2Vflxpme15gUWQFA==} + cpu: [s390x] + os: [linux] + + '@rollup/rollup-linux-x64-gnu@4.34.8': + resolution: {integrity: sha512-8y7ED8gjxITUltTUEJLQdgpbPh1sUQ0kMTmufRF/Ns5tI9TNMNlhWtmPKKHCU0SilX+3MJkZ0zERYYGIVBYHIA==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-linux-x64-musl@4.34.8': + resolution: {integrity: sha512-SCXcP0ZpGFIe7Ge+McxY5zKxiEI5ra+GT3QRxL0pMMtxPfpyLAKleZODi1zdRHkz5/BhueUrYtYVgubqe9JBNQ==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-win32-arm64-msvc@4.34.8': + resolution: {integrity: 
sha512-YHYsgzZgFJzTRbth4h7Or0m5O74Yda+hLin0irAIobkLQFRQd1qWmnoVfwmKm9TXIZVAD0nZ+GEb2ICicLyCnQ==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.34.8': + resolution: {integrity: sha512-r3NRQrXkHr4uWy5TOjTpTYojR9XmF0j/RYgKCef+Ag46FWUTltm5ziticv8LdNsDMehjJ543x/+TJAek/xBA2w==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-msvc@4.34.8': + resolution: {integrity: sha512-U0FaE5O1BCpZSeE6gBl3c5ObhePQSfk9vDRToMmTkbhCOgW4jqvtS5LGyQ76L1fH8sM0keRp4uDTsbjiUyjk0g==} + cpu: [x64] + os: [win32] + + '@swc/helpers@0.5.15': + resolution: {integrity: sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==} + + '@tailwindcss/node@4.0.7': + resolution: {integrity: sha512-dkFXufkbRB2mu3FPsW5xLAUWJyexpJA+/VtQj18k3SUiJVLdpgzBd1v1gRRcIpEJj7K5KpxBKfOXlZxT3ZZRuA==} + + '@tailwindcss/oxide-android-arm64@4.0.7': + resolution: {integrity: sha512-5iQXXcAeOHBZy8ASfHFm1k0O/9wR2E3tKh6+P+ilZZbQiMgu+qrnfpBWYPc3FPuQdWiWb73069WT5D+CAfx/tg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [android] + + '@tailwindcss/oxide-darwin-arm64@4.0.7': + resolution: {integrity: sha512-7yGZtEc5IgVYylqK/2B0yVqoofk4UAbkn1ygNpIJZyrOhbymsfr8uUFCueTu2fUxmAYIfMZ8waWo2dLg/NgLgg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + + '@tailwindcss/oxide-darwin-x64@4.0.7': + resolution: {integrity: sha512-tPQDV20fBjb26yWbPqT1ZSoDChomMCiXTKn4jupMSoMCFyU7+OJvIY1ryjqBuY622dEBJ8LnCDDWsnj1lX9nNQ==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + + '@tailwindcss/oxide-freebsd-x64@4.0.7': + resolution: {integrity: sha512-sZqJpTyTZiknU9LLHuByg5GKTW+u3FqM7q7myequAXxKOpAFiOfXpY710FuMY+gjzSapyRbDXJlsTQtCyiTo5w==} + engines: {node: '>= 10'} + cpu: [x64] + os: [freebsd] + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.0.7': + resolution: {integrity: sha512-PBgvULgeSswjd8cbZ91gdIcIDMdc3TUHV5XemEpxlqt9M8KoydJzkuB/Dt910jYdofOIaTWRL6adG9nJICvU4A==} + engines: {node: '>= 10'} + cpu: [arm] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-gnu@4.0.7': + resolution: {integrity: sha512-By/a2yeh+e9b+C67F88ndSwVJl2A3tcUDb29FbedDi+DZ4Mr07Oqw9Y1DrDrtHIDhIZ3bmmiL1dkH2YxrtV+zw==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-musl@4.0.7': + resolution: {integrity: sha512-WHYs3cpPEJb/ccyT20NOzopYQkl7JKncNBUbb77YFlwlXMVJLLV3nrXQKhr7DmZxz2ZXqjyUwsj2rdzd9stYdw==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-gnu@4.0.7': + resolution: {integrity: sha512-7bP1UyuX9kFxbOwkeIJhBZNevKYPXB6xZI37v09fqi6rqRJR8elybwjMUHm54GVP+UTtJ14ueB1K54Dy1tIO6w==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-musl@4.0.7': + resolution: {integrity: sha512-gBQIV8nL/LuhARNGeroqzXymMzzW5wQzqlteVqOVoqwEfpHOP3GMird5pGFbnpY+NP0fOlsZGrxxOPQ4W/84bQ==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@tailwindcss/oxide-win32-arm64-msvc@4.0.7': + resolution: {integrity: sha512-aH530NFfx0kpQpvYMfWoeG03zGnRCMVlQG8do/5XeahYydz+6SIBxA1tl/cyITSJyWZHyVt6GVNkXeAD30v0Xg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + + '@tailwindcss/oxide-win32-x64-msvc@4.0.7': + resolution: {integrity: sha512-8Cva6bbJN7ZJx320k7vxGGdU0ewmpfS5A4PudyzUuofdi8MgeINuiiWiPQ0VZCda/GX88K6qp+6UpDZNVr8HMQ==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + + '@tailwindcss/oxide@4.0.7': + resolution: {integrity: sha512-yr6w5YMgjy+B+zkJiJtIYGXW+HNYOPfRPtSs+aqLnKwdEzNrGv4ZuJh9hYJ3mcA+HMq/K1rtFV+KsEr65S558g==} + engines: {node: '>= 10'} + + '@tailwindcss/postcss@4.0.7': + resolution: {integrity: 
sha512-zXcKs1uGssVDlnsQ+iwrkul5GPKvsXPynGCuk/eXLx3DVhHlQKMpA6tXN2oO28x2ki1xRBTfadKiHy2taVvp7g==} + + '@tailwindcss/typography@0.5.16': + resolution: {integrity: sha512-0wDLwCVF5V3x3b1SGXPCDcdsbDHMBe+lkFzBRaHeLvNi+nrrnZ1lA18u+OTWO8iSWU2GxUOCvlXtDuqftc1oiA==} + peerDependencies: + tailwindcss: '>=3.0.0 || insiders || >=4.0.0-alpha.20 || >=4.0.0-beta.1' + + '@tailwindcss/vite@4.0.7': + resolution: {integrity: sha512-GYx5sxArfIMtdZCsxfya3S/efMmf4RvfqdiLUozkhmSFBNUFnYVodatpoO/en4/BsOIGvq/RB6HwcTLn9prFnQ==} + peerDependencies: + vite: ^5.2.0 || ^6 + + '@tanstack/react-virtual@3.13.0': + resolution: {integrity: sha512-CchF0NlLIowiM2GxtsoKBkXA4uqSnY2KvnXo+kyUFD4a4ll6+J0qzoRsUPMwXV/H26lRsxgJIr/YmjYum2oEjg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + '@tanstack/virtual-core@3.13.0': + resolution: {integrity: sha512-NBKJP3OIdmZY3COJdWkSonr50FMVIi+aj5ZJ7hI/DTpEKg2RMfo/KvP8A3B/zOSpMgIe52B5E2yn7rryULzA6g==} + + '@types/babel__core@7.20.5': + resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} + + '@types/babel__generator@7.6.8': + resolution: {integrity: sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==} + + '@types/babel__template@7.4.4': + resolution: {integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==} + + '@types/babel__traverse@7.20.6': + resolution: {integrity: sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg==} + + '@types/cookie@0.6.0': + resolution: {integrity: sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==} + + '@types/estree@1.0.6': + resolution: {integrity: sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==} + + '@types/json-schema@7.0.15': + resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} + + '@types/linkify-it@5.0.0': + resolution: {integrity: sha512-sVDA58zAw4eWAffKOaQH5/5j3XeayukzDk+ewSsnv3p4yJEZHCCzMDiZM8e0OUrRvmpGZ85jf4yDHkHsgBNr9Q==} + + '@types/markdown-it@14.1.2': + resolution: {integrity: sha512-promo4eFwuiW+TfGxhi+0x3czqTYJkG8qB17ZUJiVF10Xm7NLVRSLUsfRTU/6h1e24VvRnXCx+hG7li58lkzog==} + + '@types/mdurl@2.0.0': + resolution: {integrity: sha512-RGdgjQUZba5p6QEFAVx2OGb8rQDL/cPRG7GiedRzMcJ1tYnUANBncjbSB1NRGwbvjcPeikRABz2nshyPk1bhWg==} + + '@types/node@22.13.4': + resolution: {integrity: sha512-ywP2X0DYtX3y08eFVx5fNIw7/uIv8hYUKgXoK8oayJlLnKcRfEYCxWMVE1XagUdVtCJlZT1AU4LXEABW+L1Peg==} + + '@types/react-dom@19.0.4': + resolution: {integrity: sha512-4fSQ8vWFkg+TGhePfUzVmat3eC14TXYSsiiDSLI0dVLsrm9gZFABjPy/Qu6TKgl1tq1Bu1yDsuQgY3A3DOjCcg==} + peerDependencies: + '@types/react': ^19.0.0 + + '@types/react@19.0.10': + resolution: {integrity: sha512-JuRQ9KXLEjaUNjTWpzuR231Z2WpIwczOkBEIvbHNCzQefFIT0L8IqE6NV6ULLyC1SI/i234JnDoMkfg+RjQj2g==} + + '@types/stylis@4.2.5': + resolution: {integrity: sha512-1Xve+NMN7FWjY14vLoY5tL3BVEQ/n42YLwaqJIPYhotZ9uBHt87VceMwWQpzmdEt2TNXIorIFG+YeCUUW7RInw==} + + '@types/trusted-types@2.0.7': + resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==} + + '@typescript-eslint/eslint-plugin@8.24.1': + resolution: {integrity: sha512-ll1StnKtBigWIGqvYDVuDmXJHVH4zLVot1yQ4fJtLpL7qacwkxJc1T0bptqw+miBQ/QfUbhl1TcQ4accW5KUyA==} + engines: {node: ^18.18.0 || 
^20.9.0 || >=21.1.0} + peerDependencies: + '@typescript-eslint/parser': ^8.0.0 || ^8.0.0-alpha.0 + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <5.8.0' + + '@typescript-eslint/parser@8.24.1': + resolution: {integrity: sha512-Tqoa05bu+t5s8CTZFaGpCH2ub3QeT9YDkXbPd3uQ4SfsLoh1/vv2GEYAioPoxCWJJNsenXlC88tRjwoHNts1oQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <5.8.0' + + '@typescript-eslint/scope-manager@8.24.1': + resolution: {integrity: sha512-OdQr6BNBzwRjNEXMQyaGyZzgg7wzjYKfX2ZBV3E04hUCBDv3GQCHiz9RpqdUIiVrMgJGkXm3tcEh4vFSHreS2Q==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@typescript-eslint/type-utils@8.24.1': + resolution: {integrity: sha512-/Do9fmNgCsQ+K4rCz0STI7lYB4phTtEXqqCAs3gZW0pnK7lWNkvWd5iW545GSmApm4AzmQXmSqXPO565B4WVrw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <5.8.0' + + '@typescript-eslint/types@8.24.1': + resolution: {integrity: sha512-9kqJ+2DkUXiuhoiYIUvIYjGcwle8pcPpdlfkemGvTObzgmYfJ5d0Qm6jwb4NBXP9W1I5tss0VIAnWFumz3mC5A==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@typescript-eslint/typescript-estree@8.24.1': + resolution: {integrity: sha512-UPyy4MJ/0RE648DSKQe9g0VDSehPINiejjA6ElqnFaFIhI6ZEiZAkUI0D5MCk0bQcTf/LVqZStvQ6K4lPn/BRg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <5.8.0' + + '@typescript-eslint/utils@8.24.1': + resolution: {integrity: sha512-OOcg3PMMQx9EXspId5iktsI3eMaXVwlhC8BvNnX6B5w9a4dVgpkQZuU8Hy67TolKcl+iFWq0XX+jbDGN4xWxjQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <5.8.0' + + '@typescript-eslint/visitor-keys@8.24.1': + resolution: {integrity: sha512-EwVHlp5l+2vp8CoqJm9KikPZgi3gbdZAtabKT9KPShGeOcJhsv4Zdo3oc8T8I0uKEmYoU4ItyxbptjF08enaxg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@vitejs/plugin-react@4.3.4': + resolution: {integrity: sha512-SCCPBJtYLdE8PX/7ZQAs1QAZ8Jqwih+0VBLum1EGqmCCQal+MIUqLCzj3ZUy8ufbC0cAM4LRlSTm7IQJwWT4ug==} + engines: {node: ^14.18.0 || >=16.0.0} + peerDependencies: + vite: ^4.2.0 || ^5.0.0 || ^6.0.0 + + abort-controller@3.0.0: + resolution: {integrity: sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==} + engines: {node: '>=6.5'} + + acorn-jsx@5.3.2: + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + acorn@8.14.0: + resolution: {integrity: sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==} + engines: {node: '>=0.4.0'} + hasBin: true + + agent-base@7.1.3: + resolution: {integrity: sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==} + engines: {node: '>= 14'} + + ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + anymatch@3.1.3: + resolution: {integrity: 
sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} + engines: {node: '>= 8'} + + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + + autoprefixer@10.4.20: + resolution: {integrity: sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==} + engines: {node: ^10 || ^12 || >=14} + hasBin: true + peerDependencies: + postcss: ^8.1.0 + + balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + binary-extensions@2.3.0: + resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==} + engines: {node: '>=8'} + + brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + + brace-expansion@2.0.1: + resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} + + braces@3.0.3: + resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} + engines: {node: '>=8'} + + browserslist@4.24.4: + resolution: {integrity: sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + + call-bind-apply-helpers@1.0.2: + resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} + engines: {node: '>= 0.4'} + + call-me-maybe@1.0.2: + resolution: {integrity: sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ==} + + callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + + camelize@1.0.1: + resolution: {integrity: sha512-dU+Tx2fsypxTgtLoE36npi3UqcjSSMNYfkqgmoEhtZrraP5VWq0K7FkWVTYa8eMPtnU/G2txVsfdCJTn9uzpuQ==} + + caniuse-lite@1.0.30001700: + resolution: {integrity: sha512-2S6XIXwaE7K7erT8dY+kLQcpa5ms63XlRkMkReXjle+kf6c5g38vyMl+Z5y8dSxOFDhcFe+nxnn261PLxBSQsQ==} + + chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + + chokidar@3.6.0: + resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} + engines: {node: '>= 8.10.0'} + + classnames@2.5.1: + resolution: {integrity: sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==} + + cliui@7.0.4: + resolution: {integrity: sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==} + + clsx@2.1.1: + resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==} + engines: {node: '>=6'} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: 
sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + colorette@1.4.0: + resolution: {integrity: sha512-Y2oEozpomLn7Q3HFP7dpww7AtMJplbM9lGZP6RDfHqmbeRjiwRg4n6VM6j4KLmRke85uWEI7JqF17f3pqdRA0g==} + + combined-stream@1.0.8: + resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + + concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + + convert-source-map@2.0.0: + resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} + + cookie@1.0.2: + resolution: {integrity: sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA==} + engines: {node: '>=18'} + + core-js@3.40.0: + resolution: {integrity: sha512-7vsMc/Lty6AGnn7uFpYT56QesI5D2Y/UkgKounk87OP9Z2H9Z8kj6jzcSGAxFmUtDOS0ntK6lbQz+Nsa0Jj6mQ==} + + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + + css-color-keywords@1.0.0: + resolution: {integrity: sha512-FyyrDHZKEjXDpNJYvVsV960FiqQyXc/LlYmsxl2BcdMb2WPx0OGRVgTg55rPSyLSNMqP52R9r8geSp7apN3Ofg==} + engines: {node: '>=4'} + + css-to-react-native@3.2.0: + resolution: {integrity: sha512-e8RKaLXMOFii+02mOlqwjbD00KSEKqblnpO9e++1aXS1fPQOpS1YoqdVHBqPjHNoxeF2mimzVqawm2KCbEdtHQ==} + + cssesc@3.0.0: + resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} + engines: {node: '>=4'} + hasBin: true + + csstype@3.1.3: + resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==} + + debug@4.4.0: + resolution: {integrity: sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + decko@1.2.0: + resolution: {integrity: sha512-m8FnyHXV1QX+S1cl+KPFDIl6NMkxtKsy6+U/aYyjrOqWMuwAwYWu7ePqrsUHtDR5Y8Yk2pi/KIDSgF+vT4cPOQ==} + + deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + + delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + + detect-libc@1.0.3: + resolution: {integrity: sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==} + engines: {node: '>=0.10'} + hasBin: true + + dompurify@3.2.4: + resolution: {integrity: sha512-ysFSFEDVduQpyhzAob/kkuJjf5zWkZD8/A9ywSp1byueyuCfHamrCBa14/Oc2iiB0e51B+NpxSl5gmzn+Ms/mg==} + + dunder-proto@1.0.1: + resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} + engines: {node: '>= 0.4'} + + electron-to-chromium@1.5.102: + resolution: {integrity: sha512-eHhqaja8tE/FNpIiBrvBjFV/SSKpyWHLvxuR9dPTdo+3V9ppdLmFB7ZZQ98qNovcngPLYIz0oOBF9P0FfZef5Q==} + + emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + + enhanced-resolve@5.18.1: + resolution: {integrity: sha512-ZSW3ma5GkcQBIpwZTSRAI8N71Uuwgs93IezB7mf7R60tC8ZbJideoDNKjHn2O9KIlx6rkGTTEk1xUCK2E1Y2Yg==} + 
engines: {node: '>=10.13.0'} + + entities@4.5.0: + resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} + engines: {node: '>=0.12'} + + es-define-property@1.0.1: + resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} + engines: {node: '>= 0.4'} + + es-errors@1.3.0: + resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} + engines: {node: '>= 0.4'} + + es-object-atoms@1.1.1: + resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} + engines: {node: '>= 0.4'} + + es-set-tostringtag@2.1.0: + resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==} + engines: {node: '>= 0.4'} + + es6-promise@3.3.1: + resolution: {integrity: sha512-SOp9Phqvqn7jtEUxPWdWfWoLmyt2VaJ6MpvP9Comy1MceMXqE6bxvaTu4iaxpYYPzhny28Lc+M87/c2cPK6lDg==} + + esbuild@0.24.2: + resolution: {integrity: sha512-+9egpBW8I3CD5XPe0n6BfT5fxLzxrlDzqydF3aviG+9ni1lDC/OvMHcxqEFV0+LANZG5R1bFMWfUrjVsdwxJvA==} + engines: {node: '>=18'} + hasBin: true + + escalade@3.2.0: + resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} + engines: {node: '>=6'} + + escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + + eslint-plugin-react-hooks@5.1.0: + resolution: {integrity: sha512-mpJRtPgHN2tNAvZ35AMfqeB3Xqeo273QxrHJsbBEPWODRM4r0yB6jfoROqKEYrOn27UtRPpcpHc2UqyBSuUNTw==} + engines: {node: '>=10'} + peerDependencies: + eslint: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0 + + eslint-plugin-react-refresh@0.4.19: + resolution: {integrity: sha512-eyy8pcr/YxSYjBoqIFSrlbn9i/xvxUFa8CjzAYo9cFjgGXqq1hyjihcpZvxRLalpaWmueWR81xn7vuKmAFijDQ==} + peerDependencies: + eslint: '>=8.40' + + eslint-scope@8.2.0: + resolution: {integrity: sha512-PHlWUfG6lvPc3yvP5A4PNyBL1W8fkDUccmI21JUu/+GKZBoH/W5u6usENXUrWFRsyoW5ACUjFGgAFQp5gUlb/A==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint-visitor-keys@4.2.0: + resolution: {integrity: sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + eslint@9.20.1: + resolution: {integrity: sha512-m1mM33o6dBUjxl2qb6wv6nGNwCAsns1eKtaQ4l/NPHeTvhiUPbtdfMyktxN4B3fgHIgsYh1VT3V9txblpQHq+g==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + hasBin: true + peerDependencies: + jiti: '*' + peerDependenciesMeta: + jiti: + optional: true + + espree@10.3.0: + resolution: {integrity: sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + esquery@1.6.0: + resolution: {integrity: sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==} + engines: {node: '>=0.10'} + + esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + + estraverse@5.3.0: + resolution: 
{integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + + esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + + event-target-shim@5.0.1: + resolution: {integrity: sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==} + engines: {node: '>=6'} + + eventemitter3@5.0.1: + resolution: {integrity: sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==} + + fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + + fast-glob@3.3.3: + resolution: {integrity: sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==} + engines: {node: '>=8.6.0'} + + fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + + fast-safe-stringify@2.1.1: + resolution: {integrity: sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==} + + fast-xml-parser@4.5.2: + resolution: {integrity: sha512-xmnYV9o0StIz/0ArdzmWTxn9oDy0lH8Z80/8X/TD2EUQKXY4DHxoT9mYBqgGIG17DgddCJtH1M6DriMbalNsAA==} + hasBin: true + + fastq@1.19.0: + resolution: {integrity: sha512-7SFSRCNjBQIZH/xZR3iy5iQYR8aGBE0h3VG6/cwlbrpdciNYBMotQav8c1XI3HjHH+NikUpP53nPdlZSdWmFzA==} + + file-entry-cache@8.0.0: + resolution: {integrity: sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==} + engines: {node: '>=16.0.0'} + + fill-range@7.1.1: + resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} + engines: {node: '>=8'} + + find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + + flat-cache@4.0.1: + resolution: {integrity: sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==} + engines: {node: '>=16'} + + flatted@3.3.3: + resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==} + + foreach@2.0.6: + resolution: {integrity: sha512-k6GAGDyqLe9JaebCsFCoudPPWfihKu8pylYXRlqP1J7ms39iPoTtk2fviNglIeQEwdh0bQeKJ01ZPyuyQvKzwg==} + + form-data@4.0.2: + resolution: {integrity: sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==} + engines: {node: '>= 6'} + + fraction.js@4.3.7: + resolution: {integrity: sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==} + + fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + + gensync@1.0.0-beta.2: + 
resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} + engines: {node: '>=6.9.0'} + + get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + + get-intrinsic@1.2.7: + resolution: {integrity: sha512-VW6Pxhsrk0KAOqs3WEd0klDiF/+V7gQOpAvY1jVU/LHmaD/kQO4523aiJuikX/QAKYiW6x8Jh+RJej1almdtCA==} + engines: {node: '>= 0.4'} + + get-port-please@3.1.2: + resolution: {integrity: sha512-Gxc29eLs1fbn6LQ4jSU4vXjlwyZhF5HsGuMAa7gqBP4Rw4yxxltyDUuF5MBclFzDTXO+ACchGQoeela4DSfzdQ==} + + get-proto@1.0.1: + resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} + engines: {node: '>= 0.4'} + + glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + + glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + + glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + deprecated: Glob versions prior to v9 are no longer supported + + globals@11.12.0: + resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} + engines: {node: '>=4'} + + globals@14.0.0: + resolution: {integrity: sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==} + engines: {node: '>=18'} + + globals@15.15.0: + resolution: {integrity: sha512-7ACyT3wmyp3I61S4fG682L0VA2RGD9otkqGJIwNUMF1SWUombIIk+af1unuDYgMm082aHYwD+mzJvv9Iu8dsgg==} + engines: {node: '>=18'} + + gopd@1.2.0: + resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} + engines: {node: '>= 0.4'} + + graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + + graphemer@1.4.0: + resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + + handlebars@4.7.8: + resolution: {integrity: sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==} + engines: {node: '>=0.4.7'} + hasBin: true + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + has-symbols@1.1.0: + resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} + engines: {node: '>= 0.4'} + + has-tostringtag@1.0.2: + resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} + engines: {node: '>= 0.4'} + + hasown@2.0.2: + resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} + + html-parse-stringify@3.0.1: + resolution: {integrity: sha512-KknJ50kTInJ7qIScF3jeaFRpMpE8/lfiTdzf/twXyPBLAGrLRTmkz3AdTnKeh40X8k9L2fdYwEp/42WGXIRGcg==} + + http2-client@1.3.5: + resolution: {integrity: sha512-EC2utToWl4RKfs5zd36Mxq7nzHHBuomZboI0yYL6Y0RmBgT7Sgkq4rQ0ezFTYoIsSs7Tm9SJe+o2FcAg6GBhGA==} + + 
https-proxy-agent@7.0.6: + resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} + engines: {node: '>= 14'} + + i18next@24.2.2: + resolution: {integrity: sha512-NE6i86lBCKRYZa5TaUDkU5S4HFgLIEJRLr3Whf2psgaxBleQ2LC1YW1Vc+SCgkAW7VEzndT6al6+CzegSUHcTQ==} + peerDependencies: + typescript: ^5 + peerDependenciesMeta: + typescript: + optional: true + + ignore@5.3.2: + resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} + engines: {node: '>= 4'} + + import-fresh@3.3.1: + resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==} + engines: {node: '>=6'} + + imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. + + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + is-binary-path@2.1.0: + resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} + engines: {node: '>=8'} + + is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + + is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + + is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + jiti@2.4.2: + resolution: {integrity: sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==} + hasBin: true + + js-levenshtein@1.1.6: + resolution: {integrity: sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g==} + engines: {node: '>=0.10.0'} + + js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + + js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true + + jsesc@3.1.0: + resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==} + engines: {node: '>=6'} + hasBin: true + + json-buffer@3.0.1: + resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + + json-pointer@0.6.2: + resolution: {integrity: 
sha512-vLWcKbOaXlO+jvRy4qNd+TI1QUPZzfJj1tpJ3vAXDych5XJf93ftpUKe5pKCrzyIIwgBJcOcCVRUfqQP25afBw==} + + json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + + json-schema-traverse@1.0.0: + resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + + json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + + json5@2.2.3: + resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==} + engines: {node: '>=6'} + hasBin: true + + keyv@4.5.4: + resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + + levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + + lightningcss-darwin-arm64@1.29.1: + resolution: {integrity: sha512-HtR5XJ5A0lvCqYAoSv2QdZZyoHNttBpa5EP9aNuzBQeKGfbyH5+UipLWvVzpP4Uml5ej4BYs5I9Lco9u1fECqw==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [darwin] + + lightningcss-darwin-x64@1.29.1: + resolution: {integrity: sha512-k33G9IzKUpHy/J/3+9MCO4e+PzaFblsgBjSGlpAaFikeBFm8B/CkO3cKU9oI4g+fjS2KlkLM/Bza9K/aw8wsNA==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [darwin] + + lightningcss-freebsd-x64@1.29.1: + resolution: {integrity: sha512-0SUW22fv/8kln2LnIdOCmSuXnxgxVC276W5KLTwoehiO0hxkacBxjHOL5EtHD8BAXg2BvuhsJPmVMasvby3LiQ==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [freebsd] + + lightningcss-linux-arm-gnueabihf@1.29.1: + resolution: {integrity: sha512-sD32pFvlR0kDlqsOZmYqH/68SqUMPNj+0pucGxToXZi4XZgZmqeX/NkxNKCPsswAXU3UeYgDSpGhu05eAufjDg==} + engines: {node: '>= 12.0.0'} + cpu: [arm] + os: [linux] + + lightningcss-linux-arm64-gnu@1.29.1: + resolution: {integrity: sha512-0+vClRIZ6mmJl/dxGuRsE197o1HDEeeRk6nzycSy2GofC2JsY4ifCRnvUWf/CUBQmlrvMzt6SMQNMSEu22csWQ==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [linux] + + lightningcss-linux-arm64-musl@1.29.1: + resolution: {integrity: sha512-UKMFrG4rL/uHNgelBsDwJcBqVpzNJbzsKkbI3Ja5fg00sgQnHw/VrzUTEc4jhZ+AN2BvQYz/tkHu4vt1kLuJyw==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [linux] + + lightningcss-linux-x64-gnu@1.29.1: + resolution: {integrity: sha512-u1S+xdODy/eEtjADqirA774y3jLcm8RPtYztwReEXoZKdzgsHYPl0s5V52Tst+GKzqjebkULT86XMSxejzfISw==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [linux] + + lightningcss-linux-x64-musl@1.29.1: + resolution: {integrity: sha512-L0Tx0DtaNUTzXv0lbGCLB/c/qEADanHbu4QdcNOXLIe1i8i22rZRpbT3gpWYsCh9aSL9zFujY/WmEXIatWvXbw==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [linux] + + lightningcss-win32-arm64-msvc@1.29.1: + resolution: {integrity: sha512-QoOVnkIEFfbW4xPi+dpdft/zAKmgLgsRHfJalEPYuJDOWf7cLQzYg0DEh8/sn737FaeMJxHZRc1oBreiwZCjog==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [win32] + + lightningcss-win32-x64-msvc@1.29.1: + resolution: {integrity: sha512-NygcbThNBe4JElP+olyTI/doBNGJvLs3bFCRPdvuCcxZCcCZ71B858IHpdm7L1btZex0FvCmM17FK98Y9MRy1Q==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [win32] + + lightningcss@1.29.1: + resolution: {integrity: sha512-FmGoeD4S05ewj+AkhTY+D+myDvXI6eL27FjHIjoyUkO/uw7WZD1fBVs0QxeYWa7E17CUHJaYX/RUGISCtcrG4Q==} + engines: {node: '>= 12.0.0'} + + linkify-it@5.0.0: + resolution: {integrity: 
sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==} + + locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + + lodash.castarray@4.4.0: + resolution: {integrity: sha512-aVx8ztPv7/2ULbArGJ2Y42bG1mEQ5mGjpdvrbJcJFU3TbYybe+QlLS4pst9zV52ymy2in1KpFPiZnAOATxD4+Q==} + + lodash.isplainobject@4.0.6: + resolution: {integrity: sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==} + + lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + + long@5.3.1: + resolution: {integrity: sha512-ka87Jz3gcx/I7Hal94xaN2tZEOPoUOEVftkQqZx2EeQRN7LGdfLlI3FvZ+7WDplm+vK2Urx9ULrvSowtdCieng==} + + loose-envify@1.4.0: + resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==} + hasBin: true + + lru-cache@5.1.1: + resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} + + lucide-react@0.474.0: + resolution: {integrity: sha512-CmghgHkh0OJNmxGKWc0qfPJCYHASPMVSyGY8fj3xgk4v84ItqDg64JNKFZn5hC6E0vHi6gxnbCgwhyVB09wQtA==} + peerDependencies: + react: ^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + lunr@2.3.9: + resolution: {integrity: sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==} + + mark.js@8.11.1: + resolution: {integrity: sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==} + + markdown-it@14.1.0: + resolution: {integrity: sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==} + hasBin: true + + marked@4.3.0: + resolution: {integrity: sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==} + engines: {node: '>= 12'} + hasBin: true + + math-intrinsics@1.1.0: + resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} + engines: {node: '>= 0.4'} + + mdurl@2.0.0: + resolution: {integrity: sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==} + + merge2@1.4.1: + resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} + + micromatch@4.0.8: + resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} + engines: {node: '>=8.6'} + + mime-db@1.52.0: + resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} + engines: {node: '>= 0.6'} + + mime-types@2.1.35: + resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} + engines: {node: '>= 0.6'} + + minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + + minimatch@5.1.6: + resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==} + engines: {node: '>=10'} + + minimatch@9.0.5: + resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} + engines: {node: '>=16 || 14 >=14.17'} + + minimist@1.2.8: + resolution: 
{integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} + + mobx-react-lite@4.1.0: + resolution: {integrity: sha512-QEP10dpHHBeQNv1pks3WnHRCem2Zp636lq54M2nKO2Sarr13pL4u6diQXf65yzXUn0mkk18SyIDCm9UOJYTi1w==} + peerDependencies: + mobx: ^6.9.0 + react: ^16.8.0 || ^17 || ^18 || ^19 + react-dom: '*' + react-native: '*' + peerDependenciesMeta: + react-dom: + optional: true + react-native: + optional: true + + mobx-react@9.2.0: + resolution: {integrity: sha512-dkGWCx+S0/1mfiuFfHRH8D9cplmwhxOV5CkXMp38u6rQGG2Pv3FWYztS0M7ncR6TyPRQKaTG/pnitInoYE9Vrw==} + peerDependencies: + mobx: ^6.9.0 + react: ^16.8.0 || ^17 || ^18 || ^19 + react-dom: '*' + react-native: '*' + peerDependenciesMeta: + react-dom: + optional: true + react-native: + optional: true + + mobx@6.13.6: + resolution: {integrity: sha512-r19KNV0uBN4b+ER8Z0gA4y+MzDYIQ2SvOmn3fUrqPnWXdQfakd9yfbPBDBF/p5I+bd3N5Rk1fHONIvMay+bJGA==} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + nanoid@3.3.8: + resolution: {integrity: sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + neo-async@2.6.2: + resolution: {integrity: sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==} + + node-fetch-h2@2.3.0: + resolution: {integrity: sha512-ofRW94Ab0T4AOh5Fk8t0h8OBWrmjb0SSB20xh1H8YnPV9EJ+f5AMoYSUQ2zgJ4Iq2HAK0I2l5/Nequ8YzFS3Hg==} + engines: {node: 4.x || >=6.0.0} + + node-fetch@2.7.0: + resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} + engines: {node: 4.x || >=6.0.0} + peerDependencies: + encoding: ^0.1.0 + peerDependenciesMeta: + encoding: + optional: true + + node-readfiles@0.2.0: + resolution: {integrity: sha512-SU00ZarexNlE4Rjdm83vglt5Y9yiQ+XI1XpflWlb7q7UTN1JUItm69xMeiQCTxtTfnzt+83T8Cx+vI2ED++VDA==} + + node-releases@2.0.19: + resolution: {integrity: sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==} + + normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + + normalize-range@0.1.2: + resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==} + engines: {node: '>=0.10.0'} + + oas-kit-common@1.0.8: + resolution: {integrity: sha512-pJTS2+T0oGIwgjGpw7sIRU8RQMcUoKCDWFLdBqKB2BNmGpbBMH2sdqAaOXUg8OzonZHU0L7vfJu1mJFEiYDWOQ==} + + oas-linter@3.2.2: + resolution: {integrity: sha512-KEGjPDVoU5K6swgo9hJVA/qYGlwfbFx+Kg2QB/kd7rzV5N8N5Mg6PlsoCMohVnQmo+pzJap/F610qTodKzecGQ==} + + oas-resolver@2.5.6: + resolution: {integrity: sha512-Yx5PWQNZomfEhPPOphFbZKi9W93CocQj18NlD2Pa4GWZzdZpSJvYwoiuurRI7m3SpcChrnO08hkuQDL3FGsVFQ==} + hasBin: true + + oas-schema-walker@1.1.5: + resolution: {integrity: sha512-2yucenq1a9YPmeNExoUa9Qwrt9RFkjqaMAA1X+U7sbb0AqBeTIdMHky9SQQ6iN94bO5NW0W4TRYXerG+BdAvAQ==} + + oas-validator@5.0.8: + resolution: {integrity: sha512-cu20/HE5N5HKqVygs3dt94eYJfBi0TsZvPVXDhbXQHiEityDN+RROTleefoKRKKJ9dFAF2JBkDHgvWj0sjKGmw==} + + object-assign@4.1.1: + resolution: {integrity: 
sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} + engines: {node: '>=0.10.0'} + + once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + + openapi-sampler@1.6.1: + resolution: {integrity: sha512-s1cIatOqrrhSj2tmJ4abFYZQK6l5v+V4toO5q1Pa0DyN8mtyqy2I+Qrj5W9vOELEtybIMQs/TBZGVO/DtTFK8w==} + + optionator@0.9.4: + resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} + engines: {node: '>= 0.8.0'} + + p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + + parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + + path-browserify@1.0.1: + resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==} + + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + perfect-scrollbar@1.5.6: + resolution: {integrity: sha512-rixgxw3SxyJbCaSpo1n35A/fwI1r2rdwMKOTCg/AcG+xOEyZcE8UHVjpZMFCVImzsFoCZeJTT+M/rdEIQYO2nw==} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + pluralize@8.0.0: + resolution: {integrity: sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==} + engines: {node: '>=4'} + + polished@4.3.1: + resolution: {integrity: sha512-OBatVyC/N7SCW/FaDHrSd+vn0o5cS855TOmYi4OkdWUMSJCET/xip//ch8xGUvtr3i44X9LVyWwQlRMTN3pwSA==} + engines: {node: '>=10'} + + postcss-selector-parser@6.0.10: + resolution: {integrity: sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==} + engines: {node: '>=4'} + + postcss-value-parser@4.2.0: + resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==} + + postcss@8.4.49: + resolution: {integrity: sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA==} + engines: {node: ^10 || ^12 || >=14} + + postcss@8.5.3: + resolution: {integrity: sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==} + engines: {node: ^10 || ^12 || >=14} + + prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + + prismjs@1.29.0: + resolution: {integrity: 
sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==} + engines: {node: '>=6'} + + prop-types@15.8.1: + resolution: {integrity: sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==} + + protobufjs@7.4.0: + resolution: {integrity: sha512-mRUWCc3KUU4w1jU8sGxICXH/gNS94DvI1gxqDvBzhj1JpcsimQkYiOJfwsPUykUI5ZaspFbSgmBLER8IrQ3tqw==} + engines: {node: '>=12.0.0'} + + punycode.js@2.3.1: + resolution: {integrity: sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==} + engines: {node: '>=6'} + + punycode@2.3.1: + resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} + engines: {node: '>=6'} + + queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + + randombytes@2.1.0: + resolution: {integrity: sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==} + + react-dom@19.0.0: + resolution: {integrity: sha512-4GV5sHFG0e/0AD4X+ySy6UJd3jVl1iNsNHdpad0qhABJ11twS3TTBnseqsKurKcsNqCEFeGL3uLpVChpIO3QfQ==} + peerDependencies: + react: ^19.0.0 + + react-i18next@15.4.1: + resolution: {integrity: sha512-ahGab+IaSgZmNPYXdV1n+OYky95TGpFwnKRflX/16dY04DsYYKHtVLjeny7sBSCREEcoMbAgSkFiGLF5g5Oofw==} + peerDependencies: + i18next: '>= 23.2.3' + react: '>= 16.8.0' + react-dom: '*' + react-native: '*' + peerDependenciesMeta: + react-dom: + optional: true + react-native: + optional: true + + react-icons@5.5.0: + resolution: {integrity: sha512-MEFcXdkP3dLo8uumGI5xN3lDFNsRtrjbOEKDLD7yv76v4wpnEq2Lt2qeHaQOr34I/wPN3s3+N08WkQ+CW37Xiw==} + peerDependencies: + react: '*' + + react-is@16.13.1: + resolution: {integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==} + + react-refresh@0.14.2: + resolution: {integrity: sha512-jCvmsr+1IUSMUyzOkRcvnVbX3ZYC6g9TDrDbFuFmRDq7PD4yaGbLKNQL6k2jnArV8hjYxh7hVhAZB6s9HDGpZA==} + engines: {node: '>=0.10.0'} + + react-router-dom@7.2.0: + resolution: {integrity: sha512-cU7lTxETGtQRQbafJubvZKHEn5izNABxZhBY0Jlzdv0gqQhCPQt2J8aN5ZPjS6mQOXn5NnirWNh+FpE8TTYN0Q==} + engines: {node: '>=20.0.0'} + peerDependencies: + react: '>=18' + react-dom: '>=18' + + react-router@7.2.0: + resolution: {integrity: sha512-fXyqzPgCPZbqhrk7k3hPcCpYIlQ2ugIXDboHUzhJISFVy2DEPsmHgN588MyGmkIOv3jDgNfUE3kJi83L28s/LQ==} + engines: {node: '>=20.0.0'} + peerDependencies: + react: '>=18' + react-dom: '>=18' + peerDependenciesMeta: + react-dom: + optional: true + + react-tabs@6.1.0: + resolution: {integrity: sha512-6QtbTRDKM+jA/MZTTefvigNxo0zz+gnBTVFw2CFVvq+f2BuH0nF0vDLNClL045nuTAdOoK/IL1vTP0ZLX0DAyQ==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + + react@19.0.0: + resolution: {integrity: sha512-V8AVnmPIICiWpGfm6GLzCR/W5FXLchHop40W4nXBmdlEceh16rCN8O8LNWm5bh5XUX91fh7KpA+W0TgMKmgTpQ==} + engines: {node: '>=0.10.0'} + + readable-stream@3.6.2: + resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==} + engines: {node: '>= 6'} + + readdirp@3.6.0: + resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} + engines: {node: '>=8.10.0'} + + redoc@2.4.0: + resolution: {integrity: sha512-rFlfzFVWS9XJ6aYAs/bHnLhHP5FQEhwAHDBVgwb9L2FqDQ8Hu8rQ1G84iwaWXxZfPP9UWn7JdWkxI6MXr2ZDjw==} + engines: {node: '>=6.9', npm: '>=3.0.0'} + peerDependencies: + core-js: 
^3.1.4 + mobx: ^6.0.4 + react: ^16.8.4 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-dom: ^16.8.4 || ^17.0.0 || ^18.0.0 || ^19.0.0 + styled-components: ^4.1.1 || ^5.1.1 || ^6.0.5 + + reftools@1.1.9: + resolution: {integrity: sha512-OVede/NQE13xBQ+ob5CKd5KyeJYU2YInb1bmV4nRoOfquZPkAkxuOXicSe1PvqIuZZ4kD13sPKBbR7UFDmli6w==} + + regenerator-runtime@0.14.1: + resolution: {integrity: sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==} + + require-directory@2.1.1: + resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + + require-from-string@2.0.2: + resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} + engines: {node: '>=0.10.0'} + + resolve-from@4.0.0: + resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + + reusify@1.0.4: + resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + + rollup@4.34.8: + resolution: {integrity: sha512-489gTVMzAYdiZHFVA/ig/iYFllCcWFHMvUHI1rpFmkoUtRlQxqh6/yiNqnYibjMZ2b/+FUQwldG+aLsEt6bglQ==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + + run-parallel@1.2.0: + resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + + safe-buffer@5.2.1: + resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} + + scheduler@0.25.0: + resolution: {integrity: sha512-xFVuu11jh+xcO7JOAGJNOXld8/TcEHK/4CituBUeUb5hqxJLj9YuemAEuvm9gQ/+pgXYfbQuqAkiYu+u7YEsNA==} + + semver@6.3.1: + resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==} + hasBin: true + + semver@7.7.1: + resolution: {integrity: sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==} + engines: {node: '>=10'} + hasBin: true + + set-cookie-parser@2.7.1: + resolution: {integrity: sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==} + + shallowequal@1.1.0: + resolution: {integrity: sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==} + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + should-equal@2.0.0: + resolution: {integrity: sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA==} + + should-format@3.0.3: + resolution: {integrity: sha512-hZ58adtulAk0gKtua7QxevgUaXTTXxIi8t41L3zo9AHvjXO1/7sdLECuHeIN2SRtYXpNkmhoUP2pdeWgricQ+Q==} + + should-type-adaptors@1.1.0: + resolution: {integrity: sha512-JA4hdoLnN+kebEp2Vs8eBe9g7uy0zbRo+RMcU0EsNy+R+k049Ki+N5tT5Jagst2g7EAja+euFuoXFCa8vIklfA==} + + should-type@1.4.0: + resolution: {integrity: sha512-MdAsTu3n25yDbIe1NeN69G4n6mUnJGtSJHygX3+oN0ZbO3DTiATnf7XnYJdGT42JCXurTb1JI0qOBR65shvhPQ==} + + should-util@1.0.1: + resolution: {integrity: 
sha512-oXF8tfxx5cDk8r2kYqlkUJzZpDBqVY/II2WhvU0n9Y3XYvAYRmeaf1PvvIvTgPnv4KJ+ES5M0PyDq5Jp+Ygy2g==} + + should@13.2.3: + resolution: {integrity: sha512-ggLesLtu2xp+ZxI+ysJTmNjh2U0TsC+rQ/pfED9bUZZ4DKefP27D+7YJVVTvKsmjLpIi9jAa7itwDGkDDmt1GQ==} + + simple-websocket@9.1.0: + resolution: {integrity: sha512-8MJPnjRN6A8UCp1I+H/dSFyjwJhp6wta4hsVRhjf8w9qBHRzxYt14RaOcjvQnhD1N4yKOddEjflwMnQM4VtXjQ==} + + slugify@1.4.7: + resolution: {integrity: sha512-tf+h5W1IrjNm/9rKKj0JU2MDMruiopx0jjVA5zCdBtcGjfp0+c5rHw/zADLC3IeKlGHtVbHtpfzvYA0OYT+HKg==} + engines: {node: '>=8.0.0'} + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + source-map@0.6.1: + resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} + engines: {node: '>=0.10.0'} + + stickyfill@1.1.1: + resolution: {integrity: sha512-GCp7vHAfpao+Qh/3Flh9DXEJ/qSi0KJwJw6zYlZOtRYXWUIpMM6mC2rIep/dK8RQqwW0KxGJIllmjPIBOGN8AA==} + + string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + + string_decoder@1.3.0: + resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==} + + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + strnum@1.1.0: + resolution: {integrity: sha512-a4NGarQIHRhvr+k8VXaHg6TMU6f3YEmi5CAb6RYgX2gwbGDBNMbr6coC6g0wmif5dLjHtmHUVD/qOxPq7D0tnQ==} + + styled-components@6.1.15: + resolution: {integrity: sha512-PpOTEztW87Ua2xbmLa7yssjNyUF9vE7wdldRfn1I2E6RTkqknkBYpj771OxM/xrvRGinLy2oysa7GOd7NcZZIA==} + engines: {node: '>= 16'} + peerDependencies: + react: '>= 16.8.0' + react-dom: '>= 16.8.0' + + stylis@4.3.2: + resolution: {integrity: sha512-bhtUjWd/z6ltJiQwg0dUfxEJ+W+jdqQd8TbWLWyeIJHlnsqmGLRFFd8e5mA0AZi/zx90smXRlN66YMTcaSFifg==} + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + swagger2openapi@7.0.8: + resolution: {integrity: sha512-upi/0ZGkYgEcLeGieoz8gT74oWHA0E7JivX7aN9mAf+Tc7BQoRBvnIGHoPDw+f9TXTW4s6kGYCZJtauP6OYp7g==} + hasBin: true + + tabbable@6.2.0: + resolution: {integrity: sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==} + + tailwindcss@4.0.7: + resolution: {integrity: sha512-yH5bPPyapavo7L+547h3c4jcBXcrKwybQRjwdEIVAd9iXRvy/3T1CC6XSQEgZtRySjKfqvo3Cc0ZF1DTheuIdA==} + + tapable@2.2.1: + resolution: {integrity: sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==} + engines: {node: '>=6'} + + to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + + tr46@0.0.3: + resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==} + + ts-api-utils@2.0.1: + resolution: {integrity: sha512-dnlgjFSVetynI8nzgJ+qF62efpglpWRk8isUEWZGWlJYySCTD6aKvbUDu+zbPeDakk3bg5H4XpitHukgfL1m9w==} + engines: {node: '>=18.12'} + 
peerDependencies: + typescript: '>=4.8.4' + + tslib@2.6.2: + resolution: {integrity: sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==} + + tslib@2.8.1: + resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} + + turbo-stream@2.4.0: + resolution: {integrity: sha512-FHncC10WpBd2eOmGwpmQsWLDoK4cqsA/UT/GqNoaKOQnT8uzhtCbg3EoUDMvqpOSAI0S26mr0rkjzbOO6S3v1g==} + + type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + + typescript-eslint@8.24.1: + resolution: {integrity: sha512-cw3rEdzDqBs70TIcb0Gdzbt6h11BSs2pS0yaq7hDWDBtCCSei1pPSUXE9qUdQ/Wm9NgFg8mKtMt1b8fTHIl1jA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <5.8.0' + + typescript@5.7.3: + resolution: {integrity: sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw==} + engines: {node: '>=14.17'} + hasBin: true + + uc.micro@2.1.0: + resolution: {integrity: sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==} + + uglify-js@3.19.3: + resolution: {integrity: sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==} + engines: {node: '>=0.8.0'} + hasBin: true + + undici-types@6.20.0: + resolution: {integrity: sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==} + + update-browserslist-db@1.1.2: + resolution: {integrity: sha512-PPypAm5qvlD7XMZC3BujecnaOxwhrtoFR+Dqkk5Aa/6DssiH0ibKoketaj9w8LP7Bont1rYeoV5plxD7RTEPRg==} + hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + + uri-js-replace@1.0.1: + resolution: {integrity: sha512-W+C9NWNLFOoBI2QWDp4UT9pv65r2w5Cx+3sTYFvtMdDBxkKt1syCqsUdSFAChbEe1uK5TfS04wt/nGwmaeIQ0g==} + + uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + + url-template@2.0.8: + resolution: {integrity: sha512-XdVKMF4SJ0nP/O7XIPB0JwAEuT9lDIYnNsK8yGVe43y0AWoKeJNdv3ZNWh7ksJ6KqQFjOO6ox/VEitLnaVNufw==} + + use-sync-external-store@1.4.0: + resolution: {integrity: sha512-9WXSPC5fMv61vaupRkCKCxsPxBocVnwakBEkMIHHpkTTg6icbJtg6jzgtLDm4bl3cSHAca52rYWih0k4K3PfHw==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + vite@6.1.1: + resolution: {integrity: sha512-4GgM54XrwRfrOp297aIYspIti66k56v16ZnqHvrIM7mG+HjDlAwS7p+Srr7J6fGvEdOJ5JcQ/D9T7HhtdXDTzA==} + engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} + hasBin: true + peerDependencies: + '@types/node': ^18.0.0 || ^20.0.0 || >=22.0.0 + jiti: '>=1.21.0' + less: '*' + lightningcss: ^1.21.0 + sass: '*' + sass-embedded: '*' + stylus: '*' + sugarss: '*' + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + peerDependenciesMeta: + '@types/node': + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + tsx: + optional: true + yaml: + optional: true + + void-elements@3.1.0: + resolution: {integrity: sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==} + 
engines: {node: '>=0.10.0'} + + webidl-conversions@3.0.1: + resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} + + whatwg-url@5.0.0: + resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + word-wrap@1.2.5: + resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} + engines: {node: '>=0.10.0'} + + wordwrap@1.0.0: + resolution: {integrity: sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==} + + wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + + wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + ws@7.5.10: + resolution: {integrity: sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==} + engines: {node: '>=8.3.0'} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: ^5.0.2 + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + + y18n@5.0.8: + resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} + engines: {node: '>=10'} + + yallist@3.1.1: + resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} + + yaml-ast-parser@0.0.43: + resolution: {integrity: sha512-2PTINUwsRqSd+s8XxKaJWQlUuEMHJQyEuh2edBbW8KNJz0SJPwUSD2zRWqezFEdN7IzAgeuYHFUCF7o8zRdZ0A==} + + yaml@1.10.2: + resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==} + engines: {node: '>= 6'} + + yargs-parser@20.2.9: + resolution: {integrity: sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==} + engines: {node: '>=10'} + + yargs@17.0.1: + resolution: {integrity: sha512-xBBulfCc8Y6gLFcrPvtqKz9hz8SO0l1Ni8GgDekvBX2ro0HRQImDGnikfc33cgzcYUSncapnNcZDjVFIH3f6KQ==} + engines: {node: '>=12'} + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + +snapshots: + + '@alloc/quick-lru@5.2.0': {} + + '@ampproject/remapping@2.3.0': + dependencies: + '@jridgewell/gen-mapping': 0.3.8 + '@jridgewell/trace-mapping': 0.3.25 + + '@babel/code-frame@7.26.2': + dependencies: + '@babel/helper-validator-identifier': 7.25.9 + js-tokens: 4.0.0 + picocolors: 1.1.1 + + '@babel/compat-data@7.26.8': {} + + '@babel/core@7.26.9': + dependencies: + '@ampproject/remapping': 2.3.0 + '@babel/code-frame': 7.26.2 + '@babel/generator': 7.26.9 + '@babel/helper-compilation-targets': 7.26.5 + '@babel/helper-module-transforms': 7.26.0(@babel/core@7.26.9) + '@babel/helpers': 7.26.9 + '@babel/parser': 7.26.9 + '@babel/template': 7.26.9 + '@babel/traverse': 7.26.9 + '@babel/types': 7.26.9 + convert-source-map: 2.0.0 + debug: 4.4.0 + gensync: 1.0.0-beta.2 + json5: 2.2.3 + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + + '@babel/generator@7.26.9': + dependencies: + '@babel/parser': 7.26.9 + '@babel/types': 
7.26.9 + '@jridgewell/gen-mapping': 0.3.8 + '@jridgewell/trace-mapping': 0.3.25 + jsesc: 3.1.0 + + '@babel/helper-compilation-targets@7.26.5': + dependencies: + '@babel/compat-data': 7.26.8 + '@babel/helper-validator-option': 7.25.9 + browserslist: 4.24.4 + lru-cache: 5.1.1 + semver: 6.3.1 + + '@babel/helper-module-imports@7.25.9': + dependencies: + '@babel/traverse': 7.26.9 + '@babel/types': 7.26.9 + transitivePeerDependencies: + - supports-color + + '@babel/helper-module-transforms@7.26.0(@babel/core@7.26.9)': + dependencies: + '@babel/core': 7.26.9 + '@babel/helper-module-imports': 7.25.9 + '@babel/helper-validator-identifier': 7.25.9 + '@babel/traverse': 7.26.9 + transitivePeerDependencies: + - supports-color + + '@babel/helper-plugin-utils@7.26.5': {} + + '@babel/helper-string-parser@7.25.9': {} + + '@babel/helper-validator-identifier@7.25.9': {} + + '@babel/helper-validator-option@7.25.9': {} + + '@babel/helpers@7.26.9': + dependencies: + '@babel/template': 7.26.9 + '@babel/types': 7.26.9 + + '@babel/parser@7.26.9': + dependencies: + '@babel/types': 7.26.9 + + '@babel/plugin-transform-react-jsx-self@7.25.9(@babel/core@7.26.9)': + dependencies: + '@babel/core': 7.26.9 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-transform-react-jsx-source@7.25.9(@babel/core@7.26.9)': + dependencies: + '@babel/core': 7.26.9 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/runtime@7.26.9': + dependencies: + regenerator-runtime: 0.14.1 + + '@babel/template@7.26.9': + dependencies: + '@babel/code-frame': 7.26.2 + '@babel/parser': 7.26.9 + '@babel/types': 7.26.9 + + '@babel/traverse@7.26.9': + dependencies: + '@babel/code-frame': 7.26.2 + '@babel/generator': 7.26.9 + '@babel/parser': 7.26.9 + '@babel/template': 7.26.9 + '@babel/types': 7.26.9 + debug: 4.4.0 + globals: 11.12.0 + transitivePeerDependencies: + - supports-color + + '@babel/types@7.26.9': + dependencies: + '@babel/helper-string-parser': 7.25.9 + '@babel/helper-validator-identifier': 7.25.9 + + '@emotion/is-prop-valid@1.2.2': + dependencies: + '@emotion/memoize': 0.8.1 + + '@emotion/memoize@0.8.1': {} + + '@emotion/unitless@0.8.1': {} + + '@esbuild/aix-ppc64@0.24.2': + optional: true + + '@esbuild/android-arm64@0.24.2': + optional: true + + '@esbuild/android-arm@0.24.2': + optional: true + + '@esbuild/android-x64@0.24.2': + optional: true + + '@esbuild/darwin-arm64@0.24.2': + optional: true + + '@esbuild/darwin-x64@0.24.2': + optional: true + + '@esbuild/freebsd-arm64@0.24.2': + optional: true + + '@esbuild/freebsd-x64@0.24.2': + optional: true + + '@esbuild/linux-arm64@0.24.2': + optional: true + + '@esbuild/linux-arm@0.24.2': + optional: true + + '@esbuild/linux-ia32@0.24.2': + optional: true + + '@esbuild/linux-loong64@0.24.2': + optional: true + + '@esbuild/linux-mips64el@0.24.2': + optional: true + + '@esbuild/linux-ppc64@0.24.2': + optional: true + + '@esbuild/linux-riscv64@0.24.2': + optional: true + + '@esbuild/linux-s390x@0.24.2': + optional: true + + '@esbuild/linux-x64@0.24.2': + optional: true + + '@esbuild/netbsd-arm64@0.24.2': + optional: true + + '@esbuild/netbsd-x64@0.24.2': + optional: true + + '@esbuild/openbsd-arm64@0.24.2': + optional: true + + '@esbuild/openbsd-x64@0.24.2': + optional: true + + '@esbuild/sunos-x64@0.24.2': + optional: true + + '@esbuild/win32-arm64@0.24.2': + optional: true + + '@esbuild/win32-ia32@0.24.2': + optional: true + + '@esbuild/win32-x64@0.24.2': + optional: true + + '@eslint-community/eslint-utils@4.4.1(eslint@9.20.1(jiti@2.4.2))': + dependencies: + eslint: 9.20.1(jiti@2.4.2) + 
eslint-visitor-keys: 3.4.3 + + '@eslint-community/regexpp@4.12.1': {} + + '@eslint/config-array@0.19.2': + dependencies: + '@eslint/object-schema': 2.1.6 + debug: 4.4.0 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + + '@eslint/core@0.11.0': + dependencies: + '@types/json-schema': 7.0.15 + + '@eslint/eslintrc@3.2.0': + dependencies: + ajv: 6.12.6 + debug: 4.4.0 + espree: 10.3.0 + globals: 14.0.0 + ignore: 5.3.2 + import-fresh: 3.3.1 + js-yaml: 4.1.0 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + + '@eslint/js@9.20.0': {} + + '@eslint/object-schema@2.1.6': {} + + '@eslint/plugin-kit@0.2.6': + dependencies: + '@eslint/core': 0.11.0 + levn: 0.4.1 + + '@exodus/schemasafe@1.3.0': {} + + '@floating-ui/core@1.6.9': + dependencies: + '@floating-ui/utils': 0.2.9 + + '@floating-ui/dom@1.6.13': + dependencies: + '@floating-ui/core': 1.6.9 + '@floating-ui/utils': 0.2.9 + + '@floating-ui/react-dom@2.1.2(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@floating-ui/dom': 1.6.13 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + + '@floating-ui/react@0.26.28(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@floating-ui/react-dom': 2.1.2(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@floating-ui/utils': 0.2.9 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + tabbable: 6.2.0 + + '@floating-ui/utils@0.2.9': {} + + '@headlessui/react@2.2.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@floating-ui/react': 0.26.28(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@react-aria/focus': 3.19.1(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@react-aria/interactions': 3.23.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@tanstack/react-virtual': 3.13.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + + '@humanfs/core@0.19.1': {} + + '@humanfs/node@0.16.6': + dependencies: + '@humanfs/core': 0.19.1 + '@humanwhocodes/retry': 0.3.1 + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/retry@0.3.1': {} + + '@humanwhocodes/retry@0.4.2': {} + + '@jridgewell/gen-mapping@0.3.8': + dependencies: + '@jridgewell/set-array': 1.2.1 + '@jridgewell/sourcemap-codec': 1.5.0 + '@jridgewell/trace-mapping': 0.3.25 + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/set-array@1.2.1': {} + + '@jridgewell/sourcemap-codec@1.5.0': {} + + '@jridgewell/trace-mapping@0.3.25': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.0 + + '@nodelib/fs.scandir@2.1.5': + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.2.0 + + '@nodelib/fs.stat@2.0.5': {} + + '@nodelib/fs.walk@1.2.8': + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.19.0 + + '@opentelemetry/api-logs@0.53.0': + dependencies: + '@opentelemetry/api': 1.9.0 + + '@opentelemetry/api@1.9.0': {} + + '@opentelemetry/context-async-hooks@1.26.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + + '@opentelemetry/core@1.26.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/semantic-conventions': 1.27.0 + + '@opentelemetry/exporter-trace-otlp-http@0.53.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.53.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.53.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 
1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 1.26.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/otlp-exporter-base@0.53.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.53.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/otlp-transformer@0.53.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.53.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.53.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 1.26.0(@opentelemetry/api@1.9.0) + protobufjs: 7.4.0 + + '@opentelemetry/propagator-b3@1.26.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/propagator-jaeger@1.26.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/resources@1.26.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.27.0 + + '@opentelemetry/sdk-logs@0.53.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.53.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.26.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-metrics@1.26.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.26.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-trace-base@1.26.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.27.0 + + '@opentelemetry/sdk-trace-node@1.26.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/context-async-hooks': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/propagator-b3': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/propagator-jaeger': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 1.26.0(@opentelemetry/api@1.9.0) + semver: 7.7.1 + + '@opentelemetry/semantic-conventions@1.27.0': {} + + '@protobufjs/aspromise@1.1.2': {} + + '@protobufjs/base64@1.1.2': {} + + '@protobufjs/codegen@2.0.4': {} + + '@protobufjs/eventemitter@1.1.0': {} + + '@protobufjs/fetch@1.1.0': + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/inquire': 1.1.0 + + '@protobufjs/float@1.0.2': {} + + '@protobufjs/inquire@1.1.0': {} + + '@protobufjs/path@1.1.2': {} + + '@protobufjs/pool@1.1.0': {} + + '@protobufjs/utf8@1.1.0': {} + + '@react-aria/focus@3.19.1(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@react-aria/interactions': 3.23.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@react-aria/utils': 3.27.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@react-types/shared': 3.27.0(react@19.0.0) + '@swc/helpers': 0.5.15 + clsx: 2.1.1 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + + 
'@react-aria/interactions@3.23.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@react-aria/ssr': 3.9.7(react@19.0.0) + '@react-aria/utils': 3.27.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + '@react-types/shared': 3.27.0(react@19.0.0) + '@swc/helpers': 0.5.15 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + + '@react-aria/ssr@3.9.7(react@19.0.0)': + dependencies: + '@swc/helpers': 0.5.15 + react: 19.0.0 + + '@react-aria/utils@3.27.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@react-aria/ssr': 3.9.7(react@19.0.0) + '@react-stately/utils': 3.10.5(react@19.0.0) + '@react-types/shared': 3.27.0(react@19.0.0) + '@swc/helpers': 0.5.15 + clsx: 2.1.1 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + + '@react-stately/utils@3.10.5(react@19.0.0)': + dependencies: + '@swc/helpers': 0.5.15 + react: 19.0.0 + + '@react-types/shared@3.27.0(react@19.0.0)': + dependencies: + react: 19.0.0 + + '@redocly/ajv@8.11.2': + dependencies: + fast-deep-equal: 3.1.3 + json-schema-traverse: 1.0.0 + require-from-string: 2.0.2 + uri-js-replace: 1.0.1 + + '@redocly/cli@1.30.0': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/exporter-trace-otlp-http': 0.53.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-node': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.27.0 + '@redocly/openapi-core': 1.30.0 + abort-controller: 3.0.0 + chokidar: 3.6.0 + colorette: 1.4.0 + core-js: 3.40.0 + form-data: 4.0.2 + get-port-please: 3.1.2 + glob: 7.2.3 + handlebars: 4.7.8 + mobx: 6.13.6 + pluralize: 8.0.0 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + redoc: 2.4.0(core-js@3.40.0)(mobx@6.13.6)(react-dom@19.0.0(react@19.0.0))(react@19.0.0)(styled-components@6.1.15(react-dom@19.0.0(react@19.0.0))(react@19.0.0)) + semver: 7.7.1 + simple-websocket: 9.1.0 + styled-components: 6.1.15(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + yargs: 17.0.1 + transitivePeerDependencies: + - bufferutil + - encoding + - react-native + - supports-color + - utf-8-validate + + '@redocly/config@0.20.3': {} + + '@redocly/openapi-core@1.30.0': + dependencies: + '@redocly/ajv': 8.11.2 + '@redocly/config': 0.20.3 + colorette: 1.4.0 + https-proxy-agent: 7.0.6 + js-levenshtein: 1.1.6 + js-yaml: 4.1.0 + minimatch: 5.1.6 + pluralize: 8.0.0 + yaml-ast-parser: 0.0.43 + transitivePeerDependencies: + - supports-color + + '@rollup/rollup-android-arm-eabi@4.34.8': + optional: true + + '@rollup/rollup-android-arm64@4.34.8': + optional: true + + '@rollup/rollup-darwin-arm64@4.34.8': + optional: true + + '@rollup/rollup-darwin-x64@4.34.8': + optional: true + + '@rollup/rollup-freebsd-arm64@4.34.8': + optional: true + + '@rollup/rollup-freebsd-x64@4.34.8': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.34.8': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.34.8': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.34.8': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.34.8': + optional: true + + '@rollup/rollup-linux-loongarch64-gnu@4.34.8': + optional: true + + '@rollup/rollup-linux-powerpc64le-gnu@4.34.8': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.34.8': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.34.8': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.34.8': + optional: true + + '@rollup/rollup-linux-x64-musl@4.34.8': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.34.8': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.34.8': + 
optional: true + + '@rollup/rollup-win32-x64-msvc@4.34.8': + optional: true + + '@swc/helpers@0.5.15': + dependencies: + tslib: 2.8.1 + + '@tailwindcss/node@4.0.7': + dependencies: + enhanced-resolve: 5.18.1 + jiti: 2.4.2 + tailwindcss: 4.0.7 + + '@tailwindcss/oxide-android-arm64@4.0.7': + optional: true + + '@tailwindcss/oxide-darwin-arm64@4.0.7': + optional: true + + '@tailwindcss/oxide-darwin-x64@4.0.7': + optional: true + + '@tailwindcss/oxide-freebsd-x64@4.0.7': + optional: true + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.0.7': + optional: true + + '@tailwindcss/oxide-linux-arm64-gnu@4.0.7': + optional: true + + '@tailwindcss/oxide-linux-arm64-musl@4.0.7': + optional: true + + '@tailwindcss/oxide-linux-x64-gnu@4.0.7': + optional: true + + '@tailwindcss/oxide-linux-x64-musl@4.0.7': + optional: true + + '@tailwindcss/oxide-win32-arm64-msvc@4.0.7': + optional: true + + '@tailwindcss/oxide-win32-x64-msvc@4.0.7': + optional: true + + '@tailwindcss/oxide@4.0.7': + optionalDependencies: + '@tailwindcss/oxide-android-arm64': 4.0.7 + '@tailwindcss/oxide-darwin-arm64': 4.0.7 + '@tailwindcss/oxide-darwin-x64': 4.0.7 + '@tailwindcss/oxide-freebsd-x64': 4.0.7 + '@tailwindcss/oxide-linux-arm-gnueabihf': 4.0.7 + '@tailwindcss/oxide-linux-arm64-gnu': 4.0.7 + '@tailwindcss/oxide-linux-arm64-musl': 4.0.7 + '@tailwindcss/oxide-linux-x64-gnu': 4.0.7 + '@tailwindcss/oxide-linux-x64-musl': 4.0.7 + '@tailwindcss/oxide-win32-arm64-msvc': 4.0.7 + '@tailwindcss/oxide-win32-x64-msvc': 4.0.7 + + '@tailwindcss/postcss@4.0.7': + dependencies: + '@alloc/quick-lru': 5.2.0 + '@tailwindcss/node': 4.0.7 + '@tailwindcss/oxide': 4.0.7 + lightningcss: 1.29.1 + postcss: 8.5.3 + tailwindcss: 4.0.7 + + '@tailwindcss/typography@0.5.16(tailwindcss@4.0.7)': + dependencies: + lodash.castarray: 4.4.0 + lodash.isplainobject: 4.0.6 + lodash.merge: 4.6.2 + postcss-selector-parser: 6.0.10 + tailwindcss: 4.0.7 + + '@tailwindcss/vite@4.0.7(vite@6.1.1(@types/node@22.13.4)(jiti@2.4.2)(lightningcss@1.29.1))': + dependencies: + '@tailwindcss/node': 4.0.7 + '@tailwindcss/oxide': 4.0.7 + lightningcss: 1.29.1 + tailwindcss: 4.0.7 + vite: 6.1.1(@types/node@22.13.4)(jiti@2.4.2)(lightningcss@1.29.1) + + '@tanstack/react-virtual@3.13.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)': + dependencies: + '@tanstack/virtual-core': 3.13.0 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + + '@tanstack/virtual-core@3.13.0': {} + + '@types/babel__core@7.20.5': + dependencies: + '@babel/parser': 7.26.9 + '@babel/types': 7.26.9 + '@types/babel__generator': 7.6.8 + '@types/babel__template': 7.4.4 + '@types/babel__traverse': 7.20.6 + + '@types/babel__generator@7.6.8': + dependencies: + '@babel/types': 7.26.9 + + '@types/babel__template@7.4.4': + dependencies: + '@babel/parser': 7.26.9 + '@babel/types': 7.26.9 + + '@types/babel__traverse@7.20.6': + dependencies: + '@babel/types': 7.26.9 + + '@types/cookie@0.6.0': {} + + '@types/estree@1.0.6': {} + + '@types/json-schema@7.0.15': {} + + '@types/linkify-it@5.0.0': {} + + '@types/markdown-it@14.1.2': + dependencies: + '@types/linkify-it': 5.0.0 + '@types/mdurl': 2.0.0 + + '@types/mdurl@2.0.0': {} + + '@types/node@22.13.4': + dependencies: + undici-types: 6.20.0 + + '@types/react-dom@19.0.4(@types/react@19.0.10)': + dependencies: + '@types/react': 19.0.10 + + '@types/react@19.0.10': + dependencies: + csstype: 3.1.3 + + '@types/stylis@4.2.5': {} + + '@types/trusted-types@2.0.7': + optional: true + + 
'@typescript-eslint/eslint-plugin@8.24.1(@typescript-eslint/parser@8.24.1(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3))(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3)': + dependencies: + '@eslint-community/regexpp': 4.12.1 + '@typescript-eslint/parser': 8.24.1(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/scope-manager': 8.24.1 + '@typescript-eslint/type-utils': 8.24.1(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/utils': 8.24.1(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/visitor-keys': 8.24.1 + eslint: 9.20.1(jiti@2.4.2) + graphemer: 1.4.0 + ignore: 5.3.2 + natural-compare: 1.4.0 + ts-api-utils: 2.0.1(typescript@5.7.3) + typescript: 5.7.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/parser@8.24.1(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3)': + dependencies: + '@typescript-eslint/scope-manager': 8.24.1 + '@typescript-eslint/types': 8.24.1 + '@typescript-eslint/typescript-estree': 8.24.1(typescript@5.7.3) + '@typescript-eslint/visitor-keys': 8.24.1 + debug: 4.4.0 + eslint: 9.20.1(jiti@2.4.2) + typescript: 5.7.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/scope-manager@8.24.1': + dependencies: + '@typescript-eslint/types': 8.24.1 + '@typescript-eslint/visitor-keys': 8.24.1 + + '@typescript-eslint/type-utils@8.24.1(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3)': + dependencies: + '@typescript-eslint/typescript-estree': 8.24.1(typescript@5.7.3) + '@typescript-eslint/utils': 8.24.1(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3) + debug: 4.4.0 + eslint: 9.20.1(jiti@2.4.2) + ts-api-utils: 2.0.1(typescript@5.7.3) + typescript: 5.7.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/types@8.24.1': {} + + '@typescript-eslint/typescript-estree@8.24.1(typescript@5.7.3)': + dependencies: + '@typescript-eslint/types': 8.24.1 + '@typescript-eslint/visitor-keys': 8.24.1 + debug: 4.4.0 + fast-glob: 3.3.3 + is-glob: 4.0.3 + minimatch: 9.0.5 + semver: 7.7.1 + ts-api-utils: 2.0.1(typescript@5.7.3) + typescript: 5.7.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@8.24.1(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3)': + dependencies: + '@eslint-community/eslint-utils': 4.4.1(eslint@9.20.1(jiti@2.4.2)) + '@typescript-eslint/scope-manager': 8.24.1 + '@typescript-eslint/types': 8.24.1 + '@typescript-eslint/typescript-estree': 8.24.1(typescript@5.7.3) + eslint: 9.20.1(jiti@2.4.2) + typescript: 5.7.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/visitor-keys@8.24.1': + dependencies: + '@typescript-eslint/types': 8.24.1 + eslint-visitor-keys: 4.2.0 + + '@vitejs/plugin-react@4.3.4(vite@6.1.1(@types/node@22.13.4)(jiti@2.4.2)(lightningcss@1.29.1))': + dependencies: + '@babel/core': 7.26.9 + '@babel/plugin-transform-react-jsx-self': 7.25.9(@babel/core@7.26.9) + '@babel/plugin-transform-react-jsx-source': 7.25.9(@babel/core@7.26.9) + '@types/babel__core': 7.20.5 + react-refresh: 0.14.2 + vite: 6.1.1(@types/node@22.13.4)(jiti@2.4.2)(lightningcss@1.29.1) + transitivePeerDependencies: + - supports-color + + abort-controller@3.0.0: + dependencies: + event-target-shim: 5.0.1 + + acorn-jsx@5.3.2(acorn@8.14.0): + dependencies: + acorn: 8.14.0 + + acorn@8.14.0: {} + + agent-base@7.1.3: {} + + ajv@6.12.6: + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + ansi-regex@5.0.1: {} + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + anymatch@3.1.3: + 
dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + + argparse@2.0.1: {} + + asynckit@0.4.0: {} + + autoprefixer@10.4.20(postcss@8.5.3): + dependencies: + browserslist: 4.24.4 + caniuse-lite: 1.0.30001700 + fraction.js: 4.3.7 + normalize-range: 0.1.2 + picocolors: 1.1.1 + postcss: 8.5.3 + postcss-value-parser: 4.2.0 + + balanced-match@1.0.2: {} + + binary-extensions@2.3.0: {} + + brace-expansion@1.1.11: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + brace-expansion@2.0.1: + dependencies: + balanced-match: 1.0.2 + + braces@3.0.3: + dependencies: + fill-range: 7.1.1 + + browserslist@4.24.4: + dependencies: + caniuse-lite: 1.0.30001700 + electron-to-chromium: 1.5.102 + node-releases: 2.0.19 + update-browserslist-db: 1.1.2(browserslist@4.24.4) + + call-bind-apply-helpers@1.0.2: + dependencies: + es-errors: 1.3.0 + function-bind: 1.1.2 + + call-me-maybe@1.0.2: {} + + callsites@3.1.0: {} + + camelize@1.0.1: {} + + caniuse-lite@1.0.30001700: {} + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + chokidar@3.6.0: + dependencies: + anymatch: 3.1.3 + braces: 3.0.3 + glob-parent: 5.1.2 + is-binary-path: 2.1.0 + is-glob: 4.0.3 + normalize-path: 3.0.0 + readdirp: 3.6.0 + optionalDependencies: + fsevents: 2.3.3 + + classnames@2.5.1: {} + + cliui@7.0.4: + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 7.0.0 + + clsx@2.1.1: {} + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + + colorette@1.4.0: {} + + combined-stream@1.0.8: + dependencies: + delayed-stream: 1.0.0 + + concat-map@0.0.1: {} + + convert-source-map@2.0.0: {} + + cookie@1.0.2: {} + + core-js@3.40.0: {} + + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + css-color-keywords@1.0.0: {} + + css-to-react-native@3.2.0: + dependencies: + camelize: 1.0.1 + css-color-keywords: 1.0.0 + postcss-value-parser: 4.2.0 + + cssesc@3.0.0: {} + + csstype@3.1.3: {} + + debug@4.4.0: + dependencies: + ms: 2.1.3 + + decko@1.2.0: {} + + deep-is@0.1.4: {} + + delayed-stream@1.0.0: {} + + detect-libc@1.0.3: {} + + dompurify@3.2.4: + optionalDependencies: + '@types/trusted-types': 2.0.7 + + dunder-proto@1.0.1: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-errors: 1.3.0 + gopd: 1.2.0 + + electron-to-chromium@1.5.102: {} + + emoji-regex@8.0.0: {} + + enhanced-resolve@5.18.1: + dependencies: + graceful-fs: 4.2.11 + tapable: 2.2.1 + + entities@4.5.0: {} + + es-define-property@1.0.1: {} + + es-errors@1.3.0: {} + + es-object-atoms@1.1.1: + dependencies: + es-errors: 1.3.0 + + es-set-tostringtag@2.1.0: + dependencies: + es-errors: 1.3.0 + get-intrinsic: 1.2.7 + has-tostringtag: 1.0.2 + hasown: 2.0.2 + + es6-promise@3.3.1: {} + + esbuild@0.24.2: + optionalDependencies: + '@esbuild/aix-ppc64': 0.24.2 + '@esbuild/android-arm': 0.24.2 + '@esbuild/android-arm64': 0.24.2 + '@esbuild/android-x64': 0.24.2 + '@esbuild/darwin-arm64': 0.24.2 + '@esbuild/darwin-x64': 0.24.2 + '@esbuild/freebsd-arm64': 0.24.2 + '@esbuild/freebsd-x64': 0.24.2 + '@esbuild/linux-arm': 0.24.2 + '@esbuild/linux-arm64': 0.24.2 + '@esbuild/linux-ia32': 0.24.2 + '@esbuild/linux-loong64': 0.24.2 + '@esbuild/linux-mips64el': 0.24.2 + '@esbuild/linux-ppc64': 0.24.2 + '@esbuild/linux-riscv64': 0.24.2 + '@esbuild/linux-s390x': 0.24.2 + '@esbuild/linux-x64': 0.24.2 + '@esbuild/netbsd-arm64': 0.24.2 + '@esbuild/netbsd-x64': 0.24.2 + '@esbuild/openbsd-arm64': 0.24.2 + '@esbuild/openbsd-x64': 0.24.2 + '@esbuild/sunos-x64': 0.24.2 + '@esbuild/win32-arm64': 
0.24.2 + '@esbuild/win32-ia32': 0.24.2 + '@esbuild/win32-x64': 0.24.2 + + escalade@3.2.0: {} + + escape-string-regexp@4.0.0: {} + + eslint-plugin-react-hooks@5.1.0(eslint@9.20.1(jiti@2.4.2)): + dependencies: + eslint: 9.20.1(jiti@2.4.2) + + eslint-plugin-react-refresh@0.4.19(eslint@9.20.1(jiti@2.4.2)): + dependencies: + eslint: 9.20.1(jiti@2.4.2) + + eslint-scope@8.2.0: + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + + eslint-visitor-keys@3.4.3: {} + + eslint-visitor-keys@4.2.0: {} + + eslint@9.20.1(jiti@2.4.2): + dependencies: + '@eslint-community/eslint-utils': 4.4.1(eslint@9.20.1(jiti@2.4.2)) + '@eslint-community/regexpp': 4.12.1 + '@eslint/config-array': 0.19.2 + '@eslint/core': 0.11.0 + '@eslint/eslintrc': 3.2.0 + '@eslint/js': 9.20.0 + '@eslint/plugin-kit': 0.2.6 + '@humanfs/node': 0.16.6 + '@humanwhocodes/module-importer': 1.0.1 + '@humanwhocodes/retry': 0.4.2 + '@types/estree': 1.0.6 + '@types/json-schema': 7.0.15 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.6 + debug: 4.4.0 + escape-string-regexp: 4.0.0 + eslint-scope: 8.2.0 + eslint-visitor-keys: 4.2.0 + espree: 10.3.0 + esquery: 1.6.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 8.0.0 + find-up: 5.0.0 + glob-parent: 6.0.2 + ignore: 5.3.2 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + json-stable-stringify-without-jsonify: 1.0.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.4 + optionalDependencies: + jiti: 2.4.2 + transitivePeerDependencies: + - supports-color + + espree@10.3.0: + dependencies: + acorn: 8.14.0 + acorn-jsx: 5.3.2(acorn@8.14.0) + eslint-visitor-keys: 4.2.0 + + esquery@1.6.0: + dependencies: + estraverse: 5.3.0 + + esrecurse@4.3.0: + dependencies: + estraverse: 5.3.0 + + estraverse@5.3.0: {} + + esutils@2.0.3: {} + + event-target-shim@5.0.1: {} + + eventemitter3@5.0.1: {} + + fast-deep-equal@3.1.3: {} + + fast-glob@3.3.3: + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.8 + + fast-json-stable-stringify@2.1.0: {} + + fast-levenshtein@2.0.6: {} + + fast-safe-stringify@2.1.1: {} + + fast-xml-parser@4.5.2: + dependencies: + strnum: 1.1.0 + + fastq@1.19.0: + dependencies: + reusify: 1.0.4 + + file-entry-cache@8.0.0: + dependencies: + flat-cache: 4.0.1 + + fill-range@7.1.1: + dependencies: + to-regex-range: 5.0.1 + + find-up@5.0.0: + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + + flat-cache@4.0.1: + dependencies: + flatted: 3.3.3 + keyv: 4.5.4 + + flatted@3.3.3: {} + + foreach@2.0.6: {} + + form-data@4.0.2: + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + es-set-tostringtag: 2.1.0 + mime-types: 2.1.35 + + fraction.js@4.3.7: {} + + fs.realpath@1.0.0: {} + + fsevents@2.3.3: + optional: true + + function-bind@1.1.2: {} + + gensync@1.0.0-beta.2: {} + + get-caller-file@2.0.5: {} + + get-intrinsic@1.2.7: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-define-property: 1.0.1 + es-errors: 1.3.0 + es-object-atoms: 1.1.1 + function-bind: 1.1.2 + get-proto: 1.0.1 + gopd: 1.2.0 + has-symbols: 1.1.0 + hasown: 2.0.2 + math-intrinsics: 1.1.0 + + get-port-please@3.1.2: {} + + get-proto@1.0.1: + dependencies: + dunder-proto: 1.0.1 + es-object-atoms: 1.1.1 + + glob-parent@5.1.2: + dependencies: + is-glob: 4.0.3 + + glob-parent@6.0.2: + dependencies: + is-glob: 4.0.3 + + glob@7.2.3: + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + + globals@11.12.0: {} + + globals@14.0.0: {} + + 
globals@15.15.0: {} + + gopd@1.2.0: {} + + graceful-fs@4.2.11: {} + + graphemer@1.4.0: {} + + handlebars@4.7.8: + dependencies: + minimist: 1.2.8 + neo-async: 2.6.2 + source-map: 0.6.1 + wordwrap: 1.0.0 + optionalDependencies: + uglify-js: 3.19.3 + + has-flag@4.0.0: {} + + has-symbols@1.1.0: {} + + has-tostringtag@1.0.2: + dependencies: + has-symbols: 1.1.0 + + hasown@2.0.2: + dependencies: + function-bind: 1.1.2 + + html-parse-stringify@3.0.1: + dependencies: + void-elements: 3.1.0 + + http2-client@1.3.5: {} + + https-proxy-agent@7.0.6: + dependencies: + agent-base: 7.1.3 + debug: 4.4.0 + transitivePeerDependencies: + - supports-color + + i18next@24.2.2(typescript@5.7.3): + dependencies: + '@babel/runtime': 7.26.9 + optionalDependencies: + typescript: 5.7.3 + + ignore@5.3.2: {} + + import-fresh@3.3.1: + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + + imurmurhash@0.1.4: {} + + inflight@1.0.6: + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + + inherits@2.0.4: {} + + is-binary-path@2.1.0: + dependencies: + binary-extensions: 2.3.0 + + is-extglob@2.1.1: {} + + is-fullwidth-code-point@3.0.0: {} + + is-glob@4.0.3: + dependencies: + is-extglob: 2.1.1 + + is-number@7.0.0: {} + + isexe@2.0.0: {} + + jiti@2.4.2: {} + + js-levenshtein@1.1.6: {} + + js-tokens@4.0.0: {} + + js-yaml@4.1.0: + dependencies: + argparse: 2.0.1 + + jsesc@3.1.0: {} + + json-buffer@3.0.1: {} + + json-pointer@0.6.2: + dependencies: + foreach: 2.0.6 + + json-schema-traverse@0.4.1: {} + + json-schema-traverse@1.0.0: {} + + json-stable-stringify-without-jsonify@1.0.1: {} + + json5@2.2.3: {} + + keyv@4.5.4: + dependencies: + json-buffer: 3.0.1 + + levn@0.4.1: + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + + lightningcss-darwin-arm64@1.29.1: + optional: true + + lightningcss-darwin-x64@1.29.1: + optional: true + + lightningcss-freebsd-x64@1.29.1: + optional: true + + lightningcss-linux-arm-gnueabihf@1.29.1: + optional: true + + lightningcss-linux-arm64-gnu@1.29.1: + optional: true + + lightningcss-linux-arm64-musl@1.29.1: + optional: true + + lightningcss-linux-x64-gnu@1.29.1: + optional: true + + lightningcss-linux-x64-musl@1.29.1: + optional: true + + lightningcss-win32-arm64-msvc@1.29.1: + optional: true + + lightningcss-win32-x64-msvc@1.29.1: + optional: true + + lightningcss@1.29.1: + dependencies: + detect-libc: 1.0.3 + optionalDependencies: + lightningcss-darwin-arm64: 1.29.1 + lightningcss-darwin-x64: 1.29.1 + lightningcss-freebsd-x64: 1.29.1 + lightningcss-linux-arm-gnueabihf: 1.29.1 + lightningcss-linux-arm64-gnu: 1.29.1 + lightningcss-linux-arm64-musl: 1.29.1 + lightningcss-linux-x64-gnu: 1.29.1 + lightningcss-linux-x64-musl: 1.29.1 + lightningcss-win32-arm64-msvc: 1.29.1 + lightningcss-win32-x64-msvc: 1.29.1 + + linkify-it@5.0.0: + dependencies: + uc.micro: 2.1.0 + + locate-path@6.0.0: + dependencies: + p-locate: 5.0.0 + + lodash.castarray@4.4.0: {} + + lodash.isplainobject@4.0.6: {} + + lodash.merge@4.6.2: {} + + long@5.3.1: {} + + loose-envify@1.4.0: + dependencies: + js-tokens: 4.0.0 + + lru-cache@5.1.1: + dependencies: + yallist: 3.1.1 + + lucide-react@0.474.0(react@19.0.0): + dependencies: + react: 19.0.0 + + lunr@2.3.9: {} + + mark.js@8.11.1: {} + + markdown-it@14.1.0: + dependencies: + argparse: 2.0.1 + entities: 4.5.0 + linkify-it: 5.0.0 + mdurl: 2.0.0 + punycode.js: 2.3.1 + uc.micro: 2.1.0 + + marked@4.3.0: {} + + math-intrinsics@1.1.0: {} + + mdurl@2.0.0: {} + + merge2@1.4.1: {} + + micromatch@4.0.8: + dependencies: + braces: 3.0.3 + picomatch: 2.3.1 + + mime-db@1.52.0: {} + + 
mime-types@2.1.35: + dependencies: + mime-db: 1.52.0 + + minimatch@3.1.2: + dependencies: + brace-expansion: 1.1.11 + + minimatch@5.1.6: + dependencies: + brace-expansion: 2.0.1 + + minimatch@9.0.5: + dependencies: + brace-expansion: 2.0.1 + + minimist@1.2.8: {} + + mobx-react-lite@4.1.0(mobx@6.13.6)(react-dom@19.0.0(react@19.0.0))(react@19.0.0): + dependencies: + mobx: 6.13.6 + react: 19.0.0 + use-sync-external-store: 1.4.0(react@19.0.0) + optionalDependencies: + react-dom: 19.0.0(react@19.0.0) + + mobx-react@9.2.0(mobx@6.13.6)(react-dom@19.0.0(react@19.0.0))(react@19.0.0): + dependencies: + mobx: 6.13.6 + mobx-react-lite: 4.1.0(mobx@6.13.6)(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + react: 19.0.0 + optionalDependencies: + react-dom: 19.0.0(react@19.0.0) + + mobx@6.13.6: {} + + ms@2.1.3: {} + + nanoid@3.3.8: {} + + natural-compare@1.4.0: {} + + neo-async@2.6.2: {} + + node-fetch-h2@2.3.0: + dependencies: + http2-client: 1.3.5 + + node-fetch@2.7.0: + dependencies: + whatwg-url: 5.0.0 + + node-readfiles@0.2.0: + dependencies: + es6-promise: 3.3.1 + + node-releases@2.0.19: {} + + normalize-path@3.0.0: {} + + normalize-range@0.1.2: {} + + oas-kit-common@1.0.8: + dependencies: + fast-safe-stringify: 2.1.1 + + oas-linter@3.2.2: + dependencies: + '@exodus/schemasafe': 1.3.0 + should: 13.2.3 + yaml: 1.10.2 + + oas-resolver@2.5.6: + dependencies: + node-fetch-h2: 2.3.0 + oas-kit-common: 1.0.8 + reftools: 1.1.9 + yaml: 1.10.2 + yargs: 17.0.1 + + oas-schema-walker@1.1.5: {} + + oas-validator@5.0.8: + dependencies: + call-me-maybe: 1.0.2 + oas-kit-common: 1.0.8 + oas-linter: 3.2.2 + oas-resolver: 2.5.6 + oas-schema-walker: 1.1.5 + reftools: 1.1.9 + should: 13.2.3 + yaml: 1.10.2 + + object-assign@4.1.1: {} + + once@1.4.0: + dependencies: + wrappy: 1.0.2 + + openapi-sampler@1.6.1: + dependencies: + '@types/json-schema': 7.0.15 + fast-xml-parser: 4.5.2 + json-pointer: 0.6.2 + + optionator@0.9.4: + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.5 + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@5.0.0: + dependencies: + p-limit: 3.1.0 + + parent-module@1.0.1: + dependencies: + callsites: 3.1.0 + + path-browserify@1.0.1: {} + + path-exists@4.0.0: {} + + path-is-absolute@1.0.1: {} + + path-key@3.1.1: {} + + perfect-scrollbar@1.5.6: {} + + picocolors@1.1.1: {} + + picomatch@2.3.1: {} + + pluralize@8.0.0: {} + + polished@4.3.1: + dependencies: + '@babel/runtime': 7.26.9 + + postcss-selector-parser@6.0.10: + dependencies: + cssesc: 3.0.0 + util-deprecate: 1.0.2 + + postcss-value-parser@4.2.0: {} + + postcss@8.4.49: + dependencies: + nanoid: 3.3.8 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + postcss@8.5.3: + dependencies: + nanoid: 3.3.8 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + prelude-ls@1.2.1: {} + + prismjs@1.29.0: {} + + prop-types@15.8.1: + dependencies: + loose-envify: 1.4.0 + object-assign: 4.1.1 + react-is: 16.13.1 + + protobufjs@7.4.0: + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/base64': 1.1.2 + '@protobufjs/codegen': 2.0.4 + '@protobufjs/eventemitter': 1.1.0 + '@protobufjs/fetch': 1.1.0 + '@protobufjs/float': 1.0.2 + '@protobufjs/inquire': 1.1.0 + '@protobufjs/path': 1.1.2 + '@protobufjs/pool': 1.1.0 + '@protobufjs/utf8': 1.1.0 + '@types/node': 22.13.4 + long: 5.3.1 + + punycode.js@2.3.1: {} + + punycode@2.3.1: {} + + queue-microtask@1.2.3: {} + + randombytes@2.1.0: + dependencies: + safe-buffer: 5.2.1 + + react-dom@19.0.0(react@19.0.0): + dependencies: + react: 19.0.0 
+ scheduler: 0.25.0 + + react-i18next@15.4.1(i18next@24.2.2(typescript@5.7.3))(react-dom@19.0.0(react@19.0.0))(react@19.0.0): + dependencies: + '@babel/runtime': 7.26.9 + html-parse-stringify: 3.0.1 + i18next: 24.2.2(typescript@5.7.3) + react: 19.0.0 + optionalDependencies: + react-dom: 19.0.0(react@19.0.0) + + react-icons@5.5.0(react@19.0.0): + dependencies: + react: 19.0.0 + + react-is@16.13.1: {} + + react-refresh@0.14.2: {} + + react-router-dom@7.2.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0): + dependencies: + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + react-router: 7.2.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + + react-router@7.2.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0): + dependencies: + '@types/cookie': 0.6.0 + cookie: 1.0.2 + react: 19.0.0 + set-cookie-parser: 2.7.1 + turbo-stream: 2.4.0 + optionalDependencies: + react-dom: 19.0.0(react@19.0.0) + + react-tabs@6.1.0(react@19.0.0): + dependencies: + clsx: 2.1.1 + prop-types: 15.8.1 + react: 19.0.0 + + react@19.0.0: {} + + readable-stream@3.6.2: + dependencies: + inherits: 2.0.4 + string_decoder: 1.3.0 + util-deprecate: 1.0.2 + + readdirp@3.6.0: + dependencies: + picomatch: 2.3.1 + + redoc@2.4.0(core-js@3.40.0)(mobx@6.13.6)(react-dom@19.0.0(react@19.0.0))(react@19.0.0)(styled-components@6.1.15(react-dom@19.0.0(react@19.0.0))(react@19.0.0)): + dependencies: + '@redocly/openapi-core': 1.30.0 + classnames: 2.5.1 + core-js: 3.40.0 + decko: 1.2.0 + dompurify: 3.2.4 + eventemitter3: 5.0.1 + json-pointer: 0.6.2 + lunr: 2.3.9 + mark.js: 8.11.1 + marked: 4.3.0 + mobx: 6.13.6 + mobx-react: 9.2.0(mobx@6.13.6)(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + openapi-sampler: 1.6.1 + path-browserify: 1.0.1 + perfect-scrollbar: 1.5.6 + polished: 4.3.1 + prismjs: 1.29.0 + prop-types: 15.8.1 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + react-tabs: 6.1.0(react@19.0.0) + slugify: 1.4.7 + stickyfill: 1.1.1 + styled-components: 6.1.15(react-dom@19.0.0(react@19.0.0))(react@19.0.0) + swagger2openapi: 7.0.8 + url-template: 2.0.8 + transitivePeerDependencies: + - encoding + - react-native + - supports-color + + reftools@1.1.9: {} + + regenerator-runtime@0.14.1: {} + + require-directory@2.1.1: {} + + require-from-string@2.0.2: {} + + resolve-from@4.0.0: {} + + reusify@1.0.4: {} + + rollup@4.34.8: + dependencies: + '@types/estree': 1.0.6 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.34.8 + '@rollup/rollup-android-arm64': 4.34.8 + '@rollup/rollup-darwin-arm64': 4.34.8 + '@rollup/rollup-darwin-x64': 4.34.8 + '@rollup/rollup-freebsd-arm64': 4.34.8 + '@rollup/rollup-freebsd-x64': 4.34.8 + '@rollup/rollup-linux-arm-gnueabihf': 4.34.8 + '@rollup/rollup-linux-arm-musleabihf': 4.34.8 + '@rollup/rollup-linux-arm64-gnu': 4.34.8 + '@rollup/rollup-linux-arm64-musl': 4.34.8 + '@rollup/rollup-linux-loongarch64-gnu': 4.34.8 + '@rollup/rollup-linux-powerpc64le-gnu': 4.34.8 + '@rollup/rollup-linux-riscv64-gnu': 4.34.8 + '@rollup/rollup-linux-s390x-gnu': 4.34.8 + '@rollup/rollup-linux-x64-gnu': 4.34.8 + '@rollup/rollup-linux-x64-musl': 4.34.8 + '@rollup/rollup-win32-arm64-msvc': 4.34.8 + '@rollup/rollup-win32-ia32-msvc': 4.34.8 + '@rollup/rollup-win32-x64-msvc': 4.34.8 + fsevents: 2.3.3 + + run-parallel@1.2.0: + dependencies: + queue-microtask: 1.2.3 + + safe-buffer@5.2.1: {} + + scheduler@0.25.0: {} + + semver@6.3.1: {} + + semver@7.7.1: {} + + set-cookie-parser@2.7.1: {} + + shallowequal@1.1.0: {} + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + should-equal@2.0.0: + dependencies: 
+ should-type: 1.4.0 + + should-format@3.0.3: + dependencies: + should-type: 1.4.0 + should-type-adaptors: 1.1.0 + + should-type-adaptors@1.1.0: + dependencies: + should-type: 1.4.0 + should-util: 1.0.1 + + should-type@1.4.0: {} + + should-util@1.0.1: {} + + should@13.2.3: + dependencies: + should-equal: 2.0.0 + should-format: 3.0.3 + should-type: 1.4.0 + should-type-adaptors: 1.1.0 + should-util: 1.0.1 + + simple-websocket@9.1.0: + dependencies: + debug: 4.4.0 + queue-microtask: 1.2.3 + randombytes: 2.1.0 + readable-stream: 3.6.2 + ws: 7.5.10 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + + slugify@1.4.7: {} + + source-map-js@1.2.1: {} + + source-map@0.6.1: {} + + stickyfill@1.1.1: {} + + string-width@4.2.3: + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + + string_decoder@1.3.0: + dependencies: + safe-buffer: 5.2.1 + + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + strip-json-comments@3.1.1: {} + + strnum@1.1.0: {} + + styled-components@6.1.15(react-dom@19.0.0(react@19.0.0))(react@19.0.0): + dependencies: + '@emotion/is-prop-valid': 1.2.2 + '@emotion/unitless': 0.8.1 + '@types/stylis': 4.2.5 + css-to-react-native: 3.2.0 + csstype: 3.1.3 + postcss: 8.4.49 + react: 19.0.0 + react-dom: 19.0.0(react@19.0.0) + shallowequal: 1.1.0 + stylis: 4.3.2 + tslib: 2.6.2 + + stylis@4.3.2: {} + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + swagger2openapi@7.0.8: + dependencies: + call-me-maybe: 1.0.2 + node-fetch: 2.7.0 + node-fetch-h2: 2.3.0 + node-readfiles: 0.2.0 + oas-kit-common: 1.0.8 + oas-resolver: 2.5.6 + oas-schema-walker: 1.1.5 + oas-validator: 5.0.8 + reftools: 1.1.9 + yaml: 1.10.2 + yargs: 17.0.1 + transitivePeerDependencies: + - encoding + + tabbable@6.2.0: {} + + tailwindcss@4.0.7: {} + + tapable@2.2.1: {} + + to-regex-range@5.0.1: + dependencies: + is-number: 7.0.0 + + tr46@0.0.3: {} + + ts-api-utils@2.0.1(typescript@5.7.3): + dependencies: + typescript: 5.7.3 + + tslib@2.6.2: {} + + tslib@2.8.1: {} + + turbo-stream@2.4.0: {} + + type-check@0.4.0: + dependencies: + prelude-ls: 1.2.1 + + typescript-eslint@8.24.1(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3): + dependencies: + '@typescript-eslint/eslint-plugin': 8.24.1(@typescript-eslint/parser@8.24.1(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3))(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/parser': 8.24.1(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3) + '@typescript-eslint/utils': 8.24.1(eslint@9.20.1(jiti@2.4.2))(typescript@5.7.3) + eslint: 9.20.1(jiti@2.4.2) + typescript: 5.7.3 + transitivePeerDependencies: + - supports-color + + typescript@5.7.3: {} + + uc.micro@2.1.0: {} + + uglify-js@3.19.3: + optional: true + + undici-types@6.20.0: {} + + update-browserslist-db@1.1.2(browserslist@4.24.4): + dependencies: + browserslist: 4.24.4 + escalade: 3.2.0 + picocolors: 1.1.1 + + uri-js-replace@1.0.1: {} + + uri-js@4.4.1: + dependencies: + punycode: 2.3.1 + + url-template@2.0.8: {} + + use-sync-external-store@1.4.0(react@19.0.0): + dependencies: + react: 19.0.0 + + util-deprecate@1.0.2: {} + + vite@6.1.1(@types/node@22.13.4)(jiti@2.4.2)(lightningcss@1.29.1): + dependencies: + esbuild: 0.24.2 + postcss: 8.5.3 + rollup: 4.34.8 + optionalDependencies: + '@types/node': 22.13.4 + fsevents: 2.3.3 + jiti: 2.4.2 + lightningcss: 1.29.1 + + void-elements@3.1.0: {} + + webidl-conversions@3.0.1: {} + + whatwg-url@5.0.0: + dependencies: + tr46: 0.0.3 + webidl-conversions: 3.0.1 + + which@2.0.2: + dependencies: + isexe: 2.0.0 + + 
word-wrap@1.2.5: {} + + wordwrap@1.0.0: {} + + wrap-ansi@7.0.0: + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + + wrappy@1.0.2: {} + + ws@7.5.10: {} + + y18n@5.0.8: {} + + yallist@3.1.1: {} + + yaml-ast-parser@0.0.43: {} + + yaml@1.10.2: {} + + yargs-parser@20.2.9: {} + + yargs@17.0.1: + dependencies: + cliui: 7.0.4 + escalade: 3.2.0 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 20.2.9 + + yocto-queue@0.1.0: {}
diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml
new file mode 100644
index 0000000..cc76cff
--- /dev/null
+++ b/pnpm-workspace.yaml
@@ -0,0 +1,3 @@
+packages:
+  - 'api'
+  - 'frontend'